/* aic7xxx_osm.c */
  1. /*
  2. * Adaptec AIC7xxx device driver for Linux.
  3. *
  4. * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic7xxx_osm.c#235 $
  5. *
  6. * Copyright (c) 1994 John Aycock
  7. * The University of Calgary Department of Computer Science.
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License as published by
  11. * the Free Software Foundation; either version 2, or (at your option)
  12. * any later version.
  13. *
  14. * This program is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  17. * GNU General Public License for more details.
  18. *
  19. * You should have received a copy of the GNU General Public License
  20. * along with this program; see the file COPYING. If not, write to
  21. * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
  22. *
  23. * Sources include the Adaptec 1740 driver (aha1740.c), the Ultrastor 24F
  24. * driver (ultrastor.c), various Linux kernel source, the Adaptec EISA
  25. * config file (!adp7771.cfg), the Adaptec AHA-2740A Series User's Guide,
  26. * the Linux Kernel Hacker's Guide, Writing a SCSI Device Driver for Linux,
  27. * the Adaptec 1542 driver (aha1542.c), the Adaptec EISA overlay file
  28. * (adp7770.ovl), the Adaptec AHA-2740 Series Technical Reference Manual,
  29. * the Adaptec AIC-7770 Data Book, the ANSI SCSI specification, the
  30. * ANSI SCSI-2 specification (draft 10c), ...
  31. *
  32. * --------------------------------------------------------------------------
  33. *
  34. * Modifications by Daniel M. Eischen (deischen@iworks.InterWorks.org):
  35. *
  36. * Substantially modified to include support for wide and twin bus
  37. * adapters, DMAing of SCBs, tagged queueing, IRQ sharing, bug fixes,
  38. * SCB paging, and other rework of the code.
  39. *
  40. * --------------------------------------------------------------------------
  41. * Copyright (c) 1994-2000 Justin T. Gibbs.
  42. * Copyright (c) 2000-2001 Adaptec Inc.
  43. * All rights reserved.
  44. *
  45. * Redistribution and use in source and binary forms, with or without
  46. * modification, are permitted provided that the following conditions
  47. * are met:
  48. * 1. Redistributions of source code must retain the above copyright
  49. * notice, this list of conditions, and the following disclaimer,
  50. * without modification.
  51. * 2. Redistributions in binary form must reproduce at minimum a disclaimer
  52. * substantially similar to the "NO WARRANTY" disclaimer below
  53. * ("Disclaimer") and any redistribution must be conditioned upon
  54. * including a substantially similar Disclaimer requirement for further
  55. * binary redistribution.
  56. * 3. Neither the names of the above-listed copyright holders nor the names
  57. * of any contributors may be used to endorse or promote products derived
  58. * from this software without specific prior written permission.
  59. *
  60. * Alternatively, this software may be distributed under the terms of the
  61. * GNU General Public License ("GPL") version 2 as published by the Free
  62. * Software Foundation.
  63. *
  64. * NO WARRANTY
  65. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  66. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  67. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
  68. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  69. * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  70. * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  71. * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  72. * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
  73. * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
  74. * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  75. * POSSIBILITY OF SUCH DAMAGES.
  76. *
  77. *---------------------------------------------------------------------------
  78. *
  79. * Thanks also go to (in alphabetical order) the following:
  80. *
  81. * Rory Bolt - Sequencer bug fixes
  82. * Jay Estabrook - Initial DEC Alpha support
  83. * Doug Ledford - Much needed abort/reset bug fixes
  84. * Kai Makisara - DMAing of SCBs
  85. *
  86. * A Boot time option was also added for not resetting the scsi bus.
  87. *
  88. * Form: aic7xxx=extended
  89. * aic7xxx=no_reset
  90. * aic7xxx=verbose
  91. *
  92. * Daniel M. Eischen, deischen@iworks.InterWorks.org, 1/23/97
  93. *
  94. * Id: aic7xxx.c,v 4.1 1997/06/12 08:23:42 deang Exp
  95. */
  96. /*
  97. * Further driver modifications made by Doug Ledford <dledford@redhat.com>
  98. *
  99. * Copyright (c) 1997-1999 Doug Ledford
  100. *
  101. * These changes are released under the same licensing terms as the FreeBSD
  102. * driver written by Justin Gibbs. Please see his Copyright notice above
  103. * for the exact terms and conditions covering my changes as well as the
  104. * warranty statement.
  105. *
  106. * Modifications made to the aic7xxx.c,v 4.1 driver from Dan Eischen include
  107. * but are not limited to:
  108. *
  109. * 1: Import of the latest FreeBSD sequencer code for this driver
  110. * 2: Modification of kernel code to accommodate different sequencer semantics
  111. * 3: Extensive changes throughout kernel portion of driver to improve
  112. * abort/reset processing and error hanndling
  113. * 4: Other work contributed by various people on the Internet
  114. * 5: Changes to printk information and verbosity selection code
  115. * 6: General reliability related changes, especially in IRQ management
  116. * 7: Modifications to the default probe/attach order for supported cards
  117. * 8: SMP friendliness has been improved
  118. *
  119. */
  120. #include "aic7xxx_osm.h"
  121. #include "aic7xxx_inline.h"
  122. #include <scsi/scsicam.h>
  123. #include <scsi/scsi_transport.h>
  124. #include <scsi/scsi_transport_spi.h>
  125. static struct scsi_transport_template *ahc_linux_transport_template = NULL;
  126. /*
  127. * Include aiclib.c as part of our
  128. * "module dependencies are hard" work around.
  129. */
  130. #include "aiclib.c"
  131. #include <linux/init.h> /* __setup */
  132. #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
  133. #include "sd.h" /* For geometry detection */
  134. #endif
  135. #include <linux/mm.h> /* For fetching system memory size */
  136. #include <linux/blkdev.h> /* For block_size() */
  137. #include <linux/delay.h> /* For ssleep/msleep */
  138. /*
  139. * Lock protecting manipulation of the ahc softc list.
  140. */
  141. spinlock_t ahc_list_spinlock;
  142. #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
  143. /* For dynamic sglist size calculation. */
  144. u_int ahc_linux_nseg;
  145. #endif
  146. /*
  147. * Set this to the delay in seconds after SCSI bus reset.
  148. * Note, we honor this only for the initial bus reset.
  149. * The scsi error recovery code performs its own bus settle
  150. * delay handling for error recovery actions.
  151. */
  152. #ifdef CONFIG_AIC7XXX_RESET_DELAY_MS
  153. #define AIC7XXX_RESET_DELAY CONFIG_AIC7XXX_RESET_DELAY_MS
  154. #else
  155. #define AIC7XXX_RESET_DELAY 5000
  156. #endif
  157. /*
  158. * Control collection of SCSI transfer statistics for the /proc filesystem.
  159. *
  160. * NOTE: Do NOT enable this when running on kernels version 1.2.x and below.
  161. * NOTE: This does affect performance since it has to maintain statistics.
  162. */
  163. #ifdef CONFIG_AIC7XXX_PROC_STATS
  164. #define AIC7XXX_PROC_STATS
  165. #endif
  166. /*
  167. * To change the default number of tagged transactions allowed per-device,
  168. * add a line to the lilo.conf file like:
  169. * append="aic7xxx=verbose,tag_info:{{32,32,32,32},{32,32,32,32}}"
  170. * which will result in the first four devices on the first two
  171. * controllers being set to a tagged queue depth of 32.
  172. *
  173. * The tag_commands is an array of 16 to allow for wide and twin adapters.
  174. * Twin adapters will use indexes 0-7 for channel 0, and indexes 8-15
  175. * for channel 1.
  176. */
  177. typedef struct {
  178. uint8_t tag_commands[16]; /* Allow for wide/twin adapters. */
  179. } adapter_tag_info_t;
  180. /*
  181. * Modify this as you see fit for your system.
  182. *
  183. * 0 tagged queuing disabled
  184. * 1 <= n <= 253 n == max tags ever dispatched.
  185. *
  186. * The driver will throttle the number of commands dispatched to a
  187. * device if it returns queue full. For devices with a fixed maximum
  188. * queue depth, the driver will eventually determine this depth and
  189. * lock it in (a console message is printed to indicate that a lock
  190. * has occurred). On some devices, queue full is returned for a temporary
  191. * resource shortage. These devices will return queue full at varying
  192. * depths. The driver will throttle back when the queue fulls occur and
  193. * attempt to slowly increase the depth over time as the device recovers
  194. * from the resource shortage.
  195. *
  196. * In this example, the first line will disable tagged queueing for all
  197. * the devices on the first probed aic7xxx adapter.
  198. *
  199. * The second line enables tagged queueing with 4 commands/LUN for IDs
  200. * (0, 2-11, 13-15), disables tagged queueing for ID 12, and tells the
  201. * driver to attempt to use up to 64 tags for ID 1.
  202. *
  203. * The third line is the same as the first line.
  204. *
  205. * The fourth line disables tagged queueing for devices 0 and 3. It
  206. * enables tagged queueing for the other IDs, with 16 commands/LUN
  207. * for IDs 1 and 4, 127 commands/LUN for ID 8, and 4 commands/LUN for
  208. * IDs 2, 5-7, and 9-15.
  209. */
  210. /*
  211. * NOTE: The below structure is for reference only, the actual structure
  212. * to modify in order to change things is just below this comment block.
  213. adapter_tag_info_t aic7xxx_tag_info[] =
  214. {
  215. {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
  216. {{4, 64, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 4, 4, 4}},
  217. {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
  218. {{0, 16, 4, 0, 16, 4, 4, 4, 127, 4, 4, 4, 4, 4, 4, 4}}
  219. };
  220. */
  221. #ifdef CONFIG_AIC7XXX_CMDS_PER_DEVICE
  222. #define AIC7XXX_CMDS_PER_DEVICE CONFIG_AIC7XXX_CMDS_PER_DEVICE
  223. #else
  224. #define AIC7XXX_CMDS_PER_DEVICE AHC_MAX_QUEUE
  225. #endif
  226. #define AIC7XXX_CONFIGED_TAG_COMMANDS { \
  227. AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE, \
  228. AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE, \
  229. AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE, \
  230. AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE, \
  231. AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE, \
  232. AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE, \
  233. AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE, \
  234. AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE \
  235. }
  236. /*
  237. * By default, use the number of commands specified by
  238. * the users kernel configuration.
  239. */
  240. static adapter_tag_info_t aic7xxx_tag_info[] =
  241. {
  242. {AIC7XXX_CONFIGED_TAG_COMMANDS},
  243. {AIC7XXX_CONFIGED_TAG_COMMANDS},
  244. {AIC7XXX_CONFIGED_TAG_COMMANDS},
  245. {AIC7XXX_CONFIGED_TAG_COMMANDS},
  246. {AIC7XXX_CONFIGED_TAG_COMMANDS},
  247. {AIC7XXX_CONFIGED_TAG_COMMANDS},
  248. {AIC7XXX_CONFIGED_TAG_COMMANDS},
  249. {AIC7XXX_CONFIGED_TAG_COMMANDS},
  250. {AIC7XXX_CONFIGED_TAG_COMMANDS},
  251. {AIC7XXX_CONFIGED_TAG_COMMANDS},
  252. {AIC7XXX_CONFIGED_TAG_COMMANDS},
  253. {AIC7XXX_CONFIGED_TAG_COMMANDS},
  254. {AIC7XXX_CONFIGED_TAG_COMMANDS},
  255. {AIC7XXX_CONFIGED_TAG_COMMANDS},
  256. {AIC7XXX_CONFIGED_TAG_COMMANDS},
  257. {AIC7XXX_CONFIGED_TAG_COMMANDS}
  258. };
  259. /*
  260. * There should be a specific return value for this in scsi.h, but
  261. * it seems that most drivers ignore it.
  262. */
  263. #define DID_UNDERFLOW DID_ERROR
  264. void
  265. ahc_print_path(struct ahc_softc *ahc, struct scb *scb)
  266. {
  267. printk("(scsi%d:%c:%d:%d): ",
  268. ahc->platform_data->host->host_no,
  269. scb != NULL ? SCB_GET_CHANNEL(ahc, scb) : 'X',
  270. scb != NULL ? SCB_GET_TARGET(ahc, scb) : -1,
  271. scb != NULL ? SCB_GET_LUN(scb) : -1);
  272. }
  273. /*
  274. * XXX - these options apply unilaterally to _all_ 274x/284x/294x
  275. * cards in the system. This should be fixed. Exceptions to this
  276. * rule are noted in the comments.
  277. */
  278. /*
  279. * Skip the scsi bus reset. Non 0 make us skip the reset at startup. This
  280. * has no effect on any later resets that might occur due to things like
  281. * SCSI bus timeouts.
  282. */
  283. static uint32_t aic7xxx_no_reset;
  284. /*
  285. * Certain PCI motherboards will scan PCI devices from highest to lowest,
  286. * others scan from lowest to highest, and they tend to do all kinds of
  287. * strange things when they come into contact with PCI bridge chips. The
  288. * net result of all this is that the PCI card that is actually used to boot
  289. * the machine is very hard to detect. Most motherboards go from lowest
  290. * PCI slot number to highest, and the first SCSI controller found is the
  291. * one you boot from. The only exceptions to this are when a controller
  292. * has its BIOS disabled. So, we by default sort all of our SCSI controllers
  293. * from lowest PCI slot number to highest PCI slot number. We also force
  294. * all controllers with their BIOS disabled to the end of the list. This
  295. * works on *almost* all computers. Where it doesn't work, we have this
  296. * option. Setting this option to non-0 will reverse the order of the sort
  297. * to highest first, then lowest, but will still leave cards with their BIOS
  298. * disabled at the very end. That should fix everyone up unless there are
  299. * really strange cirumstances.
  300. */
  301. static uint32_t aic7xxx_reverse_scan;
  302. /*
  303. * Should we force EXTENDED translation on a controller.
  304. * 0 == Use whatever is in the SEEPROM or default to off
  305. * 1 == Use whatever is in the SEEPROM or default to on
  306. */
  307. static uint32_t aic7xxx_extended;
  308. /*
  309. * PCI bus parity checking of the Adaptec controllers. This is somewhat
  310. * dubious at best. To my knowledge, this option has never actually
  311. * solved a PCI parity problem, but on certain machines with broken PCI
  312. * chipset configurations where stray PCI transactions with bad parity are
  313. * the norm rather than the exception, the error messages can be overwelming.
  314. * It's included in the driver for completeness.
  315. * 0 = Shut off PCI parity check
  316. * non-0 = reverse polarity pci parity checking
  317. */
  318. static uint32_t aic7xxx_pci_parity = ~0;
  319. /*
  320. * Certain newer motherboards have put new PCI based devices into the
  321. * IO spaces that used to typically be occupied by VLB or EISA cards.
  322. * This overlap can cause these newer motherboards to lock up when scanned
  323. * for older EISA and VLB devices. Setting this option to non-0 will
  324. * cause the driver to skip scanning for any VLB or EISA controllers and
  325. * only support the PCI controllers. NOTE: this means that if the kernel
  326. * os compiled with PCI support disabled, then setting this to non-0
  327. * would result in never finding any devices :)
  328. */
  329. #ifndef CONFIG_AIC7XXX_PROBE_EISA_VL
  330. uint32_t aic7xxx_probe_eisa_vl;
  331. #else
  332. uint32_t aic7xxx_probe_eisa_vl = ~0;
  333. #endif
  334. /*
  335. * There are lots of broken chipsets in the world. Some of them will
  336. * violate the PCI spec when we issue byte sized memory writes to our
  337. * controller. I/O mapped register access, if allowed by the given
  338. * platform, will work in almost all cases.
  339. */
  340. uint32_t aic7xxx_allow_memio = ~0;
  341. /*
  342. * aic7xxx_detect() has been run, so register all device arrivals
  343. * immediately with the system rather than deferring to the sorted
  344. * attachment performed by aic7xxx_detect().
  345. */
  346. int aic7xxx_detect_complete;
  347. /*
  348. * So that we can set how long each device is given as a selection timeout.
  349. * The table of values goes like this:
  350. * 0 - 256ms
  351. * 1 - 128ms
  352. * 2 - 64ms
  353. * 3 - 32ms
  354. * We default to 256ms because some older devices need a longer time
  355. * to respond to initial selection.
  356. */
  357. static uint32_t aic7xxx_seltime;
  358. /*
  359. * Certain devices do not perform any aging on commands. Should the
  360. * device be saturated by commands in one portion of the disk, it is
  361. * possible for transactions on far away sectors to never be serviced.
  362. * To handle these devices, we can periodically send an ordered tag to
  363. * force all outstanding transactions to be serviced prior to a new
  364. * transaction.
  365. */
  366. uint32_t aic7xxx_periodic_otag;
  367. /*
  368. * Module information and settable options.
  369. */
  370. static char *aic7xxx = NULL;
  371. MODULE_AUTHOR("Maintainer: Justin T. Gibbs <gibbs@scsiguy.com>");
  372. MODULE_DESCRIPTION("Adaptec Aic77XX/78XX SCSI Host Bus Adapter driver");
  373. MODULE_LICENSE("Dual BSD/GPL");
  374. MODULE_VERSION(AIC7XXX_DRIVER_VERSION);
  375. module_param(aic7xxx, charp, 0444);
  376. MODULE_PARM_DESC(aic7xxx,
  377. "period delimited, options string.\n"
  378. " verbose Enable verbose/diagnostic logging\n"
  379. " allow_memio Allow device registers to be memory mapped\n"
  380. " debug Bitmask of debug values to enable\n"
  381. " no_probe Toggle EISA/VLB controller probing\n"
  382. " probe_eisa_vl Toggle EISA/VLB controller probing\n"
  383. " no_reset Supress initial bus resets\n"
  384. " extended Enable extended geometry on all controllers\n"
  385. " periodic_otag Send an ordered tagged transaction\n"
  386. " periodically to prevent tag starvation.\n"
  387. " This may be required by some older disk\n"
  388. " drives or RAID arrays.\n"
  389. " reverse_scan Sort PCI devices highest Bus/Slot to lowest\n"
  390. " tag_info:<tag_str> Set per-target tag depth\n"
  391. " global_tag_depth:<int> Global tag depth for every target\n"
  392. " on every bus\n"
  393. " seltime:<int> Selection Timeout\n"
  394. " (0/256ms,1/128ms,2/64ms,3/32ms)\n"
  395. "\n"
  396. " Sample /etc/modprobe.conf line:\n"
  397. " Toggle EISA/VLB probing\n"
  398. " Set tag depth on Controller 1/Target 1 to 10 tags\n"
  399. " Shorten the selection timeout to 128ms\n"
  400. "\n"
  401. " options aic7xxx 'aic7xxx=probe_eisa_vl.tag_info:{{}.{.10}}.seltime:1'\n"
  402. );
  403. static void ahc_linux_handle_scsi_status(struct ahc_softc *,
  404. struct ahc_linux_device *,
  405. struct scb *);
  406. static void ahc_linux_queue_cmd_complete(struct ahc_softc *ahc,
  407. Scsi_Cmnd *cmd);
  408. static void ahc_linux_sem_timeout(u_long arg);
  409. static void ahc_linux_freeze_simq(struct ahc_softc *ahc);
  410. static void ahc_linux_release_simq(u_long arg);
  411. static void ahc_linux_dev_timed_unfreeze(u_long arg);
  412. static int ahc_linux_queue_recovery_cmd(Scsi_Cmnd *cmd, scb_flag flag);
  413. static void ahc_linux_initialize_scsi_bus(struct ahc_softc *ahc);
  414. static void ahc_linux_size_nseg(void);
  415. static void ahc_linux_thread_run_complete_queue(struct ahc_softc *ahc);
  416. static u_int ahc_linux_user_tagdepth(struct ahc_softc *ahc,
  417. struct ahc_devinfo *devinfo);
  418. static void ahc_linux_device_queue_depth(struct ahc_softc *ahc,
  419. struct ahc_linux_device *dev);
  420. static struct ahc_linux_target* ahc_linux_alloc_target(struct ahc_softc*,
  421. u_int, u_int);
  422. static void ahc_linux_free_target(struct ahc_softc*,
  423. struct ahc_linux_target*);
  424. static struct ahc_linux_device* ahc_linux_alloc_device(struct ahc_softc*,
  425. struct ahc_linux_target*,
  426. u_int);
  427. static void ahc_linux_free_device(struct ahc_softc*,
  428. struct ahc_linux_device*);
  429. static void ahc_linux_run_device_queue(struct ahc_softc*,
  430. struct ahc_linux_device*);
  431. static void ahc_linux_setup_tag_info_global(char *p);
  432. static aic_option_callback_t ahc_linux_setup_tag_info;
  433. static int aic7xxx_setup(char *s);
  434. static int ahc_linux_next_unit(void);
  435. static void ahc_runq_tasklet(unsigned long data);
  436. static struct ahc_cmd *ahc_linux_run_complete_queue(struct ahc_softc *ahc);
  437. /********************************* Inlines ************************************/
  438. static __inline void ahc_schedule_runq(struct ahc_softc *ahc);
  439. static __inline struct ahc_linux_device*
  440. ahc_linux_get_device(struct ahc_softc *ahc, u_int channel,
  441. u_int target, u_int lun, int alloc);
  442. static __inline void ahc_schedule_completeq(struct ahc_softc *ahc);
  443. static __inline void ahc_linux_check_device_queue(struct ahc_softc *ahc,
  444. struct ahc_linux_device *dev);
  445. static __inline struct ahc_linux_device *
  446. ahc_linux_next_device_to_run(struct ahc_softc *ahc);
  447. static __inline void ahc_linux_run_device_queues(struct ahc_softc *ahc);
  448. static __inline void ahc_linux_unmap_scb(struct ahc_softc*, struct scb*);
  449. static __inline int ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb,
  450. struct ahc_dma_seg *sg,
  451. dma_addr_t addr, bus_size_t len);
  452. static __inline void
  453. ahc_schedule_completeq(struct ahc_softc *ahc)
  454. {
  455. if ((ahc->platform_data->flags & AHC_RUN_CMPLT_Q_TIMER) == 0) {
  456. ahc->platform_data->flags |= AHC_RUN_CMPLT_Q_TIMER;
  457. ahc->platform_data->completeq_timer.expires = jiffies;
  458. add_timer(&ahc->platform_data->completeq_timer);
  459. }
  460. }
/*
 * Schedule the device run queue tasklet for this controller.
 * Must be called with our lock held.
 */
static __inline void
ahc_schedule_runq(struct ahc_softc *ahc)
{
	tasklet_schedule(&ahc->platform_data->runq_tasklet);
}
  469. static __inline struct ahc_linux_device*
  470. ahc_linux_get_device(struct ahc_softc *ahc, u_int channel, u_int target,
  471. u_int lun, int alloc)
  472. {
  473. struct ahc_linux_target *targ;
  474. struct ahc_linux_device *dev;
  475. u_int target_offset;
  476. target_offset = target;
  477. if (channel != 0)
  478. target_offset += 8;
  479. targ = ahc->platform_data->targets[target_offset];
  480. if (targ == NULL) {
  481. if (alloc != 0) {
  482. targ = ahc_linux_alloc_target(ahc, channel, target);
  483. if (targ == NULL)
  484. return (NULL);
  485. } else
  486. return (NULL);
  487. }
  488. dev = targ->devices[lun];
  489. if (dev == NULL && alloc != 0)
  490. dev = ahc_linux_alloc_device(ahc, targ, lun);
  491. return (dev);
  492. }
/* Max error completions handed back per call; see "spoon feeding" below. */
#define AHC_LINUX_MAX_RETURNED_ERRORS 4

/*
 * Drain the platform complete queue, invoking each command's
 * scsi_done() callback under the done lock.  Completions that carry
 * an error are rate limited: after AHC_LINUX_MAX_RETURNED_ERRORS of
 * them, the remainder of the queue is deferred to the complete-queue
 * timer.  Returns the first unprocessed entry (NULL if drained).
 */
static struct ahc_cmd *
ahc_linux_run_complete_queue(struct ahc_softc *ahc)
{
	struct ahc_cmd *acmd;
	u_long done_flags;
	int with_errors;

	with_errors = 0;
	ahc_done_lock(ahc, &done_flags);
	while ((acmd = TAILQ_FIRST(&ahc->platform_data->completeq)) != NULL) {
		Scsi_Cmnd *cmd;

		if (with_errors > AHC_LINUX_MAX_RETURNED_ERRORS) {
			/*
			 * Linux uses stack recursion to requeue
			 * commands that need to be retried.  Avoid
			 * blowing out the stack by "spoon feeding"
			 * commands that completed with error back
			 * the operating system in case they are going
			 * to be retried. "ick"
			 */
			ahc_schedule_completeq(ahc);
			break;
		}
		TAILQ_REMOVE(&ahc->platform_data->completeq,
			     acmd, acmd_links.tqe);
		cmd = &acmd_scsi_cmd(acmd);
		/* Command is no longer ours; clear our back-pointer. */
		cmd->host_scribble = NULL;
		if (ahc_cmd_get_transaction_status(cmd) != DID_OK
		 || (cmd->result & 0xFF) != SCSI_STATUS_OK)
			with_errors++;
		cmd->scsi_done(cmd);
	}
	ahc_done_unlock(ahc, &done_flags);
	return (acmd);
}
  528. static __inline void
  529. ahc_linux_check_device_queue(struct ahc_softc *ahc,
  530. struct ahc_linux_device *dev)
  531. {
  532. if ((dev->flags & AHC_DEV_FREEZE_TIL_EMPTY) != 0
  533. && dev->active == 0) {
  534. dev->flags &= ~AHC_DEV_FREEZE_TIL_EMPTY;
  535. dev->qfrozen--;
  536. }
  537. if (TAILQ_FIRST(&dev->busyq) == NULL
  538. || dev->openings == 0 || dev->qfrozen != 0)
  539. return;
  540. ahc_linux_run_device_queue(ahc, dev);
  541. }
  542. static __inline struct ahc_linux_device *
  543. ahc_linux_next_device_to_run(struct ahc_softc *ahc)
  544. {
  545. if ((ahc->flags & AHC_RESOURCE_SHORTAGE) != 0
  546. || (ahc->platform_data->qfrozen != 0))
  547. return (NULL);
  548. return (TAILQ_FIRST(&ahc->platform_data->device_runq));
  549. }
  550. static __inline void
  551. ahc_linux_run_device_queues(struct ahc_softc *ahc)
  552. {
  553. struct ahc_linux_device *dev;
  554. while ((dev = ahc_linux_next_device_to_run(ahc)) != NULL) {
  555. TAILQ_REMOVE(&ahc->platform_data->device_runq, dev, links);
  556. dev->flags &= ~AHC_DEV_ON_RUN_LIST;
  557. ahc_linux_check_device_queue(ahc, dev);
  558. }
  559. }
/*
 * Tear down the DMA mappings established for an SCB's data transfer.
 * The S/G list is synced for the CPU before the buffers are unmapped.
 */
static __inline void
ahc_linux_unmap_scb(struct ahc_softc *ahc, struct scb *scb)
{
	Scsi_Cmnd *cmd;

	cmd = scb->io_ctx;
	ahc_sync_sglist(ahc, scb, BUS_DMASYNC_POSTWRITE);
	if (cmd->use_sg != 0) {
		/* Scatter/gather request: unmap the whole sg list. */
		struct scatterlist *sg;

		sg = (struct scatterlist *)cmd->request_buffer;
		pci_unmap_sg(ahc->dev_softc, sg, cmd->use_sg,
			     cmd->sc_data_direction);
	} else if (cmd->request_bufflen != 0) {
		/* Single linear buffer mapped in ahc_linux_run_device_queue. */
		pci_unmap_single(ahc->dev_softc,
				 scb->platform_data->buf_busaddr,
				 cmd->request_bufflen,
				 cmd->sc_data_direction);
	}
}
/*
 * Fill in one hardware S/G descriptor for the given bus address and
 * length.  Returns the number of descriptors consumed (always 1).
 * Panics if the SCB's segment budget (AHC_NSEG) would be exceeded.
 */
static __inline int
ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb,
		  struct ahc_dma_seg *sg, dma_addr_t addr, bus_size_t len)
{
	int consumed;

	if ((scb->sg_count + 1) > AHC_NSEG)
		panic("Too few segs for dma mapping. "
		      "Increase AHC_NSEG\n");

	consumed = 1;
	sg->addr = ahc_htole32(addr & 0xFFFFFFFF);
	scb->platform_data->xfer_len += len;
	/*
	 * On 39-bit capable controllers the address bits above 32 are
	 * packed into the upper bits of the length word.
	 */
	if (sizeof(dma_addr_t) > 4
	 && (ahc->flags & AHC_39BIT_ADDRESSING) != 0)
		len |= (addr >> 8) & AHC_SG_HIGH_ADDR_MASK;
	sg->len = ahc_htole32(len);
	return (consumed);
}
  595. /************************ Host template entry points *************************/
  596. static int ahc_linux_detect(Scsi_Host_Template *);
  597. static int ahc_linux_queue(Scsi_Cmnd *, void (*)(Scsi_Cmnd *));
  598. static const char *ahc_linux_info(struct Scsi_Host *);
  599. #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
  600. static int ahc_linux_slave_alloc(Scsi_Device *);
  601. static int ahc_linux_slave_configure(Scsi_Device *);
  602. static void ahc_linux_slave_destroy(Scsi_Device *);
  603. #if defined(__i386__)
  604. static int ahc_linux_biosparam(struct scsi_device*,
  605. struct block_device*,
  606. sector_t, int[]);
  607. #endif
  608. #else
  609. static int ahc_linux_release(struct Scsi_Host *);
  610. static void ahc_linux_select_queue_depth(struct Scsi_Host *host,
  611. Scsi_Device *scsi_devs);
  612. #if defined(__i386__)
  613. static int ahc_linux_biosparam(Disk *, kdev_t, int[]);
  614. #endif
  615. #endif
  616. static int ahc_linux_bus_reset(Scsi_Cmnd *);
  617. static int ahc_linux_dev_reset(Scsi_Cmnd *);
  618. static int ahc_linux_abort(Scsi_Cmnd *);
  619. /*
  620. * Calculate a safe value for AHC_NSEG (as expressed through ahc_linux_nseg).
  621. *
  622. * In pre-2.5.X...
  623. * The midlayer allocates an S/G array dynamically when a command is issued
  624. * using SCSI malloc. This array, which is in an OS dependent format that
  625. * must later be copied to our private S/G list, is sized to house just the
  626. * number of segments needed for the current transfer. Since the code that
  627. * sizes the SCSI malloc pool does not take into consideration fragmentation
  628. * of the pool, executing transactions numbering just a fraction of our
 * concurrent transaction limit with list lengths approaching AHC_NSEG will
 * quickly deplete the SCSI malloc pool of usable space.  Unfortunately, the
 * mid-layer does not properly handle these scsi malloc failures for the S/G
  632. * array and the result can be a lockup of the I/O subsystem. We try to size
  633. * our S/G list so that it satisfies our drivers allocation requirements in
  634. * addition to avoiding fragmentation of the SCSI malloc pool.
  635. */
/*
 * Choose ahc_linux_nseg so that per-command S/G allocations pack well
 * into the pre-2.5 SCSI malloc pool (see the block comment above).
 * On 2.5+ kernels this is a no-op.
 */
static void
ahc_linux_size_nseg(void)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
	u_int cur_size;
	u_int best_size;

	/*
	 * The SCSI allocator rounds to the nearest 512 bytes
	 * and cannot allocate across a page boundary.  Our algorithm
	 * is to start at 1K of scsi malloc space per-command and
	 * loop through all factors of the PAGE_SIZE and pick the best.
	 */
	best_size = 0;
	for (cur_size = 1024; cur_size <= PAGE_SIZE; cur_size *= 2) {
		u_int nseg;

		nseg = cur_size / sizeof(struct scatterlist);
		if (nseg < AHC_LINUX_MIN_NSEG)
			continue;

		if (best_size == 0) {
			/* First candidate that satisfies our minimum. */
			best_size = cur_size;
			ahc_linux_nseg = nseg;
		} else {
			u_int best_rem;
			u_int cur_rem;

			/*
			 * Compare the traits of the current "best_size"
			 * with the current size to determine if the
			 * current size is a better size.  Smaller
			 * remainder means less wasted pool space.
			 */
			best_rem = best_size % sizeof(struct scatterlist);
			cur_rem = cur_size % sizeof(struct scatterlist);
			if (cur_rem < best_rem) {
				best_size = cur_size;
				ahc_linux_nseg = nseg;
			}
		}
	}
#endif
}
  675. /*
  676. * Try to detect an Adaptec 7XXX controller.
  677. */
/*
 * Probe for supported controllers (PCI, then EISA/VLB), register each
 * discovered controller with the SCSI mid-layer, and return the number
 * of hosts attached.  Entry point invoked by the SCSI layer.
 */
static int
ahc_linux_detect(Scsi_Host_Template *template)
{
	struct ahc_softc *ahc;
	int     found = 0;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
	/*
	 * It is a bug that the upper layer takes
	 * this lock just prior to calling us.
	 */
	spin_unlock_irq(&io_request_lock);
#endif

	/*
	 * Sanity checking of Linux SCSI data structures so
	 * that some of our hacks^H^H^H^H^Hassumptions aren't
	 * violated.  Our private state must fit in host_scribble.
	 */
	if (offsetof(struct ahc_cmd_internal, end)
	  > offsetof(struct scsi_cmnd, host_scribble)) {
		printf("ahc_linux_detect: SCSI data structures changed.\n");
		printf("ahc_linux_detect: Unable to attach\n");
		return (0);
	}
	ahc_linux_size_nseg();
	/*
	 * If we've been passed any parameters, process them now.
	 */
	if (aic7xxx)
		aic7xxx_setup(aic7xxx);

	template->proc_name = "aic7xxx";

	/*
	 * Initialize our softc list lock prior to
	 * probing for any adapters.
	 */
	ahc_list_lockinit();

	found = ahc_linux_pci_init();
	/* ahc_linux_eisa_init() returns 0 on success. */
	if (!ahc_linux_eisa_init())
		found++;
	
	/*
	 * Register with the SCSI layer all
	 * controllers we've found.
	 */
	TAILQ_FOREACH(ahc, &ahc_tailq, links) {

		if (ahc_linux_register_host(ahc, template) == 0)
			found++;
	}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
	/* Re-take the lock the mid-layer expects us to hold on return. */
	spin_lock_irq(&io_request_lock);
#endif
	aic7xxx_detect_complete++;

	return (found);
}
  730. #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
  731. /*
  732. * Free the passed in Scsi_Host memory structures prior to unloading the
  733. * module.
  734. */
int
ahc_linux_release(struct Scsi_Host * host)
{
	struct ahc_softc *ahc;
	u_long l;

	ahc_list_lock(&l);
	if (host != NULL) {

		/*
		 * We should be able to just perform
		 * the free directly, but check our
		 * list for extra sanity.
		 */
		ahc = ahc_find_softc(*(struct ahc_softc **)host->hostdata);
		if (ahc != NULL) {
			u_long s;

			/* Quiesce interrupts before tearing the softc down. */
			ahc_lock(ahc, &s);
			ahc_intr_enable(ahc, FALSE);
			ahc_unlock(ahc, &s);
			ahc_free(ahc);
		}
	}
	ahc_list_unlock(&l);
	return (0);
}
  759. #endif
  760. /*
  761. * Return a string describing the driver.
  762. */
  763. static const char *
  764. ahc_linux_info(struct Scsi_Host *host)
  765. {
  766. static char buffer[512];
  767. char ahc_info[256];
  768. char *bp;
  769. struct ahc_softc *ahc;
  770. bp = &buffer[0];
  771. ahc = *(struct ahc_softc **)host->hostdata;
  772. memset(bp, 0, sizeof(buffer));
  773. strcpy(bp, "Adaptec AIC7XXX EISA/VLB/PCI SCSI HBA DRIVER, Rev ");
  774. strcat(bp, AIC7XXX_DRIVER_VERSION);
  775. strcat(bp, "\n");
  776. strcat(bp, " <");
  777. strcat(bp, ahc->description);
  778. strcat(bp, ">\n");
  779. strcat(bp, " ");
  780. ahc_controller_info(ahc, ahc_info);
  781. strcat(bp, ahc_info);
  782. strcat(bp, "\n");
  783. return (bp);
  784. }
  785. /*
  786. * Queue an SCB to the controller.
  787. */
/*
 * Queue an SCB to the controller.  Mid-layer ->queuecommand entry
 * point.  The command is appended to the per-device busy queue and the
 * device is placed on the run queue; actual SCB submission happens in
 * ahc_linux_run_device_queue().  Always returns 0 (errors are reported
 * through the command's completion status).
 */
static int
ahc_linux_queue(Scsi_Cmnd * cmd, void (*scsi_done) (Scsi_Cmnd *))
{
	struct	 ahc_softc *ahc;
	struct	 ahc_linux_device *dev;
	u_long	 flags;

	ahc = *(struct ahc_softc **)cmd->device->host->hostdata;

	/*
	 * Save the callback on completion function.
	 */
	cmd->scsi_done = scsi_done;

	ahc_midlayer_entrypoint_lock(ahc, &flags);

	/*
	 * Close the race of a command that was in the process of
	 * being queued to us just as our simq was frozen.  Let
	 * DV commands through so long as we are only frozen to
	 * perform DV.
	 */
	if (ahc->platform_data->qfrozen != 0) {

		/* Ask the mid-layer to requeue this command later. */
		ahc_cmd_set_transaction_status(cmd, CAM_REQUEUE_REQ);
		ahc_linux_queue_cmd_complete(ahc, cmd);
		ahc_schedule_completeq(ahc);
		ahc_midlayer_entrypoint_unlock(ahc, &flags);
		return (0);
	}
	dev = ahc_linux_get_device(ahc, cmd->device->channel, cmd->device->id,
				   cmd->device->lun, /*alloc*/TRUE);
	if (dev == NULL) {
		/* Allocation failure: complete with "resource unavailable". */
		ahc_cmd_set_transaction_status(cmd, CAM_RESRC_UNAVAIL);
		ahc_linux_queue_cmd_complete(ahc, cmd);
		ahc_schedule_completeq(ahc);
		ahc_midlayer_entrypoint_unlock(ahc, &flags);
		printf("%s: aic7xxx_linux_queue - Unable to allocate device!\n",
		       ahc_name(ahc));
		return (0);
	}
	cmd->result = CAM_REQ_INPROG << 16;
	TAILQ_INSERT_TAIL(&dev->busyq, (struct ahc_cmd *)cmd, acmd_links.tqe);
	if ((dev->flags & AHC_DEV_ON_RUN_LIST) == 0) {
		TAILQ_INSERT_TAIL(&ahc->platform_data->device_runq, dev, links);
		dev->flags |= AHC_DEV_ON_RUN_LIST;
		ahc_linux_run_device_queues(ahc);
	}
	ahc_midlayer_entrypoint_unlock(ahc, &flags);
	return (0);
}
  834. #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
/*
 * Mid-layer ->slave_alloc hook.  Nothing to allocate here; the device
 * structure is created lazily in ahc_linux_get_device().  Only logs
 * when bootverbose is set.
 */
static int
ahc_linux_slave_alloc(Scsi_Device *device)
{
	struct	ahc_softc *ahc;

	ahc = *((struct ahc_softc **)device->host->hostdata);
	if (bootverbose)
		printf("%s: Slave Alloc %d\n", ahc_name(ahc), device->id);
	return (0);
}
/*
 * Mid-layer ->slave_configure hook.  Marks our per-lun structure as
 * configured, sets the queue depth, and kicks off initial Domain
 * Validation for the device.
 */
static int
ahc_linux_slave_configure(Scsi_Device *device)
{
	struct	ahc_softc *ahc;
	struct	ahc_linux_device *dev;
	u_long	flags;

	ahc = *((struct ahc_softc **)device->host->hostdata);
	if (bootverbose)
		printf("%s: Slave Configure %d\n", ahc_name(ahc), device->id);
	ahc_midlayer_entrypoint_lock(ahc, &flags);
	/*
	 * Since Linux has attached to the device, configure
	 * it so we don't free and allocate the device
	 * structure on every command.
	 */
	dev = ahc_linux_get_device(ahc, device->channel,
				   device->id, device->lun,
				   /*alloc*/TRUE);
	if (dev != NULL) {
		dev->flags &= ~AHC_DEV_UNCONFIGURED;
		dev->scsi_device = device;
		ahc_linux_device_queue_depth(ahc, dev);
	}
	ahc_midlayer_entrypoint_unlock(ahc, &flags);
	/* Initial Domain Validation */
	if (!spi_initial_dv(device->sdev_target))
		spi_dv_device(device);
	return (0);
}
/*
 * Mid-layer ->slave_destroy hook.  Marks the device unconfigured and
 * frees it immediately if it is fully idle.
 */
static void
ahc_linux_slave_destroy(Scsi_Device *device)
{
	struct	ahc_softc *ahc;
	struct	ahc_linux_device *dev;
	u_long	flags;

	ahc = *((struct ahc_softc **)device->host->hostdata);
	if (bootverbose)
		printf("%s: Slave Destroy %d\n", ahc_name(ahc), device->id);
	ahc_midlayer_entrypoint_lock(ahc, &flags);
	dev = ahc_linux_get_device(ahc, device->channel,
				   device->id, device->lun,
				   /*alloc*/FALSE);
	/*
	 * Filter out "silly" deletions of real devices by only
	 * deleting devices that have had slave_configure()
	 * called on them.  All other devices that have not
	 * been configured will automatically be deleted by
	 * the refcounting process.
	 */
	if (dev != NULL
	 && (dev->flags & AHC_DEV_SLAVE_CONFIGURED) != 0) {
		dev->flags |= AHC_DEV_UNCONFIGURED;
		/* Free only when no work, no active I/O, and no timer. */
		if (TAILQ_EMPTY(&dev->busyq)
		 && dev->active == 0
		 && (dev->flags & AHC_DEV_TIMER_ACTIVE) == 0)
			ahc_linux_free_device(ahc, dev);
	}
	ahc_midlayer_entrypoint_unlock(ahc, &flags);
}
  903. #else
  904. /*
  905. * Sets the queue depth for each SCSI device hanging
  906. * off the input host adapter.
  907. */
/*
 * Pre-2.5 kernels only: set the queue depth for each SCSI device
 * hanging off the input host adapter, skipping duplicate entries the
 * scanning code may produce.
 */
static void
ahc_linux_select_queue_depth(struct Scsi_Host *host, Scsi_Device *scsi_devs)
{
	Scsi_Device *device;
	Scsi_Device *ldev;
	struct	ahc_softc *ahc;
	u_long	flags;

	ahc = *((struct ahc_softc **)host->hostdata);
	ahc_lock(ahc, &flags);
	for (device = scsi_devs; device != NULL; device = device->next) {

		/*
		 * Watch out for duplicate devices.  This works around
		 * some quirks in how the SCSI scanning code does its
		 * device management.
		 */
		for (ldev = scsi_devs; ldev != device; ldev = ldev->next) {
			if (ldev->host == device->host
			 && ldev->channel == device->channel
			 && ldev->id == device->id
			 && ldev->lun == device->lun)
				break;
		}
		/* Skip duplicate. */
		if (ldev != device)
			continue;

		if (device->host == host) {
			struct	 ahc_linux_device *dev;

			/*
			 * Since Linux has attached to the device, configure
			 * it so we don't free and allocate the device
			 * structure on every command.
			 */
			dev = ahc_linux_get_device(ahc, device->channel,
						   device->id, device->lun,
						   /*alloc*/TRUE);
			if (dev != NULL) {
				dev->flags &= ~AHC_DEV_UNCONFIGURED;
				dev->scsi_device = device;
				ahc_linux_device_queue_depth(ahc, dev);
				device->queue_depth = dev->openings
						    + dev->active;
				if ((dev->flags & (AHC_DEV_Q_BASIC
						| AHC_DEV_Q_TAGGED)) == 0) {
					/*
					 * We allow the OS to queue 2 untagged
					 * transactions to us at any time even
					 * though we can only execute them
					 * serially on the controller/device.
					 * This should remove some latency.
					 */
					device->queue_depth = 2;
				}
			}
		}
	}
	ahc_unlock(ahc, &flags);
}
  965. #endif
  966. #if defined(__i386__)
  967. /*
  968. * Return the disk geometry for the given SCSI device.
  969. */
/*
 * Return the disk geometry (heads/sectors/cylinders) for the given
 * SCSI device.  A valid partition table, when readable, overrides the
 * computed geometry.  Falls back to 64/32 translation, or 255/63
 * "extended" translation when enabled and the disk is large enough.
 */
static int
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
ahc_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev,
		    sector_t capacity, int geom[])
{
	uint8_t *bh;
#else
ahc_linux_biosparam(Disk *disk, kdev_t dev, int geom[])
{
	struct	scsi_device *sdev = disk->device;
	u_long	capacity = disk->capacity;
	struct	buffer_head *bh;
#endif
	int	 heads;
	int	 sectors;
	int	 cylinders;
	int	 ret;
	int	 extended;
	struct	 ahc_softc *ahc;
	u_int	 channel;

	ahc = *((struct ahc_softc **)sdev->host->hostdata);
	channel = sdev->channel;

	/* Try to read the on-disk partition table first. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
	bh = scsi_bios_ptable(bdev);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,17)
	bh = bread(MKDEV(MAJOR(dev), MINOR(dev) & ~0xf), 0, block_size(dev));
#else
	bh = bread(MKDEV(MAJOR(dev), MINOR(dev) & ~0xf), 0, 1024);
#endif

	if (bh) {
		ret = scsi_partsize(bh, capacity,
				    &geom[2], &geom[0], &geom[1]);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
		kfree(bh);
#else
		brelse(bh);
#endif
		if (ret != -1)
			return (ret);
	}
	/* No usable partition table: compute a translation. */
	heads = 64;
	sectors = 32;
	cylinders = aic_sector_div(capacity, heads, sectors);

	if (aic7xxx_extended != 0)
		extended = 1;
	else if (channel == 0)
		extended = (ahc->flags & AHC_EXTENDED_TRANS_A) != 0;
	else
		extended = (ahc->flags & AHC_EXTENDED_TRANS_B) != 0;
	if (extended && cylinders >= 1024) {
		heads = 255;
		sectors = 63;
		cylinders = aic_sector_div(capacity, heads, sectors);
	}
	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;
	return (0);
}
  1029. #endif
  1030. /*
  1031. * Abort the current SCSI command(s).
  1032. */
/*
 * Mid-layer ->eh_abort_handler: abort the command that timed out by
 * queuing an SCB_ABORT recovery request.
 */
static int
ahc_linux_abort(Scsi_Cmnd *cmd)
{
	int error;

	error = ahc_linux_queue_recovery_cmd(cmd, SCB_ABORT);
	if (error != 0)
		printf("aic7xxx_abort returns 0x%x\n", error);
	return (error);
}
  1042. /*
  1043. * Attempt to send a target reset message to the device that timed out.
  1044. */
/*
 * Mid-layer ->eh_device_reset_handler: attempt to send a target reset
 * message to the device that timed out.
 */
static int
ahc_linux_dev_reset(Scsi_Cmnd *cmd)
{
	int error;

	error = ahc_linux_queue_recovery_cmd(cmd, SCB_DEVICE_RESET);
	if (error != 0)
		printf("aic7xxx_dev_reset returns 0x%x\n", error);
	return (error);
}
  1054. /*
  1055. * Reset the SCSI bus.
  1056. */
/*
 * Mid-layer ->eh_bus_reset_handler: reset the SCSI bus the command's
 * device sits on, then run the completion queue for the aborted SCBs.
 * Always reports SUCCESS to the mid-layer.
 */
static int
ahc_linux_bus_reset(Scsi_Cmnd *cmd)
{
	struct ahc_softc *ahc;
	u_long s;
	int    found;

	ahc = *(struct ahc_softc **)cmd->device->host->hostdata;
	ahc_midlayer_entrypoint_lock(ahc, &s);
	found = ahc_reset_channel(ahc, cmd->device->channel + 'A',
				  /*initiate reset*/TRUE);
	ahc_linux_run_complete_queue(ahc);
	ahc_midlayer_entrypoint_unlock(ahc, &s);

	if (bootverbose)
		printf("%s: SCSI bus reset delivered. "
		       "%d SCBs aborted.\n", ahc_name(ahc), found);

	return SUCCESS;
}
/* SCSI mid-layer host template: wires our entry points into the stack. */
Scsi_Host_Template aic7xxx_driver_template = {
	.module			= THIS_MODULE,
	.name			= "aic7xxx",
	.proc_info		= ahc_linux_proc_info,
	.info			= ahc_linux_info,
	.queuecommand		= ahc_linux_queue,
	/* Error-recovery escalation: abort -> device reset -> bus reset. */
	.eh_abort_handler	= ahc_linux_abort,
	.eh_device_reset_handler = ahc_linux_dev_reset,
	.eh_bus_reset_handler	= ahc_linux_bus_reset,
#if defined(__i386__)
	.bios_param		= ahc_linux_biosparam,
#endif
	.can_queue		= AHC_MAX_QUEUE,
	.this_id		= -1,
	.cmd_per_lun		= 2,
	.use_clustering		= ENABLE_CLUSTERING,
	.slave_alloc		= ahc_linux_slave_alloc,
	.slave_configure	= ahc_linux_slave_configure,
	.slave_destroy		= ahc_linux_slave_destroy,
};
  1094. /**************************** Tasklet Handler *********************************/
  1095. /*
  1096. * In 2.4.X and above, this routine is called from a tasklet,
  1097. * so we must re-acquire our lock prior to executing this code.
  1098. * In all prior kernels, ahc_schedule_runq() calls this routine
  1099. * directly and ahc_schedule_runq() is called with our lock held.
  1100. */
/*
 * Tasklet body: drain the device run queue, briefly dropping the lock
 * between devices so the interrupt handler is not starved.
 */
static void
ahc_runq_tasklet(unsigned long data)
{
	struct ahc_softc* ahc;
	struct ahc_linux_device *dev;
	u_long flags;

	ahc = (struct ahc_softc *)data;
	ahc_lock(ahc, &flags);
	while ((dev = ahc_linux_next_device_to_run(ahc)) != NULL) {
	
		TAILQ_REMOVE(&ahc->platform_data->device_runq, dev, links);
		dev->flags &= ~AHC_DEV_ON_RUN_LIST;
		ahc_linux_check_device_queue(ahc, dev);
		/* Yield to our interrupt handler */
		ahc_unlock(ahc, &flags);
		ahc_lock(ahc, &flags);
	}
	ahc_unlock(ahc, &flags);
}
/******************************** Macros **************************************/
/*
 * Build the hardware SCSIID byte for a command: the target id in the
 * TID field, our initiator id for the command's channel, and the
 * TWIN_CHNLB flag when the command is for channel B.
 */
#define BUILD_SCSIID(ahc, cmd)						    \
	((((cmd)->device->id << TID_SHIFT) & TID)			    \
	| (((cmd)->device->channel == 0) ? (ahc)->our_id : (ahc)->our_id_b) \
	| (((cmd)->device->channel == 0) ? 0 : TWIN_CHNLB))
  1124. /******************************** Bus DMA *************************************/
/*
 * Create a bus_dma tag (BSD-style compat shim).  Only alignment,
 * boundary, and maxsize are retained; the other parameters are
 * accepted for interface compatibility but ignored on Linux.
 * Returns 0 on success or ENOMEM.
 */
int
ahc_dma_tag_create(struct ahc_softc *ahc, bus_dma_tag_t parent,
		   bus_size_t alignment, bus_size_t boundary,
		   dma_addr_t lowaddr, dma_addr_t highaddr,
		   bus_dma_filter_t *filter, void *filterarg,
		   bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_tag_t *ret_tag)
{
	bus_dma_tag_t dmat;

	dmat = malloc(sizeof(*dmat), M_DEVBUF, M_NOWAIT);
	if (dmat == NULL)
		return (ENOMEM);

	/*
	 * Linux is very simplistic about DMA memory.  For now don't
	 * maintain all specification information.  Once Linux supplies
	 * better facilities for doing these operations, or the
	 * needs of this particular driver change, we might need to do
	 * more here.
	 */
	dmat->alignment = alignment;
	dmat->boundary = boundary;
	dmat->maxsize = maxsize;
	*ret_tag = dmat;
	return (0);
}
/* Release a tag created by ahc_dma_tag_create(). */
void
ahc_dma_tag_destroy(struct ahc_softc *ahc, bus_dma_tag_t dmat)
{
	free(dmat, M_DEVBUF);
}
  1155. int
  1156. ahc_dmamem_alloc(struct ahc_softc *ahc, bus_dma_tag_t dmat, void** vaddr,
  1157. int flags, bus_dmamap_t *mapp)
  1158. {
  1159. bus_dmamap_t map;
  1160. map = malloc(sizeof(*map), M_DEVBUF, M_NOWAIT);
  1161. if (map == NULL)
  1162. return (ENOMEM);
  1163. /*
  1164. * Although we can dma data above 4GB, our
  1165. * "consistent" memory is below 4GB for
  1166. * space efficiency reasons (only need a 4byte
  1167. * address). For this reason, we have to reset
  1168. * our dma mask when doing allocations.
  1169. */
  1170. if (ahc->dev_softc != NULL)
  1171. if (pci_set_dma_mask(ahc->dev_softc, 0xFFFFFFFF)) {
  1172. printk(KERN_WARNING "aic7xxx: No suitable DMA available.\n");
  1173. kfree(map);
  1174. return (ENODEV);
  1175. }
  1176. *vaddr = pci_alloc_consistent(ahc->dev_softc,
  1177. dmat->maxsize, &map->bus_addr);
  1178. if (ahc->dev_softc != NULL)
  1179. if (pci_set_dma_mask(ahc->dev_softc,
  1180. ahc->platform_data->hw_dma_mask)) {
  1181. printk(KERN_WARNING "aic7xxx: No suitable DMA available.\n");
  1182. kfree(map);
  1183. return (ENODEV);
  1184. }
  1185. if (*vaddr == NULL)
  1186. return (ENOMEM);
  1187. *mapp = map;
  1188. return(0);
  1189. }
/*
 * Free memory obtained from ahc_dmamem_alloc().  Note: the map itself
 * is released separately via ahc_dmamap_destroy().
 */
void
ahc_dmamem_free(struct ahc_softc *ahc, bus_dma_tag_t dmat,
		void* vaddr, bus_dmamap_t map)
{
	pci_free_consistent(ahc->dev_softc, dmat->maxsize,
			    vaddr, map->bus_addr);
}
/*
 * "Load" a DMA map: report the buffer as a single segment to the
 * callback.  Valid because allocations come from pci_alloc_consistent,
 * which returns physically contiguous memory.
 */
int
ahc_dmamap_load(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map,
		void *buf, bus_size_t buflen, bus_dmamap_callback_t *cb,
		void *cb_arg, int flags)
{
	/*
	 * Assume for now that this will only be used during
	 * initialization and not for per-transaction buffer mapping.
	 */
	bus_dma_segment_t stack_sg;

	stack_sg.ds_addr = map->bus_addr;
	stack_sg.ds_len = dmat->maxsize;
	cb(cb_arg, &stack_sg, /*nseg*/1, /*error*/0);
	return (0);
}
/* Destroy a map created by ahc_dmamem_alloc(). */
void
ahc_dmamap_destroy(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map)
{
	/*
	 * The map could legitimately be NULL in our pre-2.3.X
	 * implementation.  That is no longer possible, so assert
	 * to catch stale callers.
	 */
	BUG_ON(map == NULL);
	free(map, M_DEVBUF);
}
/* Unloading is a no-op on Linux; kept for BSD interface parity. */
int
ahc_dmamap_unload(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map)
{
	/* Nothing to do */
	return (0);
}
  1228. /********************* Platform Dependent Functions ***************************/
  1229. /*
  1230. * Compare "left hand" softc with "right hand" softc, returning:
  1231. * < 0 - lahc has a lower priority than rahc
  1232. * 0 - Softcs are equal
  1233. * > 0 - lahc has a higher priority than rahc
  1234. */
int
ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc)
{
	int	value;
	int	rvalue;
	int	lvalue;

	/*
	 * Under Linux, cards are ordered as follows:
	 *	1) VLB/EISA BIOS enabled devices sorted by BIOS address.
	 *	2) PCI devices with BIOS enabled sorted by bus/slot/func.
	 *	3) All remaining VLB/EISA devices sorted by ioport.
	 *	4) All remaining PCI devices sorted by bus/slot/func.
	 */
	value = (lahc->flags & AHC_BIOS_ENABLED)
	      - (rahc->flags & AHC_BIOS_ENABLED);
	if (value != 0)
		/* Controllers with BIOS enabled have a *higher* priority */
		return (value);

	/*
	 * Same BIOS setting, now sort based on bus type.
	 * EISA and VL controllers sort together.  EISA/VL
	 * have higher priority than PCI.
	 */
	rvalue = (rahc->chip & AHC_BUS_MASK);
 	if (rvalue == AHC_VL)
		rvalue = AHC_EISA;
	lvalue = (lahc->chip & AHC_BUS_MASK);
 	if (lvalue == AHC_VL)
		lvalue = AHC_EISA;
	value = rvalue - lvalue;
	if (value != 0)
		return (value);

	/* Still equal.  Sort by BIOS address, ioport, or bus/slot/func. */
	switch (rvalue) {
#ifdef CONFIG_PCI
	case AHC_PCI:
	{
		char primary_channel;

		/* aic7xxx_reverse_scan flips the bus, then slot, ordering. */
		if (aic7xxx_reverse_scan != 0)
			value = ahc_get_pci_bus(lahc->dev_softc)
			      - ahc_get_pci_bus(rahc->dev_softc);
		else
			value = ahc_get_pci_bus(rahc->dev_softc)
			      - ahc_get_pci_bus(lahc->dev_softc);
		if (value != 0)
			break;
		if (aic7xxx_reverse_scan != 0)
			value = ahc_get_pci_slot(lahc->dev_softc)
			      - ahc_get_pci_slot(rahc->dev_softc);
		else
			value = ahc_get_pci_slot(rahc->dev_softc)
			      - ahc_get_pci_slot(lahc->dev_softc);
		if (value != 0)
			break;
		/*
		 * On multi-function devices, the user can choose
		 * to have function 1 probed before function 0.
		 * Give whichever channel is the primary channel
		 * the highest priority.
		 */
		primary_channel = (lahc->flags & AHC_PRIMARY_CHANNEL) + 'A';
		value = -1;
		if (lahc->channel == primary_channel)
			value = 1;
		break;
	}
#endif
	case AHC_EISA:
		if ((rahc->flags & AHC_BIOS_ENABLED) != 0) {
			value = rahc->platform_data->bios_address
			      - lahc->platform_data->bios_address; 
		} else {
			value = rahc->bsh.ioport
			      - lahc->bsh.ioport; 
		}
		break;
	default:
		panic("ahc_softc_sort: invalid bus type");
	}
	return (value);
}
  1316. static void
  1317. ahc_linux_setup_tag_info_global(char *p)
  1318. {
  1319. int tags, i, j;
  1320. tags = simple_strtoul(p + 1, NULL, 0) & 0xff;
  1321. printf("Setting Global Tags= %d\n", tags);
  1322. for (i = 0; i < NUM_ELEMENTS(aic7xxx_tag_info); i++) {
  1323. for (j = 0; j < AHC_NUM_TARGETS; j++) {
  1324. aic7xxx_tag_info[i].tag_commands[j] = tags;
  1325. }
  1326. }
  1327. }
  1328. static void
  1329. ahc_linux_setup_tag_info(u_long arg, int instance, int targ, int32_t value)
  1330. {
  1331. if ((instance >= 0) && (targ >= 0)
  1332. && (instance < NUM_ELEMENTS(aic7xxx_tag_info))
  1333. && (targ < AHC_NUM_TARGETS)) {
  1334. aic7xxx_tag_info[instance].tag_commands[targ] = value & 0xff;
  1335. if (bootverbose)
  1336. printf("tag_info[%d:%d] = %d\n", instance, targ, value);
  1337. }
  1338. }
  1339. /*
  1340. * Handle Linux boot parameters. This routine allows for assigning a value
  1341. * to a parameter with a ':' between the parameter and the value.
  1342. * ie. aic7xxx=stpwlev:1,extended
  1343. */
  1344. static int
  1345. aic7xxx_setup(char *s)
  1346. {
  1347. int i, n;
  1348. char *p;
  1349. char *end;
  1350. static struct {
  1351. const char *name;
  1352. uint32_t *flag;
  1353. } options[] = {
  1354. { "extended", &aic7xxx_extended },
  1355. { "no_reset", &aic7xxx_no_reset },
  1356. { "verbose", &aic7xxx_verbose },
  1357. { "allow_memio", &aic7xxx_allow_memio},
  1358. #ifdef AHC_DEBUG
  1359. { "debug", &ahc_debug },
  1360. #endif
  1361. { "reverse_scan", &aic7xxx_reverse_scan },
  1362. { "no_probe", &aic7xxx_probe_eisa_vl },
  1363. { "probe_eisa_vl", &aic7xxx_probe_eisa_vl },
  1364. { "periodic_otag", &aic7xxx_periodic_otag },
  1365. { "pci_parity", &aic7xxx_pci_parity },
  1366. { "seltime", &aic7xxx_seltime },
  1367. { "tag_info", NULL },
  1368. { "global_tag_depth", NULL },
  1369. { "dv", NULL }
  1370. };
  1371. end = strchr(s, '\0');
  1372. /*
  1373. * XXX ia64 gcc isn't smart enough to know that NUM_ELEMENTS
  1374. * will never be 0 in this case.
  1375. */
  1376. n = 0;
  1377. while ((p = strsep(&s, ",.")) != NULL) {
  1378. if (*p == '\0')
  1379. continue;
  1380. for (i = 0; i < NUM_ELEMENTS(options); i++) {
  1381. n = strlen(options[i].name);
  1382. if (strncmp(options[i].name, p, n) == 0)
  1383. break;
  1384. }
  1385. if (i == NUM_ELEMENTS(options))
  1386. continue;
  1387. if (strncmp(p, "global_tag_depth", n) == 0) {
  1388. ahc_linux_setup_tag_info_global(p + n);
  1389. } else if (strncmp(p, "tag_info", n) == 0) {
  1390. s = aic_parse_brace_option("tag_info", p + n, end,
  1391. 2, ahc_linux_setup_tag_info, 0);
  1392. } else if (p[n] == ':') {
  1393. *(options[i].flag) = simple_strtoul(p + n + 1, NULL, 0);
  1394. } else if (strncmp(p, "verbose", n) == 0) {
  1395. *(options[i].flag) = 1;
  1396. } else {
  1397. *(options[i].flag) ^= 0xFFFFFFFF;
  1398. }
  1399. }
  1400. return 1;
  1401. }
  1402. __setup("aic7xxx=", aic7xxx_setup);
/* Driver-wide verbosity flag; set by the "verbose" boot option above. */
uint32_t aic7xxx_verbose;
/*
 * Register a configured controller with the SCSI mid-layer: allocate the
 * Scsi_Host, publish per-host limits, name the unit, initialize the bus,
 * enable interrupts, and (on 2.6) add and scan the host.
 *
 * Returns 0 on success or ENOMEM if the host structure cannot be
 * allocated.
 */
int
ahc_linux_register_host(struct ahc_softc *ahc, Scsi_Host_Template *template)
{
	char	buf[80];
	struct	Scsi_Host *host;
	char	*new_name;
	u_long	s;

	template->name = ahc->description;
	host = scsi_host_alloc(template, sizeof(struct ahc_softc *));
	if (host == NULL)
		return (ENOMEM);

	/* Stash our softc pointer in the mid-layer's per-host storage. */
	*((struct ahc_softc **)host->hostdata) = ahc;
	ahc_lock(ahc, &s);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
	scsi_assign_lock(host, &ahc->platform_data->spin_lock);
#elif AHC_SCSI_HAS_HOST_LOCK != 0
	host->lock = &ahc->platform_data->spin_lock;
#endif
	ahc->platform_data->host = host;
	host->can_queue = AHC_MAX_QUEUE;
	host->cmd_per_lun = 2;
	/* XXX No way to communicate the ID for multiple channels */
	host->this_id = ahc->our_id;
	host->irq = ahc->platform_data->irq;
	host->max_id = (ahc->features & AHC_WIDE) ? 16 : 8;
	host->max_lun = AHC_NUM_LUNS;
	host->max_channel = (ahc->features & AHC_TWIN) ? 1 : 0;
	host->sg_tablesize = AHC_NSEG;
	ahc_set_unit(ahc, ahc_linux_next_unit());
	/* Name the softc after the scsi host number assigned by the OS. */
	sprintf(buf, "scsi%d", host->host_no);
	new_name = malloc(strlen(buf) + 1, M_DEVBUF, M_NOWAIT);
	if (new_name != NULL) {
		strcpy(new_name, buf);
		ahc_set_name(ahc, new_name);
	}
	host->unique_id = ahc->unit;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
	scsi_set_pci_device(host, ahc->dev_softc);
#endif
	ahc_linux_initialize_scsi_bus(ahc);
	ahc_intr_enable(ahc, TRUE);
	ahc_unlock(ahc, &s);

	host->transportt = ahc_linux_transport_template;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
	scsi_add_host(host, (ahc->dev_softc ? &ahc->dev_softc->dev : NULL)); /* XXX handle failure */
	scsi_scan_host(host);
#endif
	return (0);
}
  1453. uint64_t
  1454. ahc_linux_get_memsize(void)
  1455. {
  1456. struct sysinfo si;
  1457. si_meminfo(&si);
  1458. return ((uint64_t)si.totalram << PAGE_SHIFT);
  1459. }
  1460. /*
  1461. * Find the smallest available unit number to use
  1462. * for a new device. We don't just use a static
  1463. * count to handle the "repeated hot-(un)plug"
  1464. * scenario.
  1465. */
  1466. static int
  1467. ahc_linux_next_unit(void)
  1468. {
  1469. struct ahc_softc *ahc;
  1470. int unit;
  1471. unit = 0;
  1472. retry:
  1473. TAILQ_FOREACH(ahc, &ahc_tailq, links) {
  1474. if (ahc->unit == unit) {
  1475. unit++;
  1476. goto retry;
  1477. }
  1478. }
  1479. return (unit);
  1480. }
/*
 * Place the SCSI bus into a known state by either resetting it,
 * or forcing transfer negotiations on the next command to any
 * target.
 */
void
ahc_linux_initialize_scsi_bus(struct ahc_softc *ahc)
{
	int i;
	int numtarg;

	i = 0;
	numtarg = 0;

	/* The "no_reset" boot option overrides any requested bus resets. */
	if (aic7xxx_no_reset != 0)
		ahc->flags &= ~(AHC_RESET_BUS_A|AHC_RESET_BUS_B);

	if ((ahc->flags & AHC_RESET_BUS_A) != 0)
		ahc_reset_channel(ahc, 'A', /*initiate_reset*/TRUE);
	else
		/* No reset: channel A targets need forced renegotiation. */
		numtarg = (ahc->features & AHC_WIDE) ? 16 : 8;

	if ((ahc->features & AHC_TWIN) != 0) {
		if ((ahc->flags & AHC_RESET_BUS_B) != 0) {
			ahc_reset_channel(ahc, 'B', /*initiate_reset*/TRUE);
		} else {
			/* Channel B targets occupy indices 8-15. */
			if (numtarg == 0)
				i = 8;
			numtarg += 8;
		}
	}

	/*
	 * Force negotiation to async for all targets that
	 * will not see an initial bus reset.
	 */
	for (; i < numtarg; i++) {
		struct ahc_devinfo devinfo;
		struct ahc_initiator_tinfo *tinfo;
		struct ahc_tmode_tstate *tstate;
		u_int our_id;
		u_int target_id;
		char channel;

		channel = 'A';
		our_id = ahc->our_id;
		target_id = i;
		if (i > 7 && (ahc->features & AHC_TWIN) != 0) {
			/* Indices 8-15 map to channel B on twin chips. */
			channel = 'B';
			our_id = ahc->our_id_b;
			target_id = i % 8;
		}
		tinfo = ahc_fetch_transinfo(ahc, channel, our_id,
					    target_id, &tstate);
		ahc_compile_devinfo(&devinfo, our_id, target_id,
				    CAM_LUN_WILDCARD, channel, ROLE_INITIATOR);
		ahc_update_neg_request(ahc, &devinfo, tstate,
				       tinfo, AHC_NEG_ALWAYS);
	}
	/* Give the bus some time to recover */
	if ((ahc->flags & (AHC_RESET_BUS_A|AHC_RESET_BUS_B)) != 0) {
		/* Freeze the simq until the reset delay timer fires. */
		ahc_linux_freeze_simq(ahc);
		init_timer(&ahc->platform_data->reset_timer);
		ahc->platform_data->reset_timer.data = (u_long)ahc;
		ahc->platform_data->reset_timer.expires =
		    jiffies + (AIC7XXX_RESET_DELAY * HZ)/1000;
		ahc->platform_data->reset_timer.function =
		    ahc_linux_release_simq;
		add_timer(&ahc->platform_data->reset_timer);
	}
}
/*
 * Allocate and initialize the Linux-specific per-controller state
 * (completion queue, run queue, locks, timers, tasklet).
 *
 * Returns 0 on success or ENOMEM if the platform data cannot be
 * allocated.
 */
int
ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
{
	ahc->platform_data =
	    malloc(sizeof(struct ahc_platform_data), M_DEVBUF, M_NOWAIT);
	if (ahc->platform_data == NULL)
		return (ENOMEM);
	memset(ahc->platform_data, 0, sizeof(struct ahc_platform_data));
	TAILQ_INIT(&ahc->platform_data->completeq);
	TAILQ_INIT(&ahc->platform_data->device_runq);
	/* No interrupt line has been hooked up yet. */
	ahc->platform_data->irq = AHC_LINUX_NOIRQ;
	ahc->platform_data->hw_dma_mask = 0xFFFFFFFF;
	ahc_lockinit(ahc);
	ahc_done_lockinit(ahc);
	init_timer(&ahc->platform_data->completeq_timer);
	ahc->platform_data->completeq_timer.data = (u_long)ahc;
	ahc->platform_data->completeq_timer.function =
	    (ahc_linux_callback_t *)ahc_linux_thread_run_complete_queue;
	/*
	 * NOTE(review): eh_sem is created locked — presumably released by
	 * the error-recovery path; confirm against the eh handlers.
	 */
	init_MUTEX_LOCKED(&ahc->platform_data->eh_sem);
	tasklet_init(&ahc->platform_data->runq_tasklet, ahc_runq_tasklet,
		     (unsigned long)ahc);
	/* Only the low two bits of the user selection timeout are used. */
	ahc->seltime = (aic7xxx_seltime & 0x3) << 4;
	ahc->seltime_b = (aic7xxx_seltime & 0x3) << 4;
	if (aic7xxx_pci_parity == 0)
		ahc->flags |= AHC_DISABLE_PCI_PERR;

	return (0);
}
/*
 * Release all Linux-specific controller state: deferred work, the
 * registered Scsi_Host, all target/device objects, the IRQ, and any
 * I/O port or memory-mapped regions.  Safe to call when platform_data
 * was never allocated.
 */
void
ahc_platform_free(struct ahc_softc *ahc)
{
	struct ahc_linux_target *targ;
	struct ahc_linux_device *dev;
	int i, j;

	if (ahc->platform_data != NULL) {
		/* Stop deferred work before tearing anything down. */
		del_timer_sync(&ahc->platform_data->completeq_timer);
		tasklet_kill(&ahc->platform_data->runq_tasklet);
		if (ahc->platform_data->host != NULL) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
			scsi_remove_host(ahc->platform_data->host);
#endif
			scsi_host_put(ahc->platform_data->host);
		}

		/* destroy all of the device and target objects */
		for (i = 0; i < AHC_NUM_TARGETS; i++) {
			targ = ahc->platform_data->targets[i];
			if (targ != NULL) {
				/* Keep target around through the loop. */
				targ->refcount++;
				for (j = 0; j < AHC_NUM_LUNS; j++) {
					if (targ->devices[j] == NULL)
						continue;
					dev = targ->devices[j];
					ahc_linux_free_device(ahc, dev);
				}
				/*
				 * Forcibly free the target now that
				 * all devices are gone.
				 */
				ahc_linux_free_target(ahc, targ);
			}
		}

		/* Release the IRQ and any bus-space regions we claimed. */
		if (ahc->platform_data->irq != AHC_LINUX_NOIRQ)
			free_irq(ahc->platform_data->irq, ahc);
		if (ahc->tag == BUS_SPACE_PIO
		 && ahc->bsh.ioport != 0)
			release_region(ahc->bsh.ioport, 256);
		if (ahc->tag == BUS_SPACE_MEMIO
		 && ahc->bsh.maddr != NULL) {
			iounmap(ahc->bsh.maddr);
			release_mem_region(ahc->platform_data->mem_busaddr,
					   0x1000);
		}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
		/*
		 * In 2.4 we detach from the scsi midlayer before the PCI
		 * layer invokes our remove callback.  No per-instance
		 * detach is provided, so we must reach inside the PCI
		 * subsystem's internals and detach our driver manually.
		 */
		if (ahc->dev_softc != NULL)
			ahc->dev_softc->driver = NULL;
#endif
		free(ahc->platform_data, M_DEVBUF);
	}
}
/*
 * Requeue every command outstanding on the same target/channel/lun as
 * the given SCB by completing them with CAM_REQUEUE_REQ status.
 */
void
ahc_platform_freeze_devq(struct ahc_softc *ahc, struct scb *scb)
{
	ahc_platform_abort_scbs(ahc, SCB_GET_TARGET(ahc, scb),
				SCB_GET_CHANNEL(ahc, scb),
				SCB_GET_LUN(scb), SCB_LIST_NULL,
				ROLE_UNKNOWN, CAM_REQUEUE_REQ);
}
/*
 * Transition a device between untagged, basic-tagged, and full-tagged
 * queuing modes, recomputing its opening count and (on 2.6) informing
 * the SCSI mid-layer of the new queue depth.
 */
void
ahc_platform_set_tags(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		      ahc_queue_alg alg)
{
	struct ahc_linux_device *dev;
	int was_queuing;
	int now_queuing;

	dev = ahc_linux_get_device(ahc, devinfo->channel - 'A',
				   devinfo->target,
				   devinfo->lun, /*alloc*/FALSE);
	if (dev == NULL)
		return;
	was_queuing = dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED);
	switch (alg) {
	default:
	case AHC_QUEUE_NONE:
		now_queuing = 0;
		break;
	case AHC_QUEUE_BASIC:
		now_queuing = AHC_DEV_Q_BASIC;
		break;
	case AHC_QUEUE_TAGGED:
		now_queuing = AHC_DEV_Q_TAGGED;
		break;
	}
	/*
	 * If the queuing mode changes while commands are outstanding,
	 * freeze the device until it drains so the switch is clean.
	 */
	if ((dev->flags & AHC_DEV_FREEZE_TIL_EMPTY) == 0
	 && (was_queuing != now_queuing)
	 && (dev->active != 0)) {
		dev->flags |= AHC_DEV_FREEZE_TIL_EMPTY;
		dev->qfrozen++;
	}

	dev->flags &= ~(AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED|AHC_DEV_PERIODIC_OTAG);
	if (now_queuing) {
		u_int usertags;

		usertags = ahc_linux_user_tagdepth(ahc, devinfo);
		if (!was_queuing) {
			/*
			 * Start out aggressively and allow our
			 * dynamic queue depth algorithm to take
			 * care of the rest.
			 */
			dev->maxtags = usertags;
			dev->openings = dev->maxtags - dev->active;
		}
		if (dev->maxtags == 0) {
			/*
			 * Queueing is disabled by the user.
			 */
			dev->openings = 1;
		} else if (alg == AHC_QUEUE_TAGGED) {
			dev->flags |= AHC_DEV_Q_TAGGED;
			if (aic7xxx_periodic_otag != 0)
				dev->flags |= AHC_DEV_PERIODIC_OTAG;
		} else
			dev->flags |= AHC_DEV_Q_BASIC;
	} else {
		/* We can only have one opening. */
		dev->maxtags = 0;
		dev->openings = 1 - dev->active;
	}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
	if (dev->scsi_device != NULL) {
		switch ((dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED))) {
		case AHC_DEV_Q_BASIC:
			scsi_adjust_queue_depth(dev->scsi_device,
						MSG_SIMPLE_TASK,
						dev->openings + dev->active);
			break;
		case AHC_DEV_Q_TAGGED:
			scsi_adjust_queue_depth(dev->scsi_device,
						MSG_ORDERED_TASK,
						dev->openings + dev->active);
			break;
		default:
			/*
			 * We allow the OS to queue 2 untagged transactions to
			 * us at any time even though we can only execute them
			 * serially on the controller/device.  This should
			 * remove some latency.
			 */
			scsi_adjust_queue_depth(dev->scsi_device,
						/*NON-TAGGED*/0,
						/*queue depth*/2);
			break;
		}
	}
#endif
}
  1727. int
  1728. ahc_platform_abort_scbs(struct ahc_softc *ahc, int target, char channel,
  1729. int lun, u_int tag, role_t role, uint32_t status)
  1730. {
  1731. int chan;
  1732. int maxchan;
  1733. int targ;
  1734. int maxtarg;
  1735. int clun;
  1736. int maxlun;
  1737. int count;
  1738. if (tag != SCB_LIST_NULL)
  1739. return (0);
  1740. chan = 0;
  1741. if (channel != ALL_CHANNELS) {
  1742. chan = channel - 'A';
  1743. maxchan = chan + 1;
  1744. } else {
  1745. maxchan = (ahc->features & AHC_TWIN) ? 2 : 1;
  1746. }
  1747. targ = 0;
  1748. if (target != CAM_TARGET_WILDCARD) {
  1749. targ = target;
  1750. maxtarg = targ + 1;
  1751. } else {
  1752. maxtarg = (ahc->features & AHC_WIDE) ? 16 : 8;
  1753. }
  1754. clun = 0;
  1755. if (lun != CAM_LUN_WILDCARD) {
  1756. clun = lun;
  1757. maxlun = clun + 1;
  1758. } else {
  1759. maxlun = AHC_NUM_LUNS;
  1760. }
  1761. count = 0;
  1762. for (; chan < maxchan; chan++) {
  1763. for (; targ < maxtarg; targ++) {
  1764. for (; clun < maxlun; clun++) {
  1765. struct ahc_linux_device *dev;
  1766. struct ahc_busyq *busyq;
  1767. struct ahc_cmd *acmd;
  1768. dev = ahc_linux_get_device(ahc, chan,
  1769. targ, clun,
  1770. /*alloc*/FALSE);
  1771. if (dev == NULL)
  1772. continue;
  1773. busyq = &dev->busyq;
  1774. while ((acmd = TAILQ_FIRST(busyq)) != NULL) {
  1775. Scsi_Cmnd *cmd;
  1776. cmd = &acmd_scsi_cmd(acmd);
  1777. TAILQ_REMOVE(busyq, acmd,
  1778. acmd_links.tqe);
  1779. count++;
  1780. cmd->result = status << 16;
  1781. ahc_linux_queue_cmd_complete(ahc, cmd);
  1782. }
  1783. }
  1784. }
  1785. }
  1786. return (count);
  1787. }
  1788. static void
  1789. ahc_linux_thread_run_complete_queue(struct ahc_softc *ahc)
  1790. {
  1791. u_long flags;
  1792. ahc_lock(ahc, &flags);
  1793. del_timer(&ahc->platform_data->completeq_timer);
  1794. ahc->platform_data->flags &= ~AHC_RUN_CMPLT_Q_TIMER;
  1795. ahc_linux_run_complete_queue(ahc);
  1796. ahc_unlock(ahc, &flags);
  1797. }
  1798. static u_int
  1799. ahc_linux_user_tagdepth(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
  1800. {
  1801. static int warned_user;
  1802. u_int tags;
  1803. tags = 0;
  1804. if ((ahc->user_discenable & devinfo->target_mask) != 0) {
  1805. if (ahc->unit >= NUM_ELEMENTS(aic7xxx_tag_info)) {
  1806. if (warned_user == 0) {
  1807. printf(KERN_WARNING
  1808. "aic7xxx: WARNING: Insufficient tag_info instances\n"
  1809. "aic7xxx: for installed controllers. Using defaults\n"
  1810. "aic7xxx: Please update the aic7xxx_tag_info array in\n"
  1811. "aic7xxx: the aic7xxx_osm..c source file.\n");
  1812. warned_user++;
  1813. }
  1814. tags = AHC_MAX_QUEUE;
  1815. } else {
  1816. adapter_tag_info_t *tag_info;
  1817. tag_info = &aic7xxx_tag_info[ahc->unit];
  1818. tags = tag_info->tag_commands[devinfo->target_offset];
  1819. if (tags > AHC_MAX_QUEUE)
  1820. tags = AHC_MAX_QUEUE;
  1821. }
  1822. }
  1823. return (tags);
  1824. }
  1825. /*
  1826. * Determines the queue depth for a given device.
  1827. */
  1828. static void
  1829. ahc_linux_device_queue_depth(struct ahc_softc *ahc,
  1830. struct ahc_linux_device *dev)
  1831. {
  1832. struct ahc_devinfo devinfo;
  1833. u_int tags;
  1834. ahc_compile_devinfo(&devinfo,
  1835. dev->target->channel == 0
  1836. ? ahc->our_id : ahc->our_id_b,
  1837. dev->target->target, dev->lun,
  1838. dev->target->channel == 0 ? 'A' : 'B',
  1839. ROLE_INITIATOR);
  1840. tags = ahc_linux_user_tagdepth(ahc, &devinfo);
  1841. if (tags != 0
  1842. && dev->scsi_device != NULL
  1843. && dev->scsi_device->tagged_supported != 0) {
  1844. ahc_set_tags(ahc, &devinfo, AHC_QUEUE_TAGGED);
  1845. ahc_print_devinfo(ahc, &devinfo);
  1846. printf("Tagged Queuing enabled. Depth %d\n", tags);
  1847. } else {
  1848. ahc_set_tags(ahc, &devinfo, AHC_QUEUE_NONE);
  1849. }
  1850. }
/*
 * Issue commands from the device's busy queue for as long as the device
 * has openings and neither the device nor the controller queue is
 * frozen.  For each command an SCB is filled in (SCSI id/lun, transfer
 * parameters, tag message, CDB, and scatter/gather list) and handed to
 * the sequencer via ahc_queue_scb().
 */
static void
ahc_linux_run_device_queue(struct ahc_softc *ahc, struct ahc_linux_device *dev)
{
	struct	 ahc_cmd *acmd;
	struct	 scsi_cmnd *cmd;
	struct	 scb *scb;
	struct	 hardware_scb *hscb;
	struct	 ahc_initiator_tinfo *tinfo;
	struct	 ahc_tmode_tstate *tstate;
	uint16_t mask;

	if ((dev->flags & AHC_DEV_ON_RUN_LIST) != 0)
		panic("running device on run list");

	while ((acmd = TAILQ_FIRST(&dev->busyq)) != NULL
	    && dev->openings > 0 && dev->qfrozen == 0) {

		/*
		 * Schedule us to run later.  The only reason we are not
		 * running is because the whole controller Q is frozen.
		 */
		if (ahc->platform_data->qfrozen != 0) {
			TAILQ_INSERT_TAIL(&ahc->platform_data->device_runq,
					  dev, links);
			dev->flags |= AHC_DEV_ON_RUN_LIST;
			return;
		}
		/*
		 * Get an scb to use.
		 */
		if ((scb = ahc_get_scb(ahc)) == NULL) {
			/* Out of SCBs: park the device and note the shortage. */
			TAILQ_INSERT_TAIL(&ahc->platform_data->device_runq,
					  dev, links);
			dev->flags |= AHC_DEV_ON_RUN_LIST;
			ahc->flags |= AHC_RESOURCE_SHORTAGE;
			return;
		}
		TAILQ_REMOVE(&dev->busyq, acmd, acmd_links.tqe);
		cmd = &acmd_scsi_cmd(acmd);
		scb->io_ctx = cmd;
		scb->platform_data->dev = dev;
		hscb = scb->hscb;
		cmd->host_scribble = (char *)scb;

		/*
		 * Fill out basics of the HSCB.
		 */
		hscb->control = 0;
		hscb->scsiid = BUILD_SCSIID(ahc, cmd);
		hscb->lun = cmd->device->lun;
		mask = SCB_GET_TARGET_MASK(ahc, scb);
		tinfo = ahc_fetch_transinfo(ahc, SCB_GET_CHANNEL(ahc, scb),
					    SCB_GET_OUR_ID(scb),
					    SCB_GET_TARGET(ahc, scb), &tstate);
		hscb->scsirate = tinfo->scsirate;
		hscb->scsioffset = tinfo->curr.offset;
		if ((tstate->ultraenb & mask) != 0)
			hscb->control |= ULTRAENB;
		if ((ahc->user_discenable & mask) != 0)
			hscb->control |= DISCENB;
		if ((tstate->auto_negotiate & mask) != 0) {
			/* Negotiation pending: request a message-out phase. */
			scb->flags |= SCB_AUTO_NEGOTIATE;
			scb->hscb->control |= MK_MESSAGE;
		}
		if ((dev->flags & (AHC_DEV_Q_TAGGED|AHC_DEV_Q_BASIC)) != 0) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
			int	msg_bytes;
			uint8_t tag_msgs[2];

			/* Let the mid-layer pick the tag message if it can. */
			msg_bytes = scsi_populate_tag_msg(cmd, tag_msgs);
			if (msg_bytes && tag_msgs[0] != MSG_SIMPLE_TASK) {
				hscb->control |= tag_msgs[0];
				if (tag_msgs[0] == MSG_ORDERED_TASK)
					dev->commands_since_idle_or_otag = 0;
			} else
#endif
			/*
			 * Periodically send an ordered tag once every
			 * AHC_OTAG_THRESH commands; otherwise use a
			 * simple tag.
			 */
			if (dev->commands_since_idle_or_otag == AHC_OTAG_THRESH
			 && (dev->flags & AHC_DEV_Q_TAGGED) != 0) {
				hscb->control |= MSG_ORDERED_TASK;
				dev->commands_since_idle_or_otag = 0;
			} else {
				hscb->control |= MSG_SIMPLE_TASK;
			}
		}

		/* Copy the CDB; CDBs longer than 12 bytes go out of line. */
		hscb->cdb_len = cmd->cmd_len;
		if (hscb->cdb_len <= 12) {
			memcpy(hscb->shared_data.cdb, cmd->cmnd, hscb->cdb_len);
		} else {
			memcpy(hscb->cdb32, cmd->cmnd, hscb->cdb_len);
			scb->flags |= SCB_CDB32_PTR;
		}

		scb->platform_data->xfer_len = 0;
		ahc_set_residual(scb, 0);
		ahc_set_sense_residual(scb, 0);
		scb->sg_count = 0;
		if (cmd->use_sg != 0) {
			struct	ahc_dma_seg *sg;
			struct	scatterlist *cur_seg;
			struct	scatterlist *end_seg;
			int	nseg;

			cur_seg = (struct scatterlist *)cmd->request_buffer;
			nseg = pci_map_sg(ahc->dev_softc, cur_seg, cmd->use_sg,
					  cmd->sc_data_direction);
			end_seg = cur_seg + nseg;
			/* Copy the segments into the SG list. */
			sg = scb->sg_list;
			/*
			 * The sg_count may be larger than nseg if
			 * a transfer crosses a 32bit page.
			 */
			while (cur_seg < end_seg) {
				dma_addr_t addr;
				bus_size_t len;
				int consumed;

				addr = sg_dma_address(cur_seg);
				len = sg_dma_len(cur_seg);
				consumed = ahc_linux_map_seg(ahc, scb,
							     sg, addr, len);
				sg += consumed;
				scb->sg_count += consumed;
				cur_seg++;
			}
			sg--;
			sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);

			/*
			 * Reset the sg list pointer.
			 */
			scb->hscb->sgptr =
			    ahc_htole32(scb->sg_list_phys | SG_FULL_RESID);

			/*
			 * Copy the first SG into the "current"
			 * data pointer area.
			 */
			scb->hscb->dataptr = scb->sg_list->addr;
			scb->hscb->datacnt = scb->sg_list->len;
		} else if (cmd->request_bufflen != 0) {
			/* Single linear buffer: map it as one segment. */
			struct	 ahc_dma_seg *sg;
			dma_addr_t addr;

			sg = scb->sg_list;
			addr = pci_map_single(ahc->dev_softc,
					      cmd->request_buffer,
					      cmd->request_bufflen,
					      cmd->sc_data_direction);
			scb->platform_data->buf_busaddr = addr;
			scb->sg_count = ahc_linux_map_seg(ahc, scb,
							  sg, addr,
							  cmd->request_bufflen);
			sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);

			/*
			 * Reset the sg list pointer.
			 */
			scb->hscb->sgptr =
			    ahc_htole32(scb->sg_list_phys | SG_FULL_RESID);

			/*
			 * Copy the first SG into the "current"
			 * data pointer area.
			 */
			scb->hscb->dataptr = sg->addr;
			scb->hscb->datacnt = sg->len;
		} else {
			/* No data phase for this command. */
			scb->hscb->sgptr = ahc_htole32(SG_LIST_NULL);
			scb->hscb->dataptr = 0;
			scb->hscb->datacnt = 0;
			scb->sg_count = 0;
		}

		ahc_sync_sglist(ahc, scb, BUS_DMASYNC_PREWRITE);
		LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links);
		dev->openings--;
		dev->active++;
		dev->commands_issued++;
		if ((dev->flags & AHC_DEV_PERIODIC_OTAG) != 0)
			dev->commands_since_idle_or_otag++;

		/*
		 * We only allow one untagged transaction
		 * per target in the initiator role unless
		 * we are storing a full busy target *lun*
		 * table in SCB space.
		 */
		if ((scb->hscb->control & (TARGET_SCB|TAG_ENB)) == 0
		 && (ahc->features & AHC_SCB_BTT) == 0) {
			struct scb_tailq *untagged_q;
			int target_offset;

			target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
			untagged_q = &(ahc->untagged_queues[target_offset]);
			TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
			scb->flags |= SCB_UNTAGGEDQ;
			/* Another untagged SCB is in flight; wait our turn. */
			if (TAILQ_FIRST(untagged_q) != scb)
				continue;
		}
		scb->flags |= SCB_ACTIVE;
		ahc_queue_scb(ahc, scb);
	}
}
  2039. /*
  2040. * SCSI controller interrupt handler.
  2041. */
  2042. irqreturn_t
  2043. ahc_linux_isr(int irq, void *dev_id, struct pt_regs * regs)
  2044. {
  2045. struct ahc_softc *ahc;
  2046. u_long flags;
  2047. int ours;
  2048. ahc = (struct ahc_softc *) dev_id;
  2049. ahc_lock(ahc, &flags);
  2050. ours = ahc_intr(ahc);
  2051. if (ahc_linux_next_device_to_run(ahc) != NULL)
  2052. ahc_schedule_runq(ahc);
  2053. ahc_linux_run_complete_queue(ahc);
  2054. ahc_unlock(ahc, &flags);
  2055. return IRQ_RETVAL(ours);
  2056. }
  2057. void
  2058. ahc_platform_flushwork(struct ahc_softc *ahc)
  2059. {
  2060. while (ahc_linux_run_complete_queue(ahc) != NULL)
  2061. ;
  2062. }
  2063. static struct ahc_linux_target*
  2064. ahc_linux_alloc_target(struct ahc_softc *ahc, u_int channel, u_int target)
  2065. {
  2066. struct ahc_linux_target *targ;
  2067. u_int target_offset;
  2068. target_offset = target;
  2069. if (channel != 0)
  2070. target_offset += 8;
  2071. targ = malloc(sizeof(*targ), M_DEVBUG, M_NOWAIT);
  2072. if (targ == NULL)
  2073. return (NULL);
  2074. memset(targ, 0, sizeof(*targ));
  2075. targ->channel = channel;
  2076. targ->target = target;
  2077. targ->ahc = ahc;
  2078. ahc->platform_data->targets[target_offset] = targ;
  2079. return (targ);
  2080. }
/*
 * Remove a target from the platform target table and free it, after
 * resetting its negotiated transfer parameters.
 */
static void
ahc_linux_free_target(struct ahc_softc *ahc, struct ahc_linux_target *targ)
{
	struct ahc_devinfo devinfo;
	struct ahc_initiator_tinfo *tinfo;
	struct ahc_tmode_tstate *tstate;
	u_int our_id;
	u_int target_offset;
	char channel;

	/*
	 * Force a negotiation to async/narrow on any
	 * future command to this device unless a bus
	 * reset occurs between now and that command.
	 */
	channel = 'A' + targ->channel;
	our_id = ahc->our_id;
	target_offset = targ->target;
	if (targ->channel != 0) {
		/* Channel B targets occupy slots 8-15 of the table. */
		target_offset += 8;
		our_id = ahc->our_id_b;
	}
	tinfo = ahc_fetch_transinfo(ahc, channel, our_id,
				    targ->target, &tstate);
	ahc_compile_devinfo(&devinfo, our_id, targ->target, CAM_LUN_WILDCARD,
			    channel, ROLE_INITIATOR);
	ahc_set_syncrate(ahc, &devinfo, NULL, 0, 0, 0,
			 AHC_TRANS_GOAL, /*paused*/FALSE);
	ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
		      AHC_TRANS_GOAL, /*paused*/FALSE);
	ahc_update_neg_request(ahc, &devinfo, tstate, tinfo, AHC_NEG_ALWAYS);
	ahc->platform_data->targets[target_offset] = NULL;
	free(targ, M_DEVBUF);
}
  2114. static struct ahc_linux_device*
  2115. ahc_linux_alloc_device(struct ahc_softc *ahc,
  2116. struct ahc_linux_target *targ, u_int lun)
  2117. {
  2118. struct ahc_linux_device *dev;
  2119. dev = malloc(sizeof(*dev), M_DEVBUG, M_NOWAIT);
  2120. if (dev == NULL)
  2121. return (NULL);
  2122. memset(dev, 0, sizeof(*dev));
  2123. init_timer(&dev->timer);
  2124. TAILQ_INIT(&dev->busyq);
  2125. dev->flags = AHC_DEV_UNCONFIGURED;
  2126. dev->lun = lun;
  2127. dev->target = targ;
  2128. /*
  2129. * We start out life using untagged
  2130. * transactions of which we allow one.
  2131. */
  2132. dev->openings = 1;
  2133. /*
  2134. * Set maxtags to 0. This will be changed if we
  2135. * later determine that we are dealing with
  2136. * a tagged queuing capable device.
  2137. */
  2138. dev->maxtags = 0;
  2139. targ->refcount++;
  2140. targ->devices[lun] = dev;
  2141. return (dev);
  2142. }
  2143. static void
  2144. __ahc_linux_free_device(struct ahc_softc *ahc, struct ahc_linux_device *dev)
  2145. {
  2146. struct ahc_linux_target *targ;
  2147. targ = dev->target;
  2148. targ->devices[dev->lun] = NULL;
  2149. free(dev, M_DEVBUF);
  2150. targ->refcount--;
  2151. if (targ->refcount == 0)
  2152. ahc_linux_free_target(ahc, targ);
  2153. }
/*
 * Free a device object, first synchronously cancelling its timer so
 * the callback cannot fire after the structure is released.
 */
static void
ahc_linux_free_device(struct ahc_softc *ahc, struct ahc_linux_device *dev)
{
	del_timer_sync(&dev->timer);
	__ahc_linux_free_device(ahc, dev);
}
/*
 * Core-to-OSM asynchronous event callback.  Reports transfer
 * negotiation results, bus-device resets, and bus resets to the
 * kernel log and/or the SCSI mid-layer.  Panics on unknown codes.
 */
void
ahc_send_async(struct ahc_softc *ahc, char channel,
	       u_int target, u_int lun, ac_code code, void *arg)
{
	switch (code) {
	case AC_TRANSFER_NEG:
	{
		char	buf[80];
		struct	ahc_linux_target *targ;
		struct	info_str info;
		struct	ahc_initiator_tinfo *tinfo;
		struct	ahc_tmode_tstate *tstate;
		int	target_offset;

		info.buffer = buf;
		info.length = sizeof(buf);
		info.offset = 0;
		info.pos = 0;
		tinfo = ahc_fetch_transinfo(ahc, channel,
					    channel == 'A' ? ahc->our_id
							   : ahc->our_id_b,
					    target, &tstate);

		/*
		 * Don't bother reporting results while
		 * negotiations are still pending.
		 */
		if (tinfo->curr.period != tinfo->goal.period
		 || tinfo->curr.width != tinfo->goal.width
		 || tinfo->curr.offset != tinfo->goal.offset
		 || tinfo->curr.ppr_options != tinfo->goal.ppr_options)
			if (bootverbose == 0)
				break;

		/*
		 * Don't bother reporting results that
		 * are identical to those last reported.
		 */
		target_offset = target;
		if (channel == 'B')
			target_offset += 8;
		targ = ahc->platform_data->targets[target_offset];
		if (targ == NULL)
			break;
		if (tinfo->curr.period == targ->last_tinfo.period
		 && tinfo->curr.width == targ->last_tinfo.width
		 && tinfo->curr.offset == targ->last_tinfo.offset
		 && tinfo->curr.ppr_options == targ->last_tinfo.ppr_options)
			if (bootverbose == 0)
				break;

		/* Remember what we report so duplicates can be suppressed. */
		targ->last_tinfo.period = tinfo->curr.period;
		targ->last_tinfo.width = tinfo->curr.width;
		targ->last_tinfo.offset = tinfo->curr.offset;
		targ->last_tinfo.ppr_options = tinfo->curr.ppr_options;

		printf("(%s:%c:", ahc_name(ahc), channel);
		if (target == CAM_TARGET_WILDCARD)
			printf("*): ");
		else
			printf("%d): ", target);
		ahc_format_transinfo(&info, &tinfo->curr);
		/* NUL-terminate whether or not the message was truncated. */
		if (info.pos < info.length)
			*info.buffer = '\0';
		else
			buf[info.length - 1] = '\0';
		printf("%s", buf);
		break;
	}
	case AC_SENT_BDR:
	{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
		WARN_ON(lun != CAM_LUN_WILDCARD);
		scsi_report_device_reset(ahc->platform_data->host,
					 channel - 'A', target);
#else
		Scsi_Device *scsi_dev;

		/*
		 * Find the SCSI device associated with this
		 * request and indicate that a UA is expected.
		 */
		for (scsi_dev = ahc->platform_data->host->host_queue;
		     scsi_dev != NULL; scsi_dev = scsi_dev->next) {
			if (channel - 'A' == scsi_dev->channel
			 && target == scsi_dev->id
			 && (lun == CAM_LUN_WILDCARD
			  || lun == scsi_dev->lun)) {
				scsi_dev->was_reset = 1;
				scsi_dev->expecting_cc_ua = 1;
			}
		}
#endif
		break;
	}
	case AC_BUS_RESET:
		if (ahc->platform_data->host != NULL) {
			scsi_report_bus_reset(ahc->platform_data->host,
					      channel - 'A');
		}
		break;
	default:
		panic("ahc_send_async: Unexpected async event");
	}
}
  2259. /*
  2260. * Calls the higher level scsi done function and frees the scb.
  2261. */
  2262. void
  2263. ahc_done(struct ahc_softc *ahc, struct scb *scb)
  2264. {
  2265. Scsi_Cmnd *cmd;
  2266. struct ahc_linux_device *dev;
  2267. LIST_REMOVE(scb, pending_links);
  2268. if ((scb->flags & SCB_UNTAGGEDQ) != 0) {
  2269. struct scb_tailq *untagged_q;
  2270. int target_offset;
  2271. target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
  2272. untagged_q = &(ahc->untagged_queues[target_offset]);
  2273. TAILQ_REMOVE(untagged_q, scb, links.tqe);
  2274. ahc_run_untagged_queue(ahc, untagged_q);
  2275. }
  2276. if ((scb->flags & SCB_ACTIVE) == 0) {
  2277. printf("SCB %d done'd twice\n", scb->hscb->tag);
  2278. ahc_dump_card_state(ahc);
  2279. panic("Stopping for safety");
  2280. }
  2281. cmd = scb->io_ctx;
  2282. dev = scb->platform_data->dev;
  2283. dev->active--;
  2284. dev->openings++;
  2285. if ((cmd->result & (CAM_DEV_QFRZN << 16)) != 0) {
  2286. cmd->result &= ~(CAM_DEV_QFRZN << 16);
  2287. dev->qfrozen--;
  2288. }
  2289. ahc_linux_unmap_scb(ahc, scb);
  2290. /*
  2291. * Guard against stale sense data.
  2292. * The Linux mid-layer assumes that sense
  2293. * was retrieved anytime the first byte of
  2294. * the sense buffer looks "sane".
  2295. */
  2296. cmd->sense_buffer[0] = 0;
  2297. if (ahc_get_transaction_status(scb) == CAM_REQ_INPROG) {
  2298. uint32_t amount_xferred;
  2299. amount_xferred =
  2300. ahc_get_transfer_length(scb) - ahc_get_residual(scb);
  2301. if ((scb->flags & SCB_TRANSMISSION_ERROR) != 0) {
  2302. #ifdef AHC_DEBUG
  2303. if ((ahc_debug & AHC_SHOW_MISC) != 0) {
  2304. ahc_print_path(ahc, scb);
  2305. printf("Set CAM_UNCOR_PARITY\n");
  2306. }
  2307. #endif
  2308. ahc_set_transaction_status(scb, CAM_UNCOR_PARITY);
  2309. #ifdef AHC_REPORT_UNDERFLOWS
  2310. /*
  2311. * This code is disabled by default as some
  2312. * clients of the SCSI system do not properly
  2313. * initialize the underflow parameter. This
  2314. * results in spurious termination of commands
  2315. * that complete as expected (e.g. underflow is
  2316. * allowed as command can return variable amounts
  2317. * of data.
  2318. */
  2319. } else if (amount_xferred < scb->io_ctx->underflow) {
  2320. u_int i;
  2321. ahc_print_path(ahc, scb);
  2322. printf("CDB:");
  2323. for (i = 0; i < scb->io_ctx->cmd_len; i++)
  2324. printf(" 0x%x", scb->io_ctx->cmnd[i]);
  2325. printf("\n");
  2326. ahc_print_path(ahc, scb);
  2327. printf("Saw underflow (%ld of %ld bytes). "
  2328. "Treated as error\n",
  2329. ahc_get_residual(scb),
  2330. ahc_get_transfer_length(scb));
  2331. ahc_set_transaction_status(scb, CAM_DATA_RUN_ERR);
  2332. #endif
  2333. } else {
  2334. ahc_set_transaction_status(scb, CAM_REQ_CMP);
  2335. }
  2336. } else if (ahc_get_transaction_status(scb) == CAM_SCSI_STATUS_ERROR) {
  2337. ahc_linux_handle_scsi_status(ahc, dev, scb);
  2338. } else if (ahc_get_transaction_status(scb) == CAM_SEL_TIMEOUT) {
  2339. dev->flags |= AHC_DEV_UNCONFIGURED;
  2340. }
  2341. if (dev->openings == 1
  2342. && ahc_get_transaction_status(scb) == CAM_REQ_CMP
  2343. && ahc_get_scsi_status(scb) != SCSI_STATUS_QUEUE_FULL)
  2344. dev->tag_success_count++;
  2345. /*
  2346. * Some devices deal with temporary internal resource
  2347. * shortages by returning queue full. When the queue
  2348. * full occurrs, we throttle back. Slowly try to get
  2349. * back to our previous queue depth.
  2350. */
  2351. if ((dev->openings + dev->active) < dev->maxtags
  2352. && dev->tag_success_count > AHC_TAG_SUCCESS_INTERVAL) {
  2353. dev->tag_success_count = 0;
  2354. dev->openings++;
  2355. }
  2356. if (dev->active == 0)
  2357. dev->commands_since_idle_or_otag = 0;
  2358. if (TAILQ_EMPTY(&dev->busyq)) {
  2359. if ((dev->flags & AHC_DEV_UNCONFIGURED) != 0
  2360. && dev->active == 0
  2361. && (dev->flags & AHC_DEV_TIMER_ACTIVE) == 0)
  2362. ahc_linux_free_device(ahc, dev);
  2363. } else if ((dev->flags & AHC_DEV_ON_RUN_LIST) == 0) {
  2364. TAILQ_INSERT_TAIL(&ahc->platform_data->device_runq, dev, links);
  2365. dev->flags |= AHC_DEV_ON_RUN_LIST;
  2366. }
  2367. if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
  2368. printf("Recovery SCB completes\n");
  2369. if (ahc_get_transaction_status(scb) == CAM_BDR_SENT
  2370. || ahc_get_transaction_status(scb) == CAM_REQ_ABORTED)
  2371. ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
  2372. if ((ahc->platform_data->flags & AHC_UP_EH_SEMAPHORE) != 0) {
  2373. ahc->platform_data->flags &= ~AHC_UP_EH_SEMAPHORE;
  2374. up(&ahc->platform_data->eh_sem);
  2375. }
  2376. }
  2377. ahc_free_scb(ahc, scb);
  2378. ahc_linux_queue_cmd_complete(ahc, cmd);
  2379. }
  2380. static void
  2381. ahc_linux_handle_scsi_status(struct ahc_softc *ahc,
  2382. struct ahc_linux_device *dev, struct scb *scb)
  2383. {
  2384. struct ahc_devinfo devinfo;
  2385. ahc_compile_devinfo(&devinfo,
  2386. ahc->our_id,
  2387. dev->target->target, dev->lun,
  2388. dev->target->channel == 0 ? 'A' : 'B',
  2389. ROLE_INITIATOR);
  2390. /*
  2391. * We don't currently trust the mid-layer to
  2392. * properly deal with queue full or busy. So,
  2393. * when one occurs, we tell the mid-layer to
  2394. * unconditionally requeue the command to us
  2395. * so that we can retry it ourselves. We also
  2396. * implement our own throttling mechanism so
  2397. * we don't clobber the device with too many
  2398. * commands.
  2399. */
  2400. switch (ahc_get_scsi_status(scb)) {
  2401. default:
  2402. break;
  2403. case SCSI_STATUS_CHECK_COND:
  2404. case SCSI_STATUS_CMD_TERMINATED:
  2405. {
  2406. Scsi_Cmnd *cmd;
  2407. /*
  2408. * Copy sense information to the OS's cmd
  2409. * structure if it is available.
  2410. */
  2411. cmd = scb->io_ctx;
  2412. if (scb->flags & SCB_SENSE) {
  2413. u_int sense_size;
  2414. sense_size = MIN(sizeof(struct scsi_sense_data)
  2415. - ahc_get_sense_residual(scb),
  2416. sizeof(cmd->sense_buffer));
  2417. memcpy(cmd->sense_buffer,
  2418. ahc_get_sense_buf(ahc, scb), sense_size);
  2419. if (sense_size < sizeof(cmd->sense_buffer))
  2420. memset(&cmd->sense_buffer[sense_size], 0,
  2421. sizeof(cmd->sense_buffer) - sense_size);
  2422. cmd->result |= (DRIVER_SENSE << 24);
  2423. #ifdef AHC_DEBUG
  2424. if (ahc_debug & AHC_SHOW_SENSE) {
  2425. int i;
  2426. printf("Copied %d bytes of sense data:",
  2427. sense_size);
  2428. for (i = 0; i < sense_size; i++) {
  2429. if ((i & 0xF) == 0)
  2430. printf("\n");
  2431. printf("0x%x ", cmd->sense_buffer[i]);
  2432. }
  2433. printf("\n");
  2434. }
  2435. #endif
  2436. }
  2437. break;
  2438. }
  2439. case SCSI_STATUS_QUEUE_FULL:
  2440. {
  2441. /*
  2442. * By the time the core driver has returned this
  2443. * command, all other commands that were queued
  2444. * to us but not the device have been returned.
  2445. * This ensures that dev->active is equal to
  2446. * the number of commands actually queued to
  2447. * the device.
  2448. */
  2449. dev->tag_success_count = 0;
  2450. if (dev->active != 0) {
  2451. /*
  2452. * Drop our opening count to the number
  2453. * of commands currently outstanding.
  2454. */
  2455. dev->openings = 0;
  2456. /*
  2457. ahc_print_path(ahc, scb);
  2458. printf("Dropping tag count to %d\n", dev->active);
  2459. */
  2460. if (dev->active == dev->tags_on_last_queuefull) {
  2461. dev->last_queuefull_same_count++;
  2462. /*
  2463. * If we repeatedly see a queue full
  2464. * at the same queue depth, this
  2465. * device has a fixed number of tag
  2466. * slots. Lock in this tag depth
  2467. * so we stop seeing queue fulls from
  2468. * this device.
  2469. */
  2470. if (dev->last_queuefull_same_count
  2471. == AHC_LOCK_TAGS_COUNT) {
  2472. dev->maxtags = dev->active;
  2473. ahc_print_path(ahc, scb);
  2474. printf("Locking max tag count at %d\n",
  2475. dev->active);
  2476. }
  2477. } else {
  2478. dev->tags_on_last_queuefull = dev->active;
  2479. dev->last_queuefull_same_count = 0;
  2480. }
  2481. ahc_set_transaction_status(scb, CAM_REQUEUE_REQ);
  2482. ahc_set_scsi_status(scb, SCSI_STATUS_OK);
  2483. ahc_platform_set_tags(ahc, &devinfo,
  2484. (dev->flags & AHC_DEV_Q_BASIC)
  2485. ? AHC_QUEUE_BASIC : AHC_QUEUE_TAGGED);
  2486. break;
  2487. }
  2488. /*
  2489. * Drop down to a single opening, and treat this
  2490. * as if the target returned BUSY SCSI status.
  2491. */
  2492. dev->openings = 1;
  2493. ahc_set_scsi_status(scb, SCSI_STATUS_BUSY);
  2494. ahc_platform_set_tags(ahc, &devinfo,
  2495. (dev->flags & AHC_DEV_Q_BASIC)
  2496. ? AHC_QUEUE_BASIC : AHC_QUEUE_TAGGED);
  2497. /* FALLTHROUGH */
  2498. }
  2499. case SCSI_STATUS_BUSY:
  2500. {
  2501. /*
  2502. * Set a short timer to defer sending commands for
  2503. * a bit since Linux will not delay in this case.
  2504. */
  2505. if ((dev->flags & AHC_DEV_TIMER_ACTIVE) != 0) {
  2506. printf("%s:%c:%d: Device Timer still active during "
  2507. "busy processing\n", ahc_name(ahc),
  2508. dev->target->channel, dev->target->target);
  2509. break;
  2510. }
  2511. dev->flags |= AHC_DEV_TIMER_ACTIVE;
  2512. dev->qfrozen++;
  2513. init_timer(&dev->timer);
  2514. dev->timer.data = (u_long)dev;
  2515. dev->timer.expires = jiffies + (HZ/2);
  2516. dev->timer.function = ahc_linux_dev_timed_unfreeze;
  2517. add_timer(&dev->timer);
  2518. break;
  2519. }
  2520. }
  2521. }
/*
 * Map a command's CAM completion status onto Linux DID_* codes and
 * insert it into the host's completion queue, kept sorted by command
 * serial number so errored commands are retried in issue order.
 */
static void
ahc_linux_queue_cmd_complete(struct ahc_softc *ahc, Scsi_Cmnd *cmd)
{
	/*
	 * Typically, the complete queue has very few entries
	 * queued to it before the queue is emptied by
	 * ahc_linux_run_complete_queue, so sorting the entries
	 * by generation number should be inexpensive.
	 * We perform the sort so that commands that complete
	 * with an error are retuned in the order origionally
	 * queued to the controller so that any subsequent retries
	 * are performed in order.  The underlying ahc routines do
	 * not guarantee the order that aborted commands will be
	 * returned to us.
	 */
	struct ahc_completeq *completeq;
	struct ahc_cmd *list_cmd;
	struct ahc_cmd *acmd;

	/*
	 * Map CAM error codes into Linux Error codes.  We
	 * avoid the conversion so that the DV code has the
	 * full error information available when making
	 * state change decisions.
	 */
	{
		u_int new_status;

		switch (ahc_cmd_get_transaction_status(cmd)) {
		case CAM_REQ_INPROG:
		case CAM_REQ_CMP:
		case CAM_SCSI_STATUS_ERROR:
			new_status = DID_OK;
			break;
		case CAM_REQ_ABORTED:
			new_status = DID_ABORT;
			break;
		case CAM_BUSY:
			new_status = DID_BUS_BUSY;
			break;
		case CAM_REQ_INVALID:
		case CAM_PATH_INVALID:
			new_status = DID_BAD_TARGET;
			break;
		case CAM_SEL_TIMEOUT:
			new_status = DID_NO_CONNECT;
			break;
		case CAM_SCSI_BUS_RESET:
		case CAM_BDR_SENT:
			new_status = DID_RESET;
			break;
		case CAM_UNCOR_PARITY:
			new_status = DID_PARITY;
			break;
		case CAM_CMD_TIMEOUT:
			new_status = DID_TIME_OUT;
			break;
		case CAM_UA_ABORT:
		case CAM_REQ_CMP_ERR:
		case CAM_AUTOSENSE_FAIL:
		case CAM_NO_HBA:
		case CAM_DATA_RUN_ERR:
		case CAM_UNEXP_BUSFREE:
		case CAM_SEQUENCE_FAIL:
		case CAM_CCB_LEN_ERR:
		case CAM_PROVIDE_FAIL:
		case CAM_REQ_TERMIO:
		case CAM_UNREC_HBA_ERROR:
		case CAM_REQ_TOO_BIG:
			new_status = DID_ERROR;
			break;
		case CAM_REQUEUE_REQ:
			/*
			 * If we want the request requeued, make sure there
			 * are sufficent retries.  In the old scsi error code,
			 * we used to be able to specify a result code that
			 * bypassed the retry count.  Now we must use this
			 * hack.  We also "fake" a check condition with
			 * a sense code of ABORTED COMMAND.  This seems to
			 * evoke a retry even if this command is being sent
			 * via the eh thread.  Ick!  Ick!  Ick!
			 */
			if (cmd->retries > 0)
				cmd->retries--;
			new_status = DID_OK;
			ahc_cmd_set_scsi_status(cmd, SCSI_STATUS_CHECK_COND);
			cmd->result |= (DRIVER_SENSE << 24);
			memset(cmd->sense_buffer, 0,
			       sizeof(cmd->sense_buffer));
			/* Fabricate sense: current error, ABORTED COMMAND key. */
			cmd->sense_buffer[0] = SSD_ERRCODE_VALID
					     | SSD_CURRENT_ERROR;
			cmd->sense_buffer[2] = SSD_KEY_ABORTED_COMMAND;
			break;
		default:
			/* We should never get here */
			new_status = DID_ERROR;
			break;
		}

		ahc_cmd_set_transaction_status(cmd, new_status);
	}

	/* Insertion sort by serial number: walk until we find the first
	 * entry issued after this command and insert before it. */
	completeq = &ahc->platform_data->completeq;
	list_cmd = TAILQ_FIRST(completeq);
	acmd = (struct ahc_cmd *)cmd;
	while (list_cmd != NULL
	    && acmd_scsi_cmd(list_cmd).serial_number
	     < acmd_scsi_cmd(acmd).serial_number)
		list_cmd = TAILQ_NEXT(list_cmd, acmd_links.tqe);
	if (list_cmd != NULL)
		TAILQ_INSERT_BEFORE(list_cmd, acmd, acmd_links.tqe);
	else
		TAILQ_INSERT_TAIL(completeq, acmd, acmd_links.tqe);
}
  2632. static void
  2633. ahc_linux_sem_timeout(u_long arg)
  2634. {
  2635. struct ahc_softc *ahc;
  2636. u_long s;
  2637. ahc = (struct ahc_softc *)arg;
  2638. ahc_lock(ahc, &s);
  2639. if ((ahc->platform_data->flags & AHC_UP_EH_SEMAPHORE) != 0) {
  2640. ahc->platform_data->flags &= ~AHC_UP_EH_SEMAPHORE;
  2641. up(&ahc->platform_data->eh_sem);
  2642. }
  2643. ahc_unlock(ahc, &s);
  2644. }
  2645. static void
  2646. ahc_linux_freeze_simq(struct ahc_softc *ahc)
  2647. {
  2648. ahc->platform_data->qfrozen++;
  2649. if (ahc->platform_data->qfrozen == 1) {
  2650. scsi_block_requests(ahc->platform_data->host);
  2651. /* XXX What about Twin channels? */
  2652. ahc_platform_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS,
  2653. CAM_LUN_WILDCARD, SCB_LIST_NULL,
  2654. ROLE_INITIATOR, CAM_REQUEUE_REQ);
  2655. }
  2656. }
  2657. static void
  2658. ahc_linux_release_simq(u_long arg)
  2659. {
  2660. struct ahc_softc *ahc;
  2661. u_long s;
  2662. int unblock_reqs;
  2663. ahc = (struct ahc_softc *)arg;
  2664. unblock_reqs = 0;
  2665. ahc_lock(ahc, &s);
  2666. if (ahc->platform_data->qfrozen > 0)
  2667. ahc->platform_data->qfrozen--;
  2668. if (ahc->platform_data->qfrozen == 0)
  2669. unblock_reqs = 1;
  2670. ahc_schedule_runq(ahc);
  2671. ahc_unlock(ahc, &s);
  2672. /*
  2673. * There is still a race here. The mid-layer
  2674. * should keep its own freeze count and use
  2675. * a bottom half handler to run the queues
  2676. * so we can unblock with our own lock held.
  2677. */
  2678. if (unblock_reqs)
  2679. scsi_unblock_requests(ahc->platform_data->host);
  2680. }
  2681. static void
  2682. ahc_linux_dev_timed_unfreeze(u_long arg)
  2683. {
  2684. struct ahc_linux_device *dev;
  2685. struct ahc_softc *ahc;
  2686. u_long s;
  2687. dev = (struct ahc_linux_device *)arg;
  2688. ahc = dev->target->ahc;
  2689. ahc_lock(ahc, &s);
  2690. dev->flags &= ~AHC_DEV_TIMER_ACTIVE;
  2691. if (dev->qfrozen > 0)
  2692. dev->qfrozen--;
  2693. if (dev->qfrozen == 0
  2694. && (dev->flags & AHC_DEV_ON_RUN_LIST) == 0)
  2695. ahc_linux_run_device_queue(ahc, dev);
  2696. if (TAILQ_EMPTY(&dev->busyq)
  2697. && dev->active == 0)
  2698. __ahc_linux_free_device(ahc, dev);
  2699. ahc_unlock(ahc, &s);
  2700. }
/*
 * Common implementation of the mid-layer abort and device-reset
 * entry points.  Locates the command (device busy queue, untagged
 * queue, qinfifo, or active on the bus), queues the appropriate
 * ABORT/BDR message to the target, and sleeps (bounded by a 5 second
 * timer) until the recovery SCB completes.  Returns SUCCESS or FAILED
 * per the mid-layer error-handling contract.
 */
static int
ahc_linux_queue_recovery_cmd(Scsi_Cmnd *cmd, scb_flag flag)
{
	struct ahc_softc *ahc;
	struct ahc_cmd *acmd;
	struct ahc_cmd *list_acmd;
	struct ahc_linux_device *dev;
	struct scb *pending_scb;
	u_long s;
	u_int  saved_scbptr;
	u_int  active_scb_index;
	u_int  last_phase;
	u_int  saved_scsiid;
	u_int  cdb_byte;
	int    retval;
	int    was_paused;
	int    paused;
	int    wait;
	int    disconnected;

	pending_scb = NULL;
	paused = FALSE;
	wait = FALSE;
	ahc = *(struct ahc_softc **)cmd->device->host->hostdata;
	acmd = (struct ahc_cmd *)cmd;

	printf("%s:%d:%d:%d: Attempting to queue a%s message\n",
	       ahc_name(ahc), cmd->device->channel,
	       cmd->device->id, cmd->device->lun,
	       flag == SCB_ABORT ? "n ABORT" : " TARGET RESET");

	printf("CDB:");
	for (cdb_byte = 0; cdb_byte < cmd->cmd_len; cdb_byte++)
		printf(" 0x%x", cmd->cmnd[cdb_byte]);
	printf("\n");

	/*
	 * In all versions of Linux, we have to work around
	 * a major flaw in how the mid-layer is locked down
	 * if we are to sleep successfully in our error handler
	 * while allowing our interrupt handler to run.  Since
	 * the midlayer acquires either the io_request_lock or
	 * our lock prior to calling us, we must use the
	 * spin_unlock_irq() method for unlocking our lock.
	 * This will force interrupts to be enabled on the
	 * current CPU.  Since the EH thread should not have
	 * been running with CPU interrupts disabled other than
	 * by acquiring either the io_request_lock or our own
	 * lock, this *should* be safe.
	 */
	ahc_midlayer_entrypoint_lock(ahc, &s);

	/*
	 * First determine if we currently own this command.
	 * Start by searching the device queue.  If not found
	 * there, check the pending_scb list.  If not found
	 * at all, and the system wanted us to just abort the
	 * command, return success.
	 */
	dev = ahc_linux_get_device(ahc, cmd->device->channel, cmd->device->id,
				   cmd->device->lun, /*alloc*/FALSE);

	if (dev == NULL) {
		/*
		 * No target device for this command exists,
		 * so we must not still own the command.
		 */
		printf("%s:%d:%d:%d: Is not an active device\n",
		       ahc_name(ahc), cmd->device->channel, cmd->device->id,
		       cmd->device->lun);
		retval = SUCCESS;
		goto no_cmd;
	}

	/* Still sitting on the device's busy queue?  (NULL after the
	 * loop means the command was not found.) */
	TAILQ_FOREACH(list_acmd, &dev->busyq, acmd_links.tqe) {
		if (list_acmd == acmd)
			break;
	}

	if (list_acmd != NULL) {
		printf("%s:%d:%d:%d: Command found on device queue\n",
		       ahc_name(ahc), cmd->device->channel, cmd->device->id,
		       cmd->device->lun);
		if (flag == SCB_ABORT) {
			/* Never reached the hardware; complete it aborted. */
			TAILQ_REMOVE(&dev->busyq, list_acmd, acmd_links.tqe);
			cmd->result = DID_ABORT << 16;
			ahc_linux_queue_cmd_complete(ahc, cmd);
			retval = SUCCESS;
			goto done;
		}
	}

	/* For untagged devices, the command may be on the controller's
	 * untagged staging queue instead. */
	if ((dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED)) == 0
	 && ahc_search_untagged_queues(ahc, cmd, cmd->device->id,
				       cmd->device->channel + 'A',
				       cmd->device->lun,
				       CAM_REQ_ABORTED, SEARCH_COMPLETE) != 0) {
		printf("%s:%d:%d:%d: Command found on untagged queue\n",
		       ahc_name(ahc), cmd->device->channel, cmd->device->id,
		       cmd->device->lun);
		retval = SUCCESS;
		goto done;
	}

	/*
	 * See if we can find a matching cmd in the pending list.
	 */
	LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) {
		if (pending_scb->io_ctx == cmd)
			break;
	}

	if (pending_scb == NULL && flag == SCB_DEVICE_RESET) {

		/* Any SCB for this device will do for a target reset */
		LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) {
		  	if (ahc_match_scb(ahc, pending_scb, cmd->device->id,
					  cmd->device->channel + 'A',
					  CAM_LUN_WILDCARD,
					  SCB_LIST_NULL, ROLE_INITIATOR) == 0)
				break;
		}
	}

	if (pending_scb == NULL) {
		printf("%s:%d:%d:%d: Command not found\n",
		       ahc_name(ahc), cmd->device->channel, cmd->device->id,
		       cmd->device->lun);
		goto no_cmd;
	}

	if ((pending_scb->flags & SCB_RECOVERY_SCB) != 0) {
		/*
		 * We can't queue two recovery actions using the same SCB
		 */
		retval = FAILED;
		goto done;
	}

	/*
	 * Ensure that the card doesn't do anything
	 * behind our back and that we didn't "just" miss
	 * an interrupt that would affect this cmd.
	 */
	was_paused = ahc_is_paused(ahc);
	ahc_pause_and_flushwork(ahc);
	paused = TRUE;

	if ((pending_scb->flags & SCB_ACTIVE) == 0) {
		printf("%s:%d:%d:%d: Command already completed\n",
		       ahc_name(ahc), cmd->device->channel, cmd->device->id,
		       cmd->device->lun);
		goto no_cmd;
	}

	printf("%s: At time of recovery, card was %spaused\n",
	       ahc_name(ahc), was_paused ? "" : "not ");
	ahc_dump_card_state(ahc);

	disconnected = TRUE;
	if (flag == SCB_ABORT) {
		/* If the SCB is still in the qinfifo, removing it there
		 * is a complete abort -- it never reached the bus. */
		if (ahc_search_qinfifo(ahc, cmd->device->id,
				       cmd->device->channel + 'A',
				       cmd->device->lun,
				       pending_scb->hscb->tag,
				       ROLE_INITIATOR, CAM_REQ_ABORTED,
				       SEARCH_COMPLETE) > 0) {
			printf("%s:%d:%d:%d: Cmd aborted from QINFIFO\n",
			       ahc_name(ahc), cmd->device->channel,
			       cmd->device->id, cmd->device->lun);
			retval = SUCCESS;
			goto done;
		}
	} else if (ahc_search_qinfifo(ahc, cmd->device->id,
				      cmd->device->channel + 'A',
				      cmd->device->lun, pending_scb->hscb->tag,
				      ROLE_INITIATOR, /*status*/0,
				      SEARCH_COUNT) > 0) {
		/* Still queued to the card, so not disconnected on the bus. */
		disconnected = FALSE;
	}

	if (disconnected && (ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) == 0) {
		struct scb *bus_scb;

		/* A fully-identified nexus is on the bus; check whether it
		 * belongs to our command (or target, for a reset). */
		bus_scb = ahc_lookup_scb(ahc, ahc_inb(ahc, SCB_TAG));
		if (bus_scb == pending_scb)
			disconnected = FALSE;
		else if (flag != SCB_ABORT
		      && ahc_inb(ahc, SAVED_SCSIID) == pending_scb->hscb->scsiid
		      && ahc_inb(ahc, SAVED_LUN) == SCB_GET_LUN(pending_scb))
			disconnected = FALSE;
	}

	/*
	 * At this point, pending_scb is the scb associated with the
	 * passed in command.  That command is currently active on the
	 * bus, is in the disconnected state, or we're hoping to find
	 * a command for the same target active on the bus to abuse to
	 * send a BDR.  Queue the appropriate message based on which of
	 * these states we are in.
	 */
	last_phase = ahc_inb(ahc, LASTPHASE);
	saved_scbptr = ahc_inb(ahc, SCBPTR);
	active_scb_index = ahc_inb(ahc, SCB_TAG);
	saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
	if (last_phase != P_BUSFREE
	 && (pending_scb->hscb->tag == active_scb_index
	  || (flag == SCB_DEVICE_RESET
	   && SCSIID_TARGET(ahc, saved_scsiid) == cmd->device->id))) {

		/*
		 * We're active on the bus, so assert ATN
		 * and hope that the target responds.
		 */
		pending_scb = ahc_lookup_scb(ahc, active_scb_index);
		pending_scb->flags |= SCB_RECOVERY_SCB|flag;
		ahc_outb(ahc, MSG_OUT, HOST_MSG);
		ahc_outb(ahc, SCSISIGO, last_phase|ATNO);
		printf("%s:%d:%d:%d: Device is active, asserting ATN\n",
		       ahc_name(ahc), cmd->device->channel, cmd->device->id,
		       cmd->device->lun);
		wait = TRUE;
	} else if (disconnected) {

		/*
		 * Actually re-queue this SCB in an attempt
		 * to select the device before it reconnects.
		 * In either case (selection or reselection),
		 * we will now issue the approprate message
		 * to the timed-out device.
		 *
		 * Set the MK_MESSAGE control bit indicating
		 * that we desire to send a message.  We
		 * also set the disconnected flag since
		 * in the paging case there is no guarantee
		 * that our SCB control byte matches the
		 * version on the card.  We don't want the
		 * sequencer to abort the command thinking
		 * an unsolicited reselection occurred.
		 */
		pending_scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
		pending_scb->flags |= SCB_RECOVERY_SCB|flag;

		/*
		 * Remove any cached copy of this SCB in the
		 * disconnected list in preparation for the
		 * queuing of our abort SCB.  We use the
		 * same element in the SCB, SCB_NEXT, for
		 * both the qinfifo and the disconnected list.
		 */
		ahc_search_disc_list(ahc, cmd->device->id,
				     cmd->device->channel + 'A',
				     cmd->device->lun, pending_scb->hscb->tag,
				     /*stop_on_first*/TRUE,
				     /*remove*/TRUE,
				     /*save_state*/FALSE);

		/*
		 * In the non-paging case, the sequencer will
		 * never re-reference the in-core SCB.
		 * To make sure we are notified during
		 * reslection, set the MK_MESSAGE flag in
		 * the card's copy of the SCB.
		 */
		if ((ahc->flags & AHC_PAGESCBS) == 0) {
			ahc_outb(ahc, SCBPTR, pending_scb->hscb->tag);
			ahc_outb(ahc, SCB_CONTROL,
				 ahc_inb(ahc, SCB_CONTROL)|MK_MESSAGE);
		}

		/*
		 * Clear out any entries in the QINFIFO first
		 * so we are the next SCB for this target
		 * to run.
		 */
		ahc_search_qinfifo(ahc, cmd->device->id,
				   cmd->device->channel + 'A',
				   cmd->device->lun, SCB_LIST_NULL,
				   ROLE_INITIATOR, CAM_REQUEUE_REQ,
				   SEARCH_COMPLETE);
		ahc_qinfifo_requeue_tail(ahc, pending_scb);
		ahc_outb(ahc, SCBPTR, saved_scbptr);
		ahc_print_path(ahc, pending_scb);
		printf("Device is disconnected, re-queuing SCB\n");
		wait = TRUE;
	} else {
		printf("%s:%d:%d:%d: Unable to deliver message\n",
		       ahc_name(ahc), cmd->device->channel, cmd->device->id,
		       cmd->device->lun);
		retval = FAILED;
		goto done;
	}

no_cmd:
	/*
	 * Our assumption is that if we don't have the command, no
	 * recovery action was required, so we return success.  Again,
	 * the semantics of the mid-layer recovery engine are not
	 * well defined, so this may change in time.
	 */
	retval = SUCCESS;
done:
	if (paused)
		ahc_unpause(ahc);
	if (wait) {
		struct timer_list timer;
		int ret;

		/* Sleep until the recovery SCB completes (ahc_done posts
		 * eh_sem) or the 5 second watchdog fires. */
		ahc->platform_data->flags |= AHC_UP_EH_SEMAPHORE;
		spin_unlock_irq(&ahc->platform_data->spin_lock);
		init_timer(&timer);
		timer.data = (u_long)ahc;
		timer.expires = jiffies + (5 * HZ);
		timer.function = ahc_linux_sem_timeout;
		add_timer(&timer);
		printf("Recovery code sleeping\n");
		down(&ahc->platform_data->eh_sem);
		printf("Recovery code awake\n");
        	ret = del_timer_sync(&timer);
		if (ret == 0) {
			/* del_timer_sync() == 0 means the timer already
			 * fired: the recovery action timed out. */
			printf("Timer Expired\n");
			retval = FAILED;
		}
		spin_lock_irq(&ahc->platform_data->spin_lock);
	}
	ahc_schedule_runq(ahc);
	ahc_linux_run_complete_queue(ahc);
	ahc_midlayer_entrypoint_unlock(ahc, &s);
	return (retval);
}
  3003. void
  3004. ahc_platform_dump_card_state(struct ahc_softc *ahc)
  3005. {
  3006. struct ahc_linux_device *dev;
  3007. int channel;
  3008. int maxchannel;
  3009. int target;
  3010. int maxtarget;
  3011. int lun;
  3012. int i;
  3013. maxchannel = (ahc->features & AHC_TWIN) ? 1 : 0;
  3014. maxtarget = (ahc->features & AHC_WIDE) ? 15 : 7;
  3015. for (channel = 0; channel <= maxchannel; channel++) {
  3016. for (target = 0; target <=maxtarget; target++) {
  3017. for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
  3018. struct ahc_cmd *acmd;
  3019. dev = ahc_linux_get_device(ahc, channel, target,
  3020. lun, /*alloc*/FALSE);
  3021. if (dev == NULL)
  3022. continue;
  3023. printf("DevQ(%d:%d:%d): ",
  3024. channel, target, lun);
  3025. i = 0;
  3026. TAILQ_FOREACH(acmd, &dev->busyq,
  3027. acmd_links.tqe) {
  3028. if (i++ > AHC_SCB_MAX)
  3029. break;
  3030. }
  3031. printf("%d waiting\n", i);
  3032. }
  3033. }
  3034. }
  3035. }
  3036. static void ahc_linux_exit(void);
  3037. static void ahc_linux_get_period(struct scsi_target *starget)
  3038. {
  3039. struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
  3040. struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
  3041. struct ahc_tmode_tstate *tstate;
  3042. struct ahc_initiator_tinfo *tinfo
  3043. = ahc_fetch_transinfo(ahc,
  3044. starget->channel + 'A',
  3045. shost->this_id, starget->id, &tstate);
  3046. spi_period(starget) = tinfo->curr.period;
  3047. }
  3048. static void ahc_linux_set_period(struct scsi_target *starget, int period)
  3049. {
  3050. struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
  3051. struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
  3052. struct ahc_tmode_tstate *tstate;
  3053. struct ahc_initiator_tinfo *tinfo
  3054. = ahc_fetch_transinfo(ahc,
  3055. starget->channel + 'A',
  3056. shost->this_id, starget->id, &tstate);
  3057. struct ahc_devinfo devinfo;
  3058. unsigned int ppr_options = tinfo->curr.ppr_options;
  3059. unsigned long flags;
  3060. unsigned long offset = tinfo->curr.offset;
  3061. struct ahc_syncrate *syncrate;
  3062. if (offset == 0)
  3063. offset = MAX_OFFSET;
  3064. ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
  3065. starget->channel + 'A', ROLE_INITIATOR);
  3066. syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT);
  3067. ahc_lock(ahc, &flags);
  3068. ahc_set_syncrate(ahc, &devinfo, syncrate, period, offset,
  3069. ppr_options, AHC_TRANS_GOAL, FALSE);
  3070. ahc_unlock(ahc, &flags);
  3071. }
  3072. static void ahc_linux_get_offset(struct scsi_target *starget)
  3073. {
  3074. struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
  3075. struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
  3076. struct ahc_tmode_tstate *tstate;
  3077. struct ahc_initiator_tinfo *tinfo
  3078. = ahc_fetch_transinfo(ahc,
  3079. starget->channel + 'A',
  3080. shost->this_id, starget->id, &tstate);
  3081. spi_offset(starget) = tinfo->curr.offset;
  3082. }
  3083. static void ahc_linux_set_offset(struct scsi_target *starget, int offset)
  3084. {
  3085. struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
  3086. struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
  3087. struct ahc_tmode_tstate *tstate;
  3088. struct ahc_initiator_tinfo *tinfo
  3089. = ahc_fetch_transinfo(ahc,
  3090. starget->channel + 'A',
  3091. shost->this_id, starget->id, &tstate);
  3092. struct ahc_devinfo devinfo;
  3093. unsigned int ppr_options = 0;
  3094. unsigned int period = 0;
  3095. unsigned long flags;
  3096. struct ahc_syncrate *syncrate = NULL;
  3097. ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
  3098. starget->channel + 'A', ROLE_INITIATOR);
  3099. if (offset != 0) {
  3100. syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT);
  3101. period = tinfo->curr.period;
  3102. ppr_options = tinfo->curr.ppr_options;
  3103. }
  3104. ahc_lock(ahc, &flags);
  3105. ahc_set_syncrate(ahc, &devinfo, syncrate, period, offset,
  3106. ppr_options, AHC_TRANS_GOAL, FALSE);
  3107. ahc_unlock(ahc, &flags);
  3108. }
  3109. static void ahc_linux_get_width(struct scsi_target *starget)
  3110. {
  3111. struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
  3112. struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
  3113. struct ahc_tmode_tstate *tstate;
  3114. struct ahc_initiator_tinfo *tinfo
  3115. = ahc_fetch_transinfo(ahc,
  3116. starget->channel + 'A',
  3117. shost->this_id, starget->id, &tstate);
  3118. spi_width(starget) = tinfo->curr.width;
  3119. }
  3120. static void ahc_linux_set_width(struct scsi_target *starget, int width)
  3121. {
  3122. struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
  3123. struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
  3124. struct ahc_devinfo devinfo;
  3125. unsigned long flags;
  3126. ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
  3127. starget->channel + 'A', ROLE_INITIATOR);
  3128. ahc_lock(ahc, &flags);
  3129. ahc_set_width(ahc, &devinfo, width, AHC_TRANS_GOAL, FALSE);
  3130. ahc_unlock(ahc, &flags);
  3131. }
  3132. static void ahc_linux_get_dt(struct scsi_target *starget)
  3133. {
  3134. struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
  3135. struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
  3136. struct ahc_tmode_tstate *tstate;
  3137. struct ahc_initiator_tinfo *tinfo
  3138. = ahc_fetch_transinfo(ahc,
  3139. starget->channel + 'A',
  3140. shost->this_id, starget->id, &tstate);
  3141. spi_dt(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ;
  3142. }
  3143. static void ahc_linux_set_dt(struct scsi_target *starget, int dt)
  3144. {
  3145. struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
  3146. struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
  3147. struct ahc_tmode_tstate *tstate;
  3148. struct ahc_initiator_tinfo *tinfo
  3149. = ahc_fetch_transinfo(ahc,
  3150. starget->channel + 'A',
  3151. shost->this_id, starget->id, &tstate);
  3152. struct ahc_devinfo devinfo;
  3153. unsigned int ppr_options = tinfo->curr.ppr_options
  3154. & ~MSG_EXT_PPR_DT_REQ;
  3155. unsigned int period = tinfo->curr.period;
  3156. unsigned long flags;
  3157. struct ahc_syncrate *syncrate;
  3158. ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
  3159. starget->channel + 'A', ROLE_INITIATOR);
  3160. syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
  3161. dt ? AHC_SYNCRATE_DT : AHC_SYNCRATE_ULTRA2);
  3162. ahc_lock(ahc, &flags);
  3163. ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->curr.offset,
  3164. ppr_options, AHC_TRANS_GOAL, FALSE);
  3165. ahc_unlock(ahc, &flags);
  3166. }
  3167. static void ahc_linux_get_qas(struct scsi_target *starget)
  3168. {
  3169. struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
  3170. struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
  3171. struct ahc_tmode_tstate *tstate;
  3172. struct ahc_initiator_tinfo *tinfo
  3173. = ahc_fetch_transinfo(ahc,
  3174. starget->channel + 'A',
  3175. shost->this_id, starget->id, &tstate);
  3176. spi_dt(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_QAS_REQ;
  3177. }
  3178. static void ahc_linux_set_qas(struct scsi_target *starget, int qas)
  3179. {
  3180. struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
  3181. struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
  3182. struct ahc_tmode_tstate *tstate;
  3183. struct ahc_initiator_tinfo *tinfo
  3184. = ahc_fetch_transinfo(ahc,
  3185. starget->channel + 'A',
  3186. shost->this_id, starget->id, &tstate);
  3187. struct ahc_devinfo devinfo;
  3188. unsigned int ppr_options = tinfo->curr.ppr_options
  3189. & ~MSG_EXT_PPR_QAS_REQ;
  3190. unsigned int period = tinfo->curr.period;
  3191. unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ;
  3192. unsigned long flags;
  3193. struct ahc_syncrate *syncrate;
  3194. if (qas)
  3195. ppr_options |= MSG_EXT_PPR_QAS_REQ;
  3196. ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
  3197. starget->channel + 'A', ROLE_INITIATOR);
  3198. syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
  3199. dt ? AHC_SYNCRATE_DT : AHC_SYNCRATE_ULTRA2);
  3200. ahc_lock(ahc, &flags);
  3201. ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->curr.offset,
  3202. ppr_options, AHC_TRANS_GOAL, FALSE);
  3203. ahc_unlock(ahc, &flags);
  3204. }
  3205. static void ahc_linux_get_iu(struct scsi_target *starget)
  3206. {
  3207. struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
  3208. struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
  3209. struct ahc_tmode_tstate *tstate;
  3210. struct ahc_initiator_tinfo *tinfo
  3211. = ahc_fetch_transinfo(ahc,
  3212. starget->channel + 'A',
  3213. shost->this_id, starget->id, &tstate);
  3214. spi_dt(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ;
  3215. }
  3216. static void ahc_linux_set_iu(struct scsi_target *starget, int iu)
  3217. {
  3218. struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
  3219. struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
  3220. struct ahc_tmode_tstate *tstate;
  3221. struct ahc_initiator_tinfo *tinfo
  3222. = ahc_fetch_transinfo(ahc,
  3223. starget->channel + 'A',
  3224. shost->this_id, starget->id, &tstate);
  3225. struct ahc_devinfo devinfo;
  3226. unsigned int ppr_options = tinfo->curr.ppr_options
  3227. & ~MSG_EXT_PPR_IU_REQ;
  3228. unsigned int period = tinfo->curr.period;
  3229. unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ;
  3230. unsigned long flags;
  3231. struct ahc_syncrate *syncrate;
  3232. if (iu)
  3233. ppr_options |= MSG_EXT_PPR_IU_REQ;
  3234. ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
  3235. starget->channel + 'A', ROLE_INITIATOR);
  3236. syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
  3237. dt ? AHC_SYNCRATE_DT : AHC_SYNCRATE_ULTRA2);
  3238. ahc_lock(ahc, &flags);
  3239. ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->curr.offset,
  3240. ppr_options, AHC_TRANS_GOAL, FALSE);
  3241. ahc_unlock(ahc, &flags);
  3242. }
/*
 * SPI transport class glue: show/store callbacks the SCSI midlayer uses to
 * expose and tune the offset, period, width, DT, IU and QAS negotiation
 * goals (e.g. via sysfs).  show_* = 1 makes each attribute visible.
 */
static struct spi_function_template ahc_linux_transport_functions = {
	.get_offset	= ahc_linux_get_offset,
	.set_offset	= ahc_linux_set_offset,
	.show_offset	= 1,
	.get_period	= ahc_linux_get_period,
	.set_period	= ahc_linux_set_period,
	.show_period	= 1,
	.get_width	= ahc_linux_get_width,
	.set_width	= ahc_linux_set_width,
	.show_width	= 1,
	.get_dt		= ahc_linux_get_dt,
	.set_dt		= ahc_linux_set_dt,
	.show_dt	= 1,
	.get_iu		= ahc_linux_get_iu,
	.set_iu		= ahc_linux_set_iu,
	.show_iu	= 1,
	.get_qas	= ahc_linux_get_qas,
	.set_qas	= ahc_linux_set_qas,
	.show_qas	= 1,
};
  3263. static int __init
  3264. ahc_linux_init(void)
  3265. {
  3266. #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
  3267. ahc_linux_transport_template = spi_attach_transport(&ahc_linux_transport_functions);
  3268. if (!ahc_linux_transport_template)
  3269. return -ENODEV;
  3270. if (ahc_linux_detect(&aic7xxx_driver_template))
  3271. return 0;
  3272. spi_release_transport(ahc_linux_transport_template);
  3273. ahc_linux_exit();
  3274. return -ENODEV;
  3275. #else
  3276. scsi_register_module(MODULE_SCSI_HA, &aic7xxx_driver_template);
  3277. if (aic7xxx_driver_template.present == 0) {
  3278. scsi_unregister_module(MODULE_SCSI_HA,
  3279. &aic7xxx_driver_template);
  3280. return (-ENODEV);
  3281. }
  3282. return (0);
  3283. #endif
  3284. }
/*
 * Module exit point: tear down PCI and EISA probing and release the SPI
 * transport template.
 */
static void
ahc_linux_exit(void)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
	/*
	 * In 2.4 we have to unregister from the PCI core _after_
	 * unregistering from the scsi midlayer to avoid dangling
	 * references.
	 */
	scsi_unregister_module(MODULE_SCSI_HA, &aic7xxx_driver_template);
#endif
	ahc_linux_pci_exit();
	ahc_linux_eisa_exit();
	spi_release_transport(ahc_linux_transport_template);
}
/* Register the driver's load/unload entry points with the module loader. */
module_init(ahc_linux_init);
module_exit(ahc_linux_exit);