/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2013 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/kobject.h>
#include <linux/slab.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>

#include "qla_target.h"
/*
 * Driver version
 */
char qla2x00_version_str[40];

static int apidev_major;

/*
 * SRB allocation cache
 */
static struct kmem_cache *srb_cachep;

/*
 * CT6 CTX allocation cache
 */
static struct kmem_cache *ctx_cachep;

/*
 * error level for logging
 */
int ql_errlev = ql_log_all;
static int ql2xenableclass2;
module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xenableclass2,
	"Specify if Class 2 operations are supported from the very "
	"beginning. Default is 0 - class 2 not supported.");

int ql2xlogintimeout = 20;
module_param(ql2xlogintimeout, int, S_IRUGO);
MODULE_PARM_DESC(ql2xlogintimeout,
	"Login timeout value in seconds.");

int qlport_down_retry;
module_param(qlport_down_retry, int, S_IRUGO);
MODULE_PARM_DESC(qlport_down_retry,
	"Maximum number of command retries to a port that returns "
	"a PORT-DOWN status.");
int ql2xplogiabsentdevice;
module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xplogiabsentdevice,
	"Option to enable PLOGI to devices that are not present after "
	"a Fabric scan. This is needed for several broken switches. "
	"Default is 0 - no PLOGI. 1 - perform PLOGI.");
int ql2xloginretrycount = 0;
module_param(ql2xloginretrycount, int, S_IRUGO);
MODULE_PARM_DESC(ql2xloginretrycount,
	"Specify an alternate value for the NVRAM login retry count.");

int ql2xallocfwdump = 1;
module_param(ql2xallocfwdump, int, S_IRUGO);
MODULE_PARM_DESC(ql2xallocfwdump,
	"Option to enable allocation of memory for a firmware dump "
	"during HBA initialization. Memory allocation requirements "
	"vary by ISP type. Default is 1 - allocate memory.");

int ql2xextended_error_logging;
module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xextended_error_logging,
	"Option to enable extended error logging,\n"
	"\t\tDefault is 0 - no logging. 0x40000000 - Module Init & Probe.\n"
	"\t\t0x20000000 - Mailbox Cmnds. 0x10000000 - Device Discovery.\n"
	"\t\t0x08000000 - IO tracing. 0x04000000 - DPC Thread.\n"
	"\t\t0x02000000 - Async events. 0x01000000 - Timer routines.\n"
	"\t\t0x00800000 - User space. 0x00400000 - Task Management.\n"
	"\t\t0x00200000 - AER/EEH. 0x00100000 - Multi Q.\n"
	"\t\t0x00080000 - P3P Specific. 0x00040000 - Virtual Port.\n"
	"\t\t0x00020000 - Buffer Dump. 0x00010000 - Misc.\n"
	"\t\t0x00008000 - Verbose. 0x00004000 - Target.\n"
	"\t\t0x00002000 - Target Mgmt. 0x00001000 - Target TMF.\n"
	"\t\t0x7fffffff - For enabling all logs, can be too many logs.\n"
	"\t\t0x1e400000 - Preferred value for capturing essential "
	"debug information (equivalent to old "
	"ql2xextended_error_logging=1).\n"
	"\t\tDo LOGICAL OR of the value to enable more than one level");

int ql2xshiftctondsd = 6;
module_param(ql2xshiftctondsd, int, S_IRUGO);
MODULE_PARM_DESC(ql2xshiftctondsd,
	"Set to control shifting of command type processing "
	"based on total number of SG elements.");

static void qla2x00_free_device(scsi_qla_host_t *);
int ql2xfdmienable = 1;
module_param(ql2xfdmienable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xfdmienable,
	"Enables FDMI registrations. "
	"0 - no FDMI. Default is 1 - perform FDMI.");

int ql2xmaxqdepth = MAX_Q_DEPTH;
module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xmaxqdepth,
	"Maximum queue depth to set for each LUN. "
	"Default is 32.");
int ql2xenabledif = 2;
module_param(ql2xenabledif, int, S_IRUGO);
MODULE_PARM_DESC(ql2xenabledif,
	" Enable T10-CRC-DIF. "
	"Default is 2. 0 - No DIF Support. 1 - Enable it. "
	"2 - Enable DIF for all types, except Type 0.");
int ql2xenablehba_err_chk = 2;
module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xenablehba_err_chk,
	" Enable T10-CRC-DIF Error isolation by HBA:\n"
	" Default is 2.\n"
	"  0 -- Error isolation disabled\n"
	"  1 -- Error isolation enabled only for DIX Type 0\n"
	"  2 -- Error isolation enabled for all Types\n");
int ql2xiidmaenable = 1;
module_param(ql2xiidmaenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xiidmaenable,
	"Enables iIDMA settings. "
	"Default is 1 - perform iIDMA. 0 - no iIDMA.");

int ql2xmaxqueues = 1;
module_param(ql2xmaxqueues, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmaxqueues,
	"Enables MQ settings. "
	"Default is 1 for single queue. Set it to number "
	"of queues in MQ mode.");

int ql2xmultique_tag;
module_param(ql2xmultique_tag, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmultique_tag,
	"Enables CPU affinity settings for the driver. "
	"Default is 0 for no affinity of request and response IO. "
	"Set it to 1 to turn on the cpu affinity.");
int ql2xfwloadbin;
module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xfwloadbin,
	"Option to specify location from which to load ISP firmware:\n"
	" 2 -- load firmware via the request_firmware() (hotplug) "
	"interface.\n"
	" 1 -- load firmware from flash.\n"
	" 0 -- use default semantics.\n");
int ql2xetsenable;
module_param(ql2xetsenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xetsenable,
	"Enables firmware ETS burst. "
	"Default is 0 - skip ETS enablement.");
int ql2xdbwr = 1;
module_param(ql2xdbwr, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xdbwr,
	"Option to specify scheme for request queue posting.\n"
	" 0 -- Regular doorbell.\n"
	" 1 -- CAMRAM doorbell (faster).\n");
int ql2xtargetreset = 1;
module_param(ql2xtargetreset, int, S_IRUGO);
MODULE_PARM_DESC(ql2xtargetreset,
	"Enable target reset. "
	"Default is 1 - use hw defaults.");
int ql2xgffidenable;
module_param(ql2xgffidenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xgffidenable,
	"Enables GFF_ID checks of port type. "
	"Default is 0 - Do not use GFF_ID information.");
int ql2xasynctmfenable;
module_param(ql2xasynctmfenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xasynctmfenable,
	"Enables issue of TM IOCBs asynchronously via IOCB mechanism. "
	"Default is 0 - Issue TM IOCBs via mailbox mechanism.");
int ql2xdontresethba;
module_param(ql2xdontresethba, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xdontresethba,
	"Option to specify reset behaviour.\n"
	" 0 (Default) -- Reset on failure.\n"
	" 1 -- Do not reset on failure.\n");

uint ql2xmaxlun = MAX_LUNS;
module_param(ql2xmaxlun, uint, S_IRUGO);
MODULE_PARM_DESC(ql2xmaxlun,
	"Defines the maximum LU number to register with the SCSI "
	"midlayer. Default is 65535.");

int ql2xmdcapmask = 0x1F;
module_param(ql2xmdcapmask, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmdcapmask,
	"Set the Minidump driver capture mask level. "
	"Default is 0x1F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");

int ql2xmdenable = 1;
module_param(ql2xmdenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmdenable,
	"Enable/disable MiniDump. "
	"0 - MiniDump disabled. "
	"1 (Default) - MiniDump enabled.");
/*
 * SCSI host template entry points
 */
static int qla2xxx_slave_configure(struct scsi_device * device);
static int qla2xxx_slave_alloc(struct scsi_device *);
static int qla2xxx_scan_finished(struct Scsi_Host *, unsigned long time);
static void qla2xxx_scan_start(struct Scsi_Host *);
static void qla2xxx_slave_destroy(struct scsi_device *);
static int qla2xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static int qla2xxx_eh_abort(struct scsi_cmnd *);
static int qla2xxx_eh_device_reset(struct scsi_cmnd *);
static int qla2xxx_eh_target_reset(struct scsi_cmnd *);
static int qla2xxx_eh_bus_reset(struct scsi_cmnd *);
static int qla2xxx_eh_host_reset(struct scsi_cmnd *);

static int qla2x00_change_queue_depth(struct scsi_device *, int, int);
static int qla2x00_change_queue_type(struct scsi_device *, int);

struct scsi_host_template qla2xxx_driver_template = {
	.module = THIS_MODULE,
	.name = QLA2XXX_DRIVER_NAME,
	.queuecommand = qla2xxx_queuecommand,

	.eh_abort_handler = qla2xxx_eh_abort,
	.eh_device_reset_handler = qla2xxx_eh_device_reset,
	.eh_target_reset_handler = qla2xxx_eh_target_reset,
	.eh_bus_reset_handler = qla2xxx_eh_bus_reset,
	.eh_host_reset_handler = qla2xxx_eh_host_reset,

	.slave_configure = qla2xxx_slave_configure,

	.slave_alloc = qla2xxx_slave_alloc,
	.slave_destroy = qla2xxx_slave_destroy,
	.scan_finished = qla2xxx_scan_finished,
	.scan_start = qla2xxx_scan_start,
	.change_queue_depth = qla2x00_change_queue_depth,
	.change_queue_type = qla2x00_change_queue_type,
	.this_id = -1,
	.cmd_per_lun = 3,
	.use_clustering = ENABLE_CLUSTERING,
	.sg_tablesize = SG_ALL,

	.max_sectors = 0xFFFF,
	.shost_attrs = qla2x00_host_attrs,

	.supported_mode = MODE_INITIATOR,
};

static struct scsi_transport_template *qla2xxx_transport_template = NULL;
struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;
/* TODO Convert to inlines
 *
 * Timer routines
 */

__inline__ void
qla2x00_start_timer(scsi_qla_host_t *vha, void *func, unsigned long interval)
{
	init_timer(&vha->timer);
	vha->timer.expires = jiffies + interval * HZ;
	vha->timer.data = (unsigned long)vha;
	vha->timer.function = (void (*)(unsigned long))func;
	add_timer(&vha->timer);
	vha->timer_active = 1;
}

static inline void
qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval)
{
	/* Currently used for 82XX only. */
	if (vha->device_flags & DFLG_DEV_FAILED) {
		ql_dbg(ql_dbg_timer, vha, 0x600d,
		    "Device in a failed state, returning.\n");
		return;
	}

	mod_timer(&vha->timer, jiffies + interval * HZ);
}

static __inline__ void
qla2x00_stop_timer(scsi_qla_host_t *vha)
{
	del_timer_sync(&vha->timer);
	vha->timer_active = 0;
}
static int qla2x00_do_dpc(void *data);

static void qla2x00_rst_aen(scsi_qla_host_t *);

static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
	struct req_que **, struct rsp_que **);
static void qla2x00_free_fw_dump(struct qla_hw_data *);
static void qla2x00_mem_free(struct qla_hw_data *);

/* -------------------------------------------------------------------------- */
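/*
 * qla2x00_alloc_queues
 *    Allocate the request and response queue pointer maps and record
 *    queue zero, so the queues can be freed if a later part of probe
 *    fails.
 *
 * Return:
 *    1 on success, -ENOMEM on allocation failure.
 */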
static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
				struct rsp_que *rsp)
{
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues,
				GFP_KERNEL);
	if (!ha->req_q_map) {
		ql_log(ql_log_fatal, vha, 0x003b,
		    "Unable to allocate memory for request queue ptrs.\n");
		goto fail_req_map;
	}

	ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_rsp_queues,
				GFP_KERNEL);
	if (!ha->rsp_q_map) {
		ql_log(ql_log_fatal, vha, 0x003c,
		    "Unable to allocate memory for response queue ptrs.\n");
		goto fail_rsp_map;
	}

	/*
	 * Make sure we record at least the request and response queue zero in
	 * case we need to free them if part of the probe fails.
	 */
	ha->rsp_q_map[0] = rsp;
	ha->req_q_map[0] = req;
	set_bit(0, ha->rsp_qid_map);
	set_bit(0, ha->req_qid_map);
	return 1;

fail_rsp_map:
	kfree(ha->req_q_map);
	ha->req_q_map = NULL;
fail_req_map:
	return -ENOMEM;
}
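/*
 * qla2x00_free_req_que / qla2x00_free_rsp_que
 *    Release the DMA-coherent ring buffer and bookkeeping memory for a
 *    single request or response queue, if allocated.
 */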
static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
{
	if (req && req->ring)
		dma_free_coherent(&ha->pdev->dev,
		    (req->length + 1) * sizeof(request_t),
		    req->ring, req->dma);

	if (req)
		kfree(req->outstanding_cmds);

	kfree(req);
	req = NULL;
}

static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	if (rsp && rsp->ring)
		dma_free_coherent(&ha->pdev->dev,
		    (rsp->length + 1) * sizeof(response_t),
		    rsp->ring, rsp->dma);

	kfree(rsp);
	rsp = NULL;
}
static void qla2x00_free_queues(struct qla_hw_data *ha)
{
	struct req_que *req;
	struct rsp_que *rsp;
	int cnt;

	for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
		req = ha->req_q_map[cnt];
		qla2x00_free_req_que(ha, req);
	}
	kfree(ha->req_q_map);
	ha->req_q_map = NULL;

	for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
		rsp = ha->rsp_q_map[cnt];
		qla2x00_free_rsp_que(ha, rsp);
	}
	kfree(ha->rsp_q_map);
	ha->rsp_q_map = NULL;
}
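/*
 * qla25xx_setup_mode
 *    Configure multi-queue/CPU-affinity mode when the firmware reports
 *    multi-queue capability (BIT_6) and ql2xmultique_tag is set: create
 *    an additional request queue for I/O and one response queue per
 *    available response queue slot.
 *
 * Return:
 *    0 on success, 1 if multi-queue could not be enabled (driver falls
 *    back to a single request/response queue pair).
 */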
static int qla25xx_setup_mode(struct scsi_qla_host *vha)
{
	uint16_t options = 0;
	int ques, req, ret;
	struct qla_hw_data *ha = vha->hw;

	if (!(ha->fw_attributes & BIT_6)) {
		ql_log(ql_log_warn, vha, 0x00d8,
		    "Firmware is not multi-queue capable.\n");
		goto fail;
	}
	if (ql2xmultique_tag) {
		/* create a request queue for IO */
		options |= BIT_7;
		req = qla25xx_create_req_que(ha, options, 0, 0, -1,
			QLA_DEFAULT_QUE_QOS);
		if (!req) {
			ql_log(ql_log_warn, vha, 0x00e0,
			    "Failed to create request queue.\n");
			goto fail;
		}
		ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1);
		vha->req = ha->req_q_map[req];
		options |= BIT_1;
		for (ques = 1; ques < ha->max_rsp_queues; ques++) {
			ret = qla25xx_create_rsp_que(ha, options, 0, 0, req);
			if (!ret) {
				ql_log(ql_log_warn, vha, 0x00e8,
				    "Failed to create response queue.\n");
				goto fail2;
			}
		}
		ha->flags.cpu_affinity_enabled = 1;
		ql_dbg(ql_dbg_multiq, vha, 0xc007,
		    "CPU affinity mode enabled, "
		    "no. of response queues:%d no. of request queues:%d.\n",
		    ha->max_rsp_queues, ha->max_req_queues);
		ql_dbg(ql_dbg_init, vha, 0x00e9,
		    "CPU affinity mode enabled, "
		    "no. of response queues:%d no. of request queues:%d.\n",
		    ha->max_rsp_queues, ha->max_req_queues);
	}
	return 0;

fail2:
	qla25xx_delete_queues(vha);
	destroy_workqueue(ha->wq);
	ha->wq = NULL;
	vha->req = ha->req_q_map[0];
fail:
	ha->mqenable = 0;
	kfree(ha->req_q_map);
	kfree(ha->rsp_q_map);
	ha->max_req_queues = ha->max_rsp_queues = 1;
	return 1;
}
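/*
 * qla2x00_pci_info_str / qla24xx_pci_info_str
 *    Format a human-readable description of the PCI/PCI-X/PCIe bus mode,
 *    speed and link width into the caller-supplied buffer.
 */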
static char *
qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str)
{
	struct qla_hw_data *ha = vha->hw;
	static char *pci_bus_modes[] = {
		"33", "66", "100", "133",
	};
	uint16_t pci_bus;

	strcpy(str, "PCI");
	pci_bus = (ha->pci_attr & (BIT_9 | BIT_10)) >> 9;
	if (pci_bus) {
		strcat(str, "-X (");
		strcat(str, pci_bus_modes[pci_bus]);
	} else {
		pci_bus = (ha->pci_attr & BIT_8) >> 8;
		strcat(str, " (");
		strcat(str, pci_bus_modes[pci_bus]);
	}
	strcat(str, " MHz)");

	return (str);
}
static char *
qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str)
{
	static char *pci_bus_modes[] = { "33", "66", "100", "133", };
	struct qla_hw_data *ha = vha->hw;
	uint32_t pci_bus;
	int pcie_reg;

	pcie_reg = pci_pcie_cap(ha->pdev);
	if (pcie_reg) {
		char lwstr[6];
		uint16_t pcie_lstat, lspeed, lwidth;

		pcie_reg += PCI_EXP_LNKCAP;
		pci_read_config_word(ha->pdev, pcie_reg, &pcie_lstat);
		lspeed = pcie_lstat & (BIT_0 | BIT_1 | BIT_2 | BIT_3);
		lwidth = (pcie_lstat &
		    (BIT_4 | BIT_5 | BIT_6 | BIT_7 | BIT_8 | BIT_9)) >> 4;

		strcpy(str, "PCIe (");
		switch (lspeed) {
		case 1:
			strcat(str, "2.5GT/s ");
			break;
		case 2:
			strcat(str, "5.0GT/s ");
			break;
		case 3:
			strcat(str, "8.0GT/s ");
			break;
		default:
			strcat(str, "<unknown> ");
			break;
		}
		snprintf(lwstr, sizeof(lwstr), "x%d)", lwidth);
		strcat(str, lwstr);

		return str;
	}

	strcpy(str, "PCI");
	pci_bus = (ha->pci_attr & CSRX_PCIX_BUS_MODE_MASK) >> 8;
	if (pci_bus == 0 || pci_bus == 8) {
		strcat(str, " (");
		strcat(str, pci_bus_modes[pci_bus >> 3]);
	} else {
		strcat(str, "-X ");
		if (pci_bus & BIT_2)
			strcat(str, "Mode 2");
		else
			strcat(str, "Mode 1");
		strcat(str, " (");
		strcat(str, pci_bus_modes[pci_bus & ~BIT_2]);
	}
	strcat(str, " MHz)");

	return str;
}
static char *
qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str)
{
	char un_str[10];
	struct qla_hw_data *ha = vha->hw;

	sprintf(str, "%d.%02d.%02d ", ha->fw_major_version,
	    ha->fw_minor_version,
	    ha->fw_subminor_version);

	if (ha->fw_attributes & BIT_9) {
		strcat(str, "FLX");
		return (str);
	}

	switch (ha->fw_attributes & 0xFF) {
	case 0x7:
		strcat(str, "EF");
		break;
	case 0x17:
		strcat(str, "TP");
		break;
	case 0x37:
		strcat(str, "IP");
		break;
	case 0x77:
		strcat(str, "VI");
		break;
	default:
		sprintf(un_str, "(%x)", ha->fw_attributes);
		strcat(str, un_str);
		break;
	}
	if (ha->fw_attributes & 0x100)
		strcat(str, "X");

	return (str);
}
static char *
qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str)
{
	struct qla_hw_data *ha = vha->hw;

	sprintf(str, "%d.%02d.%02d (%x)", ha->fw_major_version,
	    ha->fw_minor_version, ha->fw_subminor_version, ha->fw_attributes);
	return str;
}
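/*
 * qla2x00_sp_free_dma
 *    Unmap the data and protection scatter-gather lists and release any
 *    CRC/FCP command context memory attached to the SRB, then return the
 *    SRB to its pool.
 */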
void
qla2x00_sp_free_dma(void *vha, void *ptr)
{
	srb_t *sp = (srb_t *)ptr;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	void *ctx = GET_CMD_CTX_SP(sp);

	if (sp->flags & SRB_DMA_VALID) {
		scsi_dma_unmap(cmd);
		sp->flags &= ~SRB_DMA_VALID;
	}

	if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
		dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
	}

	if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
		/* List assured to be having elements */
		qla2x00_clean_dsd_pool(ha, sp);
		sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
	}

	if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
		dma_pool_free(ha->dl_dma_pool, ctx,
		    ((struct crc_context *)ctx)->crc_ctx_dma);
		sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
	}

	if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
		struct ct6_dsd *ctx1 = (struct ct6_dsd *)ctx;

		dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
		    ctx1->fcp_cmnd_dma);
		list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
		ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
		ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
		mempool_free(ctx1, ha->ctx_mempool);
		ctx1 = NULL;
	}

	CMD_SP(cmd) = NULL;
	qla2x00_rel_sp(sp->fcport->vha, sp);
}
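/*
 * qla2x00_sp_compl
 *    SRB completion callback: drop the SRB reference and, on the final
 *    reference, free its DMA resources and complete the SCSI command.
 */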
static void
qla2x00_sp_compl(void *data, void *ptr, int res)
{
	struct qla_hw_data *ha = (struct qla_hw_data *)data;
	srb_t *sp = (srb_t *)ptr;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	cmd->result = res;

	if (atomic_read(&sp->ref_count) == 0) {
		ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3015,
		    "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
		    sp, GET_CMD_SP(sp));
		if (ql2xextended_error_logging & ql_dbg_io)
			BUG();
		return;
	}
	if (!atomic_dec_and_test(&sp->ref_count))
		return;

	qla2x00_sp_free_dma(ha, sp);
	cmd->scsi_done(cmd);
}
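/*
 * qla2xxx_queuecommand
 *    SCSI mid-layer I/O entry point. Validates the remote port, DIF
 *    support and fcport state, allocates an SRB and hands it to the
 *    ISP-specific start_scsi routine.
 */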
static int
qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(host);
	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	srb_t *sp;
	int rval;

	if (ha->flags.eeh_busy) {
		if (ha->flags.pci_channel_io_perm_failure) {
			ql_dbg(ql_dbg_aer, vha, 0x9010,
			    "PCI Channel IO permanent failure, exiting "
			    "cmd=%p.\n", cmd);
			cmd->result = DID_NO_CONNECT << 16;
		} else {
			ql_dbg(ql_dbg_aer, vha, 0x9011,
			    "EEH_Busy, Requeuing the cmd=%p.\n", cmd);
			cmd->result = DID_REQUEUE << 16;
		}
		goto qc24_fail_command;
	}

	rval = fc_remote_port_chkready(rport);
	if (rval) {
		cmd->result = rval;
		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3003,
		    "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
		    cmd, rval);
		goto qc24_fail_command;
	}

	if (!vha->flags.difdix_supported &&
	    scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
		ql_dbg(ql_dbg_io, vha, 0x3004,
		    "DIF Cap not reg, fail DIF capable cmd's:%p.\n",
		    cmd);
		cmd->result = DID_NO_CONNECT << 16;
		goto qc24_fail_command;
	}

	if (!fcport) {
		cmd->result = DID_NO_CONNECT << 16;
		goto qc24_fail_command;
	}

	if (atomic_read(&fcport->state) != FCS_ONLINE) {
		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
		    atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
			ql_dbg(ql_dbg_io, vha, 0x3005,
			    "Returning DNC, fcport_state=%d loop_state=%d.\n",
			    atomic_read(&fcport->state),
			    atomic_read(&base_vha->loop_state));
			cmd->result = DID_NO_CONNECT << 16;
			goto qc24_fail_command;
		}
		goto qc24_target_busy;
	}

	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp) {
		set_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, &vha->dpc_flags);
		goto qc24_host_busy;
	}

	sp->u.scmd.cmd = cmd;
	sp->type = SRB_SCSI_CMD;
	atomic_set(&sp->ref_count, 1);
	CMD_SP(cmd) = (void *)sp;
	sp->free = qla2x00_sp_free_dma;
	sp->done = qla2x00_sp_compl;

	rval = ha->isp_ops->start_scsi(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3013,
		    "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
		set_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, &vha->dpc_flags);
		goto qc24_host_busy_free_sp;
	}

	return 0;

qc24_host_busy_free_sp:
	qla2x00_sp_free_dma(ha, sp);

qc24_host_busy:
	return SCSI_MLQUEUE_HOST_BUSY;

qc24_target_busy:
	return SCSI_MLQUEUE_TARGET_BUSY;

qc24_fail_command:
	cmd->scsi_done(cmd);

	return 0;
}
/*
 * qla2x00_eh_wait_on_command
 *    Waits for the command to be returned by the Firmware for some
 *    max time.
 *
 * Input:
 *    cmd = Scsi Command to wait on.
 *
 * Return:
 *    Not Found : 0
 *    Found : 1
 */
static int
qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
{
#define ABORT_POLLING_PERIOD	1000
#define ABORT_WAIT_ITER		((10 * 1000) / (ABORT_POLLING_PERIOD))
	unsigned long wait_iter = ABORT_WAIT_ITER;
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	struct qla_hw_data *ha = vha->hw;
	int ret = QLA_SUCCESS;

	if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) {
		ql_dbg(ql_dbg_taskm, vha, 0x8005,
		    "Return:eh_wait.\n");
		return ret;
	}

	while (CMD_SP(cmd) && wait_iter--) {
		msleep(ABORT_POLLING_PERIOD);
	}
	if (CMD_SP(cmd))
		ret = QLA_FUNCTION_FAILED;

	return ret;
}
/*
 * qla2x00_wait_for_hba_online
 *    Wait till the HBA is online after going through
 *    <= MAX_RETRIES_OF_ISP_ABORT or
 *    finally HBA is disabled ie marked offline
 *
 * Input:
 *    ha - pointer to host adapter structure
 *
 * Note:
 *    Does context switching-Release SPIN_LOCK
 *    (if any) before calling this routine.
 *
 * Return:
 *    Success (Adapter is online) : 0
 *    Failed  (Adapter is offline/disabled) : 1
 */
int
qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
{
	int return_status;
	unsigned long wait_online;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
	while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    ha->dpc_active) && time_before(jiffies, wait_online)) {

		msleep(1000);
	}
	if (base_vha->flags.online)
		return_status = QLA_SUCCESS;
	else
		return_status = QLA_FUNCTION_FAILED;

	return (return_status);
}
/*
 * qla2x00_wait_for_reset_ready
 *    Wait till the HBA is online after going through
 *    <= MAX_RETRIES_OF_ISP_ABORT or
 *    finally HBA is disabled ie marked offline or flash
 *    operations are in progress.
 *
 * Input:
 *    ha - pointer to host adapter structure
 *
 * Note:
 *    Does context switching-Release SPIN_LOCK
 *    (if any) before calling this routine.
 *
 * Return:
 *    Success (Adapter is online/no flash ops) : 0
 *    Failed  (Adapter is offline/disabled/flash ops in progress) : 1
 */
static int
qla2x00_wait_for_reset_ready(scsi_qla_host_t *vha)
{
	int return_status;
	unsigned long wait_online;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
	while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    ha->optrom_state != QLA_SWAITING ||
	    ha->dpc_active) && time_before(jiffies, wait_online))
		msleep(1000);

	if (base_vha->flags.online && ha->optrom_state == QLA_SWAITING)
		return_status = QLA_SUCCESS;
	else
		return_status = QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_taskm, vha, 0x8019,
	    "%s return status=%d.\n", __func__, return_status);

	return return_status;
}
int
qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
{
	int return_status;
	unsigned long wait_reset;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ);
	while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    ha->dpc_active) && time_before(jiffies, wait_reset)) {

		msleep(1000);

		if (!test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
		    ha->flags.chip_reset_done)
			break;
	}
	if (ha->flags.chip_reset_done)
		return_status = QLA_SUCCESS;
	else
		return_status = QLA_FUNCTION_FAILED;

	return return_status;
}
static void
sp_get(struct srb *sp)
{
	atomic_inc(&sp->ref_count);
}
/**************************************************************************
* qla2xxx_eh_abort
*
* Description:
*    The abort function will abort the specified command.
*
* Input:
*    cmd = Linux SCSI command packet to be aborted.
*
* Returns:
*    Either SUCCESS or FAILED.
*
* Note:
*    Only return FAILED if command not returned by firmware.
**************************************************************************/
static int
qla2xxx_eh_abort(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	srb_t *sp;
	int ret;
	unsigned int id, lun;
	unsigned long flags;
	int wait = 0;
	struct qla_hw_data *ha = vha->hw;

	if (!CMD_SP(cmd))
		return SUCCESS;

	ret = fc_block_scsi_eh(cmd);
	if (ret != 0)
		return ret;
	ret = SUCCESS;

	id = cmd->device->id;
	lun = cmd->device->lun;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	sp = (srb_t *) CMD_SP(cmd);
	if (!sp) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return SUCCESS;
	}

	ql_dbg(ql_dbg_taskm, vha, 0x8002,
	    "Aborting from RISC nexus=%ld:%d:%d sp=%p cmd=%p\n",
	    vha->host_no, id, lun, sp, cmd);

	/* Get a reference to the sp and drop the lock.*/
	sp_get(sp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	if (ha->isp_ops->abort_command(sp)) {
		ret = FAILED;
		ql_dbg(ql_dbg_taskm, vha, 0x8003,
		    "Abort command mbx failed cmd=%p.\n", cmd);
	} else {
		ql_dbg(ql_dbg_taskm, vha, 0x8004,
		    "Abort command mbx success cmd=%p.\n", cmd);
		wait = 1;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	sp->done(ha, sp, 0);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	/* Did the command return during mailbox execution? */
	if (ret == FAILED && !CMD_SP(cmd))
		ret = SUCCESS;

	/* Wait for the command to be returned. */
	if (wait) {
		if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x8006,
			    "Abort handler timed out cmd=%p.\n", cmd);
			ret = FAILED;
		}
	}

	ql_log(ql_log_info, vha, 0x801c,
	    "Abort command issued nexus=%ld:%d:%d -- %d %x.\n",
	    vha->host_no, id, lun, wait, ret);

	return ret;
}
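/*
 * qla2x00_eh_wait_for_pending_commands
 *    Walk the outstanding command array and wait for every SCSI command
 *    matching the given nexus (whole host, target or LUN, per 'type') to
 *    be returned by the firmware.
 *
 * Return:
 *    QLA_SUCCESS if all matching commands completed, otherwise
 *    QLA_FUNCTION_FAILED.
 */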
int
qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
	unsigned int l, enum nexus_wait_type type)
{
	int cnt, match, status;
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	srb_t *sp;
	struct scsi_cmnd *cmd;

	status = QLA_SUCCESS;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	req = vha->req;
	for (cnt = 1; status == QLA_SUCCESS &&
	    cnt < req->num_outstanding_cmds; cnt++) {
		sp = req->outstanding_cmds[cnt];
		if (!sp)
			continue;
		if (sp->type != SRB_SCSI_CMD)
			continue;
		if (vha->vp_idx != sp->fcport->vha->vp_idx)
			continue;
		match = 0;
		cmd = GET_CMD_SP(sp);
		switch (type) {
		case WAIT_HOST:
			match = 1;
			break;
		case WAIT_TARGET:
			match = cmd->device->id == t;
			break;
		case WAIT_LUN:
			match = (cmd->device->id == t &&
			    cmd->device->lun == l);
			break;
		}
		if (!match)
			continue;

		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		status = qla2x00_eh_wait_on_command(cmd);
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return status;
}
static char *reset_errors[] = {
	"HBA not online",
	"HBA not ready",
	"Task management failed",
	"Waiting for command completions",
};
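/*
 * __qla2xxx_eh_generic_reset
 *    Common helper for the device and target reset handlers: waits for
 *    the HBA to come online, issues the supplied reset callback and then
 *    waits for pending commands on the affected nexus to drain. On
 *    failure, logs the failing step via the reset_errors[] table.
 */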
static int
__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
    struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int, int))
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
	int err;

	if (!fcport) {
		return FAILED;
	}

	err = fc_block_scsi_eh(cmd);
	if (err != 0)
		return err;

	ql_log(ql_log_info, vha, 0x8009,
	    "%s RESET ISSUED nexus=%ld:%d:%d cmd=%p.\n", name, vha->host_no,
	    cmd->device->id, cmd->device->lun, cmd);

	err = 0;
	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x800a,
		    "Wait for hba online failed for cmd=%p.\n", cmd);
		goto eh_reset_failed;
	}
	err = 2;
	if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
	    != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x800c,
		    "do_reset failed for cmd=%p.\n", cmd);
		goto eh_reset_failed;
	}
	err = 3;
	if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
	    cmd->device->lun, type) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x800d,
		    "wait for pending cmds failed for cmd=%p.\n", cmd);
		goto eh_reset_failed;
	}

	ql_log(ql_log_info, vha, 0x800e,
	    "%s RESET SUCCEEDED nexus:%ld:%d:%d cmd=%p.\n", name,
	    vha->host_no, cmd->device->id, cmd->device->lun, cmd);

	return SUCCESS;

eh_reset_failed:
	ql_log(ql_log_info, vha, 0x800f,
	    "%s RESET FAILED: %s nexus=%ld:%d:%d cmd=%p.\n", name,
	    reset_errors[err], vha->host_no, cmd->device->id, cmd->device->lun,
	    cmd);
	return FAILED;
}
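/*
 * qla2xxx_eh_device_reset / qla2xxx_eh_target_reset
 *    SCSI error-handler entry points that issue a LUN or target reset via
 *    __qla2xxx_eh_generic_reset() using the ISP-specific reset callbacks.
 */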
static int
qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	struct qla_hw_data *ha = vha->hw;

	return __qla2xxx_eh_generic_reset("DEVICE", WAIT_LUN, cmd,
	    ha->isp_ops->lun_reset);
}

static int
qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	struct qla_hw_data *ha = vha->hw;

	return __qla2xxx_eh_generic_reset("TARGET", WAIT_TARGET, cmd,
	    ha->isp_ops->target_reset);
}
  977. /**************************************************************************
  978. * qla2xxx_eh_bus_reset
  979. *
  980. * Description:
  981. * The bus reset function will reset the bus and abort any executing
  982. * commands.
  983. *
  984. * Input:
  985. * cmd = Linux SCSI command packet of the command that cause the
  986. * bus reset.
  987. *
  988. * Returns:
  989. * SUCCESS/FAILURE (defined as macro in scsi.h).
  990. *
  991. **************************************************************************/
  992. static int
  993. qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
  994. {
  995. scsi_qla_host_t *vha = shost_priv(cmd->device->host);
  996. fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
  997. int ret = FAILED;
  998. unsigned int id, lun;
  999. id = cmd->device->id;
  1000. lun = cmd->device->lun;
  1001. if (!fcport) {
  1002. return ret;
  1003. }
  1004. ret = fc_block_scsi_eh(cmd);
  1005. if (ret != 0)
  1006. return ret;
  1007. ret = FAILED;
  1008. ql_log(ql_log_info, vha, 0x8012,
  1009. "BUS RESET ISSUED nexus=%ld:%d:%d.\n", vha->host_no, id, lun);
  1010. if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
  1011. ql_log(ql_log_fatal, vha, 0x8013,
  1012. "Wait for hba online failed board disabled.\n");
  1013. goto eh_bus_reset_done;
  1014. }
  1015. if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
  1016. ret = SUCCESS;
  1017. if (ret == FAILED)
  1018. goto eh_bus_reset_done;
  1019. /* Flush outstanding commands. */
  1020. if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) !=
  1021. QLA_SUCCESS) {
  1022. ql_log(ql_log_warn, vha, 0x8014,
  1023. "Wait for pending commands failed.\n");
  1024. ret = FAILED;
  1025. }
  1026. eh_bus_reset_done:
  1027. ql_log(ql_log_warn, vha, 0x802b,
  1028. "BUS RESET %s nexus=%ld:%d:%d.\n",
  1029. (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);
  1030. return ret;
  1031. }
  1032. /**************************************************************************
  1033. * qla2xxx_eh_host_reset
  1034. *
  1035. * Description:
  1036. * The reset function will reset the Adapter.
  1037. *
  1038. * Input:
1039. * cmd = Linux SCSI command packet of the command that caused the
  1040. * adapter reset.
  1041. *
  1042. * Returns:
  1043. * Either SUCCESS or FAILED.
  1044. *
  1045. * Note:
  1046. **************************************************************************/
  1047. static int
  1048. qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
  1049. {
  1050. scsi_qla_host_t *vha = shost_priv(cmd->device->host);
  1051. struct qla_hw_data *ha = vha->hw;
  1052. int ret = FAILED;
  1053. unsigned int id, lun;
  1054. scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
  1055. id = cmd->device->id;
  1056. lun = cmd->device->lun;
  1057. ql_log(ql_log_info, vha, 0x8018,
  1058. "ADAPTER RESET ISSUED nexus=%ld:%d:%d.\n", vha->host_no, id, lun);
  1059. if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS)
  1060. goto eh_host_reset_lock;
  1061. if (vha != base_vha) {
  1062. if (qla2x00_vp_abort_isp(vha))
  1063. goto eh_host_reset_lock;
  1064. } else {
  1065. if (IS_QLA82XX(vha->hw)) {
  1066. if (!qla82xx_fcoe_ctx_reset(vha)) {
  1067. /* Ctx reset success */
  1068. ret = SUCCESS;
  1069. goto eh_host_reset_lock;
  1070. }
  1071. /* fall thru if ctx reset failed */
  1072. }
  1073. if (ha->wq)
  1074. flush_workqueue(ha->wq);
  1075. set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
  1076. if (ha->isp_ops->abort_isp(base_vha)) {
  1077. clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
  1078. /* failed. schedule dpc to try */
  1079. set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
  1080. if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
  1081. ql_log(ql_log_warn, vha, 0x802a,
  1082. "wait for hba online failed.\n");
  1083. goto eh_host_reset_lock;
  1084. }
  1085. }
  1086. clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
  1087. }
1088. /* Wait for outstanding commands to be returned to the OS. */
  1089. if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) ==
  1090. QLA_SUCCESS)
  1091. ret = SUCCESS;
  1092. eh_host_reset_lock:
  1093. ql_log(ql_log_info, vha, 0x8017,
  1094. "ADAPTER RESET %s nexus=%ld:%d:%d.\n",
  1095. (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);
  1096. return ret;
  1097. }
  1098. /*
  1099. * qla2x00_loop_reset
  1100. * Issue loop reset.
  1101. *
  1102. * Input:
  1103. * ha = adapter block pointer.
  1104. *
  1105. * Returns:
  1106. * 0 = success
  1107. */
  1108. int
  1109. qla2x00_loop_reset(scsi_qla_host_t *vha)
  1110. {
  1111. int ret;
  1112. struct fc_port *fcport;
  1113. struct qla_hw_data *ha = vha->hw;
  1114. if (ql2xtargetreset == 1 && ha->flags.enable_target_reset) {
  1115. list_for_each_entry(fcport, &vha->vp_fcports, list) {
  1116. if (fcport->port_type != FCT_TARGET)
  1117. continue;
  1118. ret = ha->isp_ops->target_reset(fcport, 0, 0);
  1119. if (ret != QLA_SUCCESS) {
  1120. ql_dbg(ql_dbg_taskm, vha, 0x802c,
  1121. "Bus Reset failed: Target Reset=%d "
  1122. "d_id=%x.\n", ret, fcport->d_id.b24);
  1123. }
  1124. }
  1125. }
  1126. if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
  1127. atomic_set(&vha->loop_state, LOOP_DOWN);
  1128. atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
  1129. qla2x00_mark_all_devices_lost(vha, 0);
  1130. ret = qla2x00_full_login_lip(vha);
  1131. if (ret != QLA_SUCCESS) {
  1132. ql_dbg(ql_dbg_taskm, vha, 0x802d,
  1133. "full_login_lip=%d.\n", ret);
  1134. }
  1135. }
  1136. if (ha->flags.enable_lip_reset) {
  1137. ret = qla2x00_lip_reset(vha);
  1138. if (ret != QLA_SUCCESS)
  1139. ql_dbg(ql_dbg_taskm, vha, 0x802e,
  1140. "lip_reset failed (%d).\n", ret);
  1141. }
  1142. /* Issue marker command only when we are going to start the I/O */
  1143. vha->marker_needed = 1;
  1144. return QLA_SUCCESS;
  1145. }
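/*
 * qla2x00_abort_all_cmds() - walk every request queue and complete all
 * outstanding srbs with the given result code. Runs under hardware_lock
 * while flushing the outstanding_cmds arrays during teardown/abort paths.
 */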
  1146. void
  1147. qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
  1148. {
  1149. int que, cnt;
  1150. unsigned long flags;
  1151. srb_t *sp;
  1152. struct qla_hw_data *ha = vha->hw;
  1153. struct req_que *req;
  1154. spin_lock_irqsave(&ha->hardware_lock, flags);
  1155. for (que = 0; que < ha->max_req_queues; que++) {
  1156. req = ha->req_q_map[que];
  1157. if (!req)
  1158. continue;
  1159. if (!req->outstanding_cmds)
  1160. continue;
  1161. for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
  1162. sp = req->outstanding_cmds[cnt];
  1163. if (sp) {
  1164. req->outstanding_cmds[cnt] = NULL;
  1165. sp->done(vha, sp, res);
  1166. }
  1167. }
  1168. }
  1169. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  1170. }
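/*
 * SCSI mid-layer slave hooks: slave_alloc ties sdev->hostdata to the
 * fc_port behind the rport (failing with -ENXIO if the rport is not
 * ready), slave_configure applies DMA alignment and tagged-queueing
 * settings, and slave_destroy drops the hostdata reference.
 */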
  1171. static int
  1172. qla2xxx_slave_alloc(struct scsi_device *sdev)
  1173. {
  1174. struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
  1175. if (!rport || fc_remote_port_chkready(rport))
  1176. return -ENXIO;
  1177. sdev->hostdata = *(fc_port_t **)rport->dd_data;
  1178. return 0;
  1179. }
  1180. static int
  1181. qla2xxx_slave_configure(struct scsi_device *sdev)
  1182. {
  1183. scsi_qla_host_t *vha = shost_priv(sdev->host);
  1184. struct req_que *req = vha->req;
  1185. if (IS_T10_PI_CAPABLE(vha->hw))
  1186. blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
  1187. if (sdev->tagged_supported)
  1188. scsi_activate_tcq(sdev, req->max_q_depth);
  1189. else
  1190. scsi_deactivate_tcq(sdev, req->max_q_depth);
  1191. return 0;
  1192. }
  1193. static void
  1194. qla2xxx_slave_destroy(struct scsi_device *sdev)
  1195. {
  1196. sdev->hostdata = NULL;
  1197. }
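/*
 * Per-sdev queue-depth helpers: qla2x00_handle_queue_full() lets the
 * mid-layer track QUEUE FULL conditions and logs the resulting ramp-down;
 * qla2x00_adjust_sdev_qdepth_up() raises the depth again, bounded by the
 * request queue's max_q_depth.
 */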
  1198. static void qla2x00_handle_queue_full(struct scsi_device *sdev, int qdepth)
  1199. {
  1200. fc_port_t *fcport = (struct fc_port *) sdev->hostdata;
  1201. if (!scsi_track_queue_full(sdev, qdepth))
  1202. return;
  1203. ql_dbg(ql_dbg_io, fcport->vha, 0x3029,
  1204. "Queue depth adjusted-down to %d for nexus=%ld:%d:%d.\n",
  1205. sdev->queue_depth, fcport->vha->host_no, sdev->id, sdev->lun);
  1206. }
  1207. static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
  1208. {
  1209. fc_port_t *fcport = sdev->hostdata;
  1210. struct scsi_qla_host *vha = fcport->vha;
  1211. struct req_que *req = NULL;
  1212. req = vha->req;
  1213. if (!req)
  1214. return;
  1215. if (req->max_q_depth <= sdev->queue_depth || req->max_q_depth < qdepth)
  1216. return;
  1217. if (sdev->ordered_tags)
  1218. scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, qdepth);
  1219. else
  1220. scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth);
  1221. ql_dbg(ql_dbg_io, vha, 0x302a,
  1222. "Queue depth adjusted-up to %d for nexus=%ld:%d:%d.\n",
  1223. sdev->queue_depth, fcport->vha->host_no, sdev->id, sdev->lun);
  1224. }
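/*
 * qla2x00_change_queue_depth() dispatches on the mid-layer reason code
 * (default, queue-full, ramp-up); qla2x00_change_queue_type() switches the
 * device between tagged and untagged queueing.
 */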
  1225. static int
  1226. qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
  1227. {
  1228. switch (reason) {
  1229. case SCSI_QDEPTH_DEFAULT:
  1230. scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
  1231. break;
  1232. case SCSI_QDEPTH_QFULL:
  1233. qla2x00_handle_queue_full(sdev, qdepth);
  1234. break;
  1235. case SCSI_QDEPTH_RAMP_UP:
  1236. qla2x00_adjust_sdev_qdepth_up(sdev, qdepth);
  1237. break;
  1238. default:
  1239. return -EOPNOTSUPP;
  1240. }
  1241. return sdev->queue_depth;
  1242. }
  1243. static int
  1244. qla2x00_change_queue_type(struct scsi_device *sdev, int tag_type)
  1245. {
  1246. if (sdev->tagged_supported) {
  1247. scsi_set_tag_type(sdev, tag_type);
  1248. if (tag_type)
  1249. scsi_activate_tcq(sdev, sdev->queue_depth);
  1250. else
  1251. scsi_deactivate_tcq(sdev, sdev->queue_depth);
  1252. } else
  1253. tag_type = 0;
  1254. return tag_type;
  1255. }
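/*
 * Host-wide adaptive queue depth: ramp-down halves ha->cfg_lun_q_depth
 * (never below cmd_per_lun) and applies it to every device on every vport;
 * ramp-up raises it by one per invocation.
 */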
  1256. static void
  1257. qla2x00_host_ramp_down_queuedepth(scsi_qla_host_t *vha)
  1258. {
  1259. scsi_qla_host_t *vp;
  1260. struct Scsi_Host *shost;
  1261. struct scsi_device *sdev;
  1262. struct qla_hw_data *ha = vha->hw;
  1263. unsigned long flags;
  1264. ha->host_last_rampdown_time = jiffies;
  1265. if (ha->cfg_lun_q_depth <= vha->host->cmd_per_lun)
  1266. return;
  1267. if ((ha->cfg_lun_q_depth / 2) < vha->host->cmd_per_lun)
  1268. ha->cfg_lun_q_depth = vha->host->cmd_per_lun;
  1269. else
  1270. ha->cfg_lun_q_depth = ha->cfg_lun_q_depth / 2;
  1271. /*
  1272. * Geometrically ramp down the queue depth for all devices on this
  1273. * adapter
  1274. */
  1275. spin_lock_irqsave(&ha->vport_slock, flags);
  1276. list_for_each_entry(vp, &ha->vp_list, list) {
  1277. shost = vp->host;
  1278. shost_for_each_device(sdev, shost) {
  1279. if (sdev->queue_depth > shost->cmd_per_lun) {
  1280. if (sdev->queue_depth < ha->cfg_lun_q_depth)
  1281. continue;
  1282. ql_log(ql_log_warn, vp, 0x3031,
  1283. "%ld:%d:%d: Ramping down queue depth to %d",
  1284. vp->host_no, sdev->id, sdev->lun,
  1285. ha->cfg_lun_q_depth);
  1286. qla2x00_change_queue_depth(sdev,
  1287. ha->cfg_lun_q_depth, SCSI_QDEPTH_DEFAULT);
  1288. }
  1289. }
  1290. }
  1291. spin_unlock_irqrestore(&ha->vport_slock, flags);
  1292. return;
  1293. }
  1294. static void
  1295. qla2x00_host_ramp_up_queuedepth(scsi_qla_host_t *vha)
  1296. {
  1297. scsi_qla_host_t *vp;
  1298. struct Scsi_Host *shost;
  1299. struct scsi_device *sdev;
  1300. struct qla_hw_data *ha = vha->hw;
  1301. unsigned long flags;
  1302. ha->host_last_rampup_time = jiffies;
  1303. ha->cfg_lun_q_depth++;
  1304. /*
  1305. * Linearly ramp up the queue depth for all devices on this
  1306. * adapter
  1307. */
  1308. spin_lock_irqsave(&ha->vport_slock, flags);
  1309. list_for_each_entry(vp, &ha->vp_list, list) {
  1310. shost = vp->host;
  1311. shost_for_each_device(sdev, shost) {
  1312. if (sdev->queue_depth > ha->cfg_lun_q_depth)
  1313. continue;
  1314. qla2x00_change_queue_depth(sdev, ha->cfg_lun_q_depth,
  1315. SCSI_QDEPTH_RAMP_UP);
  1316. }
  1317. }
  1318. spin_unlock_irqrestore(&ha->vport_slock, flags);
  1319. return;
  1320. }
  1321. /**
  1322. * qla2x00_config_dma_addressing() - Configure OS DMA addressing method.
  1323. * @ha: HA context
  1324. *
1325. * At exit, @ha's flags.enable_64bit_addressing is set to indicate the
1326. * supported addressing method.
  1327. */
  1328. static void
  1329. qla2x00_config_dma_addressing(struct qla_hw_data *ha)
  1330. {
  1331. /* Assume a 32bit DMA mask. */
  1332. ha->flags.enable_64bit_addressing = 0;
  1333. if (!dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
  1334. /* Any upper-dword bits set? */
  1335. if (MSD(dma_get_required_mask(&ha->pdev->dev)) &&
  1336. !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
  1337. /* Ok, a 64bit DMA mask is applicable. */
  1338. ha->flags.enable_64bit_addressing = 1;
  1339. ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
  1340. ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
  1341. return;
  1342. }
  1343. }
  1344. dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
  1345. pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(32));
  1346. }
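/*
 * Interrupt enable/disable helpers for ISP2xxx and ISP24xx-family chips:
 * write the interrupt control register and read it back, presumably to
 * flush the posted PCI write before releasing hardware_lock.
 */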
  1347. static void
  1348. qla2x00_enable_intrs(struct qla_hw_data *ha)
  1349. {
  1350. unsigned long flags = 0;
  1351. struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
  1352. spin_lock_irqsave(&ha->hardware_lock, flags);
  1353. ha->interrupts_on = 1;
  1354. /* enable risc and host interrupts */
  1355. WRT_REG_WORD(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
  1356. RD_REG_WORD(&reg->ictrl);
  1357. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  1358. }
  1359. static void
  1360. qla2x00_disable_intrs(struct qla_hw_data *ha)
  1361. {
  1362. unsigned long flags = 0;
  1363. struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
  1364. spin_lock_irqsave(&ha->hardware_lock, flags);
  1365. ha->interrupts_on = 0;
  1366. /* disable risc and host interrupts */
  1367. WRT_REG_WORD(&reg->ictrl, 0);
  1368. RD_REG_WORD(&reg->ictrl);
  1369. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  1370. }
  1371. static void
  1372. qla24xx_enable_intrs(struct qla_hw_data *ha)
  1373. {
  1374. unsigned long flags = 0;
  1375. struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
  1376. spin_lock_irqsave(&ha->hardware_lock, flags);
  1377. ha->interrupts_on = 1;
  1378. WRT_REG_DWORD(&reg->ictrl, ICRX_EN_RISC_INT);
  1379. RD_REG_DWORD(&reg->ictrl);
  1380. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  1381. }
  1382. static void
  1383. qla24xx_disable_intrs(struct qla_hw_data *ha)
  1384. {
  1385. unsigned long flags = 0;
  1386. struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
  1387. if (IS_NOPOLLING_TYPE(ha))
  1388. return;
  1389. spin_lock_irqsave(&ha->hardware_lock, flags);
  1390. ha->interrupts_on = 0;
  1391. WRT_REG_DWORD(&reg->ictrl, 0);
  1392. RD_REG_DWORD(&reg->ictrl);
  1393. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  1394. }
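/*
 * qla2x00_iospace_config() - reserve the PCI regions, pick up the optional
 * PIO BAR (only needed for flash access on ISP2312 v2), map the MMIO BAR,
 * and, on MQ-capable parts (25xx/81xx), map BAR3 and size the request/
 * response queue counts from the MSI-X vector count.
 */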
  1395. static int
  1396. qla2x00_iospace_config(struct qla_hw_data *ha)
  1397. {
  1398. resource_size_t pio;
  1399. uint16_t msix;
  1400. int cpus;
  1401. if (pci_request_selected_regions(ha->pdev, ha->bars,
  1402. QLA2XXX_DRIVER_NAME)) {
  1403. ql_log_pci(ql_log_fatal, ha->pdev, 0x0011,
  1404. "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
  1405. pci_name(ha->pdev));
  1406. goto iospace_error_exit;
  1407. }
  1408. if (!(ha->bars & 1))
  1409. goto skip_pio;
  1410. /* We only need PIO for Flash operations on ISP2312 v2 chips. */
  1411. pio = pci_resource_start(ha->pdev, 0);
  1412. if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
  1413. if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
  1414. ql_log_pci(ql_log_warn, ha->pdev, 0x0012,
  1415. "Invalid pci I/O region size (%s).\n",
  1416. pci_name(ha->pdev));
  1417. pio = 0;
  1418. }
  1419. } else {
  1420. ql_log_pci(ql_log_warn, ha->pdev, 0x0013,
  1421. "Region #0 no a PIO resource (%s).\n",
  1422. pci_name(ha->pdev));
  1423. pio = 0;
  1424. }
  1425. ha->pio_address = pio;
  1426. ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014,
  1427. "PIO address=%llu.\n",
  1428. (unsigned long long)ha->pio_address);
  1429. skip_pio:
  1430. /* Use MMIO operations for all accesses. */
  1431. if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
  1432. ql_log_pci(ql_log_fatal, ha->pdev, 0x0015,
  1433. "Region #1 not an MMIO resource (%s), aborting.\n",
  1434. pci_name(ha->pdev));
  1435. goto iospace_error_exit;
  1436. }
  1437. if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
  1438. ql_log_pci(ql_log_fatal, ha->pdev, 0x0016,
  1439. "Invalid PCI mem region size (%s), aborting.\n",
  1440. pci_name(ha->pdev));
  1441. goto iospace_error_exit;
  1442. }
  1443. ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
  1444. if (!ha->iobase) {
  1445. ql_log_pci(ql_log_fatal, ha->pdev, 0x0017,
  1446. "Cannot remap MMIO (%s), aborting.\n",
  1447. pci_name(ha->pdev));
  1448. goto iospace_error_exit;
  1449. }
  1450. /* Determine queue resources */
  1451. ha->max_req_queues = ha->max_rsp_queues = 1;
  1452. if ((ql2xmaxqueues <= 1 && !ql2xmultique_tag) ||
  1453. (ql2xmaxqueues > 1 && ql2xmultique_tag) ||
  1454. (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
  1455. goto mqiobase_exit;
  1456. ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
  1457. pci_resource_len(ha->pdev, 3));
  1458. if (ha->mqiobase) {
  1459. ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018,
  1460. "MQIO Base=%p.\n", ha->mqiobase);
  1461. /* Read MSIX vector size of the board */
  1462. pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
  1463. ha->msix_count = msix;
  1464. /* Max queues are bounded by available msix vectors */
  1465. /* queue 0 uses two msix vectors */
  1466. if (ql2xmultique_tag) {
  1467. cpus = num_online_cpus();
  1468. ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
  1469. (cpus + 1) : (ha->msix_count - 1);
  1470. ha->max_req_queues = 2;
  1471. } else if (ql2xmaxqueues > 1) {
  1472. ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
  1473. QLA_MQ_SIZE : ql2xmaxqueues;
  1474. ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc008,
  1475. "QoS mode set, max no of request queues:%d.\n",
  1476. ha->max_req_queues);
  1477. ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0019,
  1478. "QoS mode set, max no of request queues:%d.\n",
  1479. ha->max_req_queues);
  1480. }
  1481. ql_log_pci(ql_log_info, ha->pdev, 0x001a,
  1482. "MSI-X vector count: %d.\n", msix);
  1483. } else
  1484. ql_log_pci(ql_log_info, ha->pdev, 0x001b,
  1485. "BAR 3 not enabled.\n");
  1486. mqiobase_exit:
  1487. ha->msix_count = ha->max_rsp_queues + 1;
  1488. ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c,
  1489. "MSIX Count:%d.\n", ha->msix_count);
  1490. return (0);
  1491. iospace_error_exit:
  1492. return (-ENOMEM);
  1493. }
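/*
 * qla83xx_iospace_config() - ISP83xx variant: region 0 is always MMIO,
 * region 4 (mbar 2) carries the multiqueue registers, and region 2 is
 * mapped for MSI-X use; the MSI-X control word read from config space
 * bounds the queue counts.
 */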
  1494. static int
  1495. qla83xx_iospace_config(struct qla_hw_data *ha)
  1496. {
  1497. uint16_t msix;
  1498. int cpus;
  1499. if (pci_request_selected_regions(ha->pdev, ha->bars,
  1500. QLA2XXX_DRIVER_NAME)) {
  1501. ql_log_pci(ql_log_fatal, ha->pdev, 0x0117,
  1502. "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
  1503. pci_name(ha->pdev));
  1504. goto iospace_error_exit;
  1505. }
  1506. /* Use MMIO operations for all accesses. */
  1507. if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
  1508. ql_log_pci(ql_log_warn, ha->pdev, 0x0118,
  1509. "Invalid pci I/O region size (%s).\n",
  1510. pci_name(ha->pdev));
  1511. goto iospace_error_exit;
  1512. }
  1513. if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
  1514. ql_log_pci(ql_log_warn, ha->pdev, 0x0119,
  1515. "Invalid PCI mem region size (%s), aborting\n",
  1516. pci_name(ha->pdev));
  1517. goto iospace_error_exit;
  1518. }
  1519. ha->iobase = ioremap(pci_resource_start(ha->pdev, 0), MIN_IOBASE_LEN);
  1520. if (!ha->iobase) {
  1521. ql_log_pci(ql_log_fatal, ha->pdev, 0x011a,
  1522. "Cannot remap MMIO (%s), aborting.\n",
  1523. pci_name(ha->pdev));
  1524. goto iospace_error_exit;
  1525. }
1526. /* 64bit PCI BAR - BAR2 will correspond to region 4 */
1527. /* 83XX/26XX always use MQ type access for queues
1528. * - mbar 2, a.k.a. region 4 */
  1529. ha->max_req_queues = ha->max_rsp_queues = 1;
  1530. ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 4),
  1531. pci_resource_len(ha->pdev, 4));
  1532. if (!ha->mqiobase) {
  1533. ql_log_pci(ql_log_fatal, ha->pdev, 0x011d,
  1534. "BAR2/region4 not enabled\n");
  1535. goto mqiobase_exit;
  1536. }
  1537. ha->msixbase = ioremap(pci_resource_start(ha->pdev, 2),
  1538. pci_resource_len(ha->pdev, 2));
  1539. if (ha->msixbase) {
  1540. /* Read MSIX vector size of the board */
  1541. pci_read_config_word(ha->pdev,
  1542. QLA_83XX_PCI_MSIX_CONTROL, &msix);
  1543. ha->msix_count = msix;
  1544. /* Max queues are bounded by available msix vectors */
  1545. /* queue 0 uses two msix vectors */
  1546. if (ql2xmultique_tag) {
  1547. cpus = num_online_cpus();
  1548. ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
  1549. (cpus + 1) : (ha->msix_count - 1);
  1550. ha->max_req_queues = 2;
  1551. } else if (ql2xmaxqueues > 1) {
  1552. ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
  1553. QLA_MQ_SIZE : ql2xmaxqueues;
  1554. ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc00c,
  1555. "QoS mode set, max no of request queues:%d.\n",
  1556. ha->max_req_queues);
  1557. ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
  1558. "QoS mode set, max no of request queues:%d.\n",
  1559. ha->max_req_queues);
  1560. }
  1561. ql_log_pci(ql_log_info, ha->pdev, 0x011c,
  1562. "MSI-X vector count: %d.\n", msix);
  1563. } else
  1564. ql_log_pci(ql_log_info, ha->pdev, 0x011e,
  1565. "BAR 1 not enabled.\n");
  1566. mqiobase_exit:
  1567. ha->msix_count = ha->max_rsp_queues + 1;
  1568. qlt_83xx_iospace_config(ha);
  1569. ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011f,
  1570. "MSIX Count:%d.\n", ha->msix_count);
  1571. return 0;
  1572. iospace_error_exit:
  1573. return -ENOMEM;
  1574. }
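/*
 * Per-chip-family isp_operations dispatch tables. qla2x00_probe_one()
 * selects one of these based on the PCI device ID so the rest of the
 * driver can stay chip-agnostic behind ha->isp_ops.
 */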
  1575. static struct isp_operations qla2100_isp_ops = {
  1576. .pci_config = qla2100_pci_config,
  1577. .reset_chip = qla2x00_reset_chip,
  1578. .chip_diag = qla2x00_chip_diag,
  1579. .config_rings = qla2x00_config_rings,
  1580. .reset_adapter = qla2x00_reset_adapter,
  1581. .nvram_config = qla2x00_nvram_config,
  1582. .update_fw_options = qla2x00_update_fw_options,
  1583. .load_risc = qla2x00_load_risc,
  1584. .pci_info_str = qla2x00_pci_info_str,
  1585. .fw_version_str = qla2x00_fw_version_str,
  1586. .intr_handler = qla2100_intr_handler,
  1587. .enable_intrs = qla2x00_enable_intrs,
  1588. .disable_intrs = qla2x00_disable_intrs,
  1589. .abort_command = qla2x00_abort_command,
  1590. .target_reset = qla2x00_abort_target,
  1591. .lun_reset = qla2x00_lun_reset,
  1592. .fabric_login = qla2x00_login_fabric,
  1593. .fabric_logout = qla2x00_fabric_logout,
  1594. .calc_req_entries = qla2x00_calc_iocbs_32,
  1595. .build_iocbs = qla2x00_build_scsi_iocbs_32,
  1596. .prep_ms_iocb = qla2x00_prep_ms_iocb,
  1597. .prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb,
  1598. .read_nvram = qla2x00_read_nvram_data,
  1599. .write_nvram = qla2x00_write_nvram_data,
  1600. .fw_dump = qla2100_fw_dump,
  1601. .beacon_on = NULL,
  1602. .beacon_off = NULL,
  1603. .beacon_blink = NULL,
  1604. .read_optrom = qla2x00_read_optrom_data,
  1605. .write_optrom = qla2x00_write_optrom_data,
  1606. .get_flash_version = qla2x00_get_flash_version,
  1607. .start_scsi = qla2x00_start_scsi,
  1608. .abort_isp = qla2x00_abort_isp,
  1609. .iospace_config = qla2x00_iospace_config,
  1610. };
  1611. static struct isp_operations qla2300_isp_ops = {
  1612. .pci_config = qla2300_pci_config,
  1613. .reset_chip = qla2x00_reset_chip,
  1614. .chip_diag = qla2x00_chip_diag,
  1615. .config_rings = qla2x00_config_rings,
  1616. .reset_adapter = qla2x00_reset_adapter,
  1617. .nvram_config = qla2x00_nvram_config,
  1618. .update_fw_options = qla2x00_update_fw_options,
  1619. .load_risc = qla2x00_load_risc,
  1620. .pci_info_str = qla2x00_pci_info_str,
  1621. .fw_version_str = qla2x00_fw_version_str,
  1622. .intr_handler = qla2300_intr_handler,
  1623. .enable_intrs = qla2x00_enable_intrs,
  1624. .disable_intrs = qla2x00_disable_intrs,
  1625. .abort_command = qla2x00_abort_command,
  1626. .target_reset = qla2x00_abort_target,
  1627. .lun_reset = qla2x00_lun_reset,
  1628. .fabric_login = qla2x00_login_fabric,
  1629. .fabric_logout = qla2x00_fabric_logout,
  1630. .calc_req_entries = qla2x00_calc_iocbs_32,
  1631. .build_iocbs = qla2x00_build_scsi_iocbs_32,
  1632. .prep_ms_iocb = qla2x00_prep_ms_iocb,
  1633. .prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb,
  1634. .read_nvram = qla2x00_read_nvram_data,
  1635. .write_nvram = qla2x00_write_nvram_data,
  1636. .fw_dump = qla2300_fw_dump,
  1637. .beacon_on = qla2x00_beacon_on,
  1638. .beacon_off = qla2x00_beacon_off,
  1639. .beacon_blink = qla2x00_beacon_blink,
  1640. .read_optrom = qla2x00_read_optrom_data,
  1641. .write_optrom = qla2x00_write_optrom_data,
  1642. .get_flash_version = qla2x00_get_flash_version,
  1643. .start_scsi = qla2x00_start_scsi,
  1644. .abort_isp = qla2x00_abort_isp,
  1645. .iospace_config = qla2x00_iospace_config,
  1646. };
  1647. static struct isp_operations qla24xx_isp_ops = {
  1648. .pci_config = qla24xx_pci_config,
  1649. .reset_chip = qla24xx_reset_chip,
  1650. .chip_diag = qla24xx_chip_diag,
  1651. .config_rings = qla24xx_config_rings,
  1652. .reset_adapter = qla24xx_reset_adapter,
  1653. .nvram_config = qla24xx_nvram_config,
  1654. .update_fw_options = qla24xx_update_fw_options,
  1655. .load_risc = qla24xx_load_risc,
  1656. .pci_info_str = qla24xx_pci_info_str,
  1657. .fw_version_str = qla24xx_fw_version_str,
  1658. .intr_handler = qla24xx_intr_handler,
  1659. .enable_intrs = qla24xx_enable_intrs,
  1660. .disable_intrs = qla24xx_disable_intrs,
  1661. .abort_command = qla24xx_abort_command,
  1662. .target_reset = qla24xx_abort_target,
  1663. .lun_reset = qla24xx_lun_reset,
  1664. .fabric_login = qla24xx_login_fabric,
  1665. .fabric_logout = qla24xx_fabric_logout,
  1666. .calc_req_entries = NULL,
  1667. .build_iocbs = NULL,
  1668. .prep_ms_iocb = qla24xx_prep_ms_iocb,
  1669. .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
  1670. .read_nvram = qla24xx_read_nvram_data,
  1671. .write_nvram = qla24xx_write_nvram_data,
  1672. .fw_dump = qla24xx_fw_dump,
  1673. .beacon_on = qla24xx_beacon_on,
  1674. .beacon_off = qla24xx_beacon_off,
  1675. .beacon_blink = qla24xx_beacon_blink,
  1676. .read_optrom = qla24xx_read_optrom_data,
  1677. .write_optrom = qla24xx_write_optrom_data,
  1678. .get_flash_version = qla24xx_get_flash_version,
  1679. .start_scsi = qla24xx_start_scsi,
  1680. .abort_isp = qla2x00_abort_isp,
  1681. .iospace_config = qla2x00_iospace_config,
  1682. };
  1683. static struct isp_operations qla25xx_isp_ops = {
  1684. .pci_config = qla25xx_pci_config,
  1685. .reset_chip = qla24xx_reset_chip,
  1686. .chip_diag = qla24xx_chip_diag,
  1687. .config_rings = qla24xx_config_rings,
  1688. .reset_adapter = qla24xx_reset_adapter,
  1689. .nvram_config = qla24xx_nvram_config,
  1690. .update_fw_options = qla24xx_update_fw_options,
  1691. .load_risc = qla24xx_load_risc,
  1692. .pci_info_str = qla24xx_pci_info_str,
  1693. .fw_version_str = qla24xx_fw_version_str,
  1694. .intr_handler = qla24xx_intr_handler,
  1695. .enable_intrs = qla24xx_enable_intrs,
  1696. .disable_intrs = qla24xx_disable_intrs,
  1697. .abort_command = qla24xx_abort_command,
  1698. .target_reset = qla24xx_abort_target,
  1699. .lun_reset = qla24xx_lun_reset,
  1700. .fabric_login = qla24xx_login_fabric,
  1701. .fabric_logout = qla24xx_fabric_logout,
  1702. .calc_req_entries = NULL,
  1703. .build_iocbs = NULL,
  1704. .prep_ms_iocb = qla24xx_prep_ms_iocb,
  1705. .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
  1706. .read_nvram = qla25xx_read_nvram_data,
  1707. .write_nvram = qla25xx_write_nvram_data,
  1708. .fw_dump = qla25xx_fw_dump,
  1709. .beacon_on = qla24xx_beacon_on,
  1710. .beacon_off = qla24xx_beacon_off,
  1711. .beacon_blink = qla24xx_beacon_blink,
  1712. .read_optrom = qla25xx_read_optrom_data,
  1713. .write_optrom = qla24xx_write_optrom_data,
  1714. .get_flash_version = qla24xx_get_flash_version,
  1715. .start_scsi = qla24xx_dif_start_scsi,
  1716. .abort_isp = qla2x00_abort_isp,
  1717. .iospace_config = qla2x00_iospace_config,
  1718. };
  1719. static struct isp_operations qla81xx_isp_ops = {
  1720. .pci_config = qla25xx_pci_config,
  1721. .reset_chip = qla24xx_reset_chip,
  1722. .chip_diag = qla24xx_chip_diag,
  1723. .config_rings = qla24xx_config_rings,
  1724. .reset_adapter = qla24xx_reset_adapter,
  1725. .nvram_config = qla81xx_nvram_config,
  1726. .update_fw_options = qla81xx_update_fw_options,
  1727. .load_risc = qla81xx_load_risc,
  1728. .pci_info_str = qla24xx_pci_info_str,
  1729. .fw_version_str = qla24xx_fw_version_str,
  1730. .intr_handler = qla24xx_intr_handler,
  1731. .enable_intrs = qla24xx_enable_intrs,
  1732. .disable_intrs = qla24xx_disable_intrs,
  1733. .abort_command = qla24xx_abort_command,
  1734. .target_reset = qla24xx_abort_target,
  1735. .lun_reset = qla24xx_lun_reset,
  1736. .fabric_login = qla24xx_login_fabric,
  1737. .fabric_logout = qla24xx_fabric_logout,
  1738. .calc_req_entries = NULL,
  1739. .build_iocbs = NULL,
  1740. .prep_ms_iocb = qla24xx_prep_ms_iocb,
  1741. .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
  1742. .read_nvram = NULL,
  1743. .write_nvram = NULL,
  1744. .fw_dump = qla81xx_fw_dump,
  1745. .beacon_on = qla24xx_beacon_on,
  1746. .beacon_off = qla24xx_beacon_off,
  1747. .beacon_blink = qla83xx_beacon_blink,
  1748. .read_optrom = qla25xx_read_optrom_data,
  1749. .write_optrom = qla24xx_write_optrom_data,
  1750. .get_flash_version = qla24xx_get_flash_version,
  1751. .start_scsi = qla24xx_dif_start_scsi,
  1752. .abort_isp = qla2x00_abort_isp,
  1753. .iospace_config = qla2x00_iospace_config,
  1754. };
  1755. static struct isp_operations qla82xx_isp_ops = {
  1756. .pci_config = qla82xx_pci_config,
  1757. .reset_chip = qla82xx_reset_chip,
  1758. .chip_diag = qla24xx_chip_diag,
  1759. .config_rings = qla82xx_config_rings,
  1760. .reset_adapter = qla24xx_reset_adapter,
  1761. .nvram_config = qla81xx_nvram_config,
  1762. .update_fw_options = qla24xx_update_fw_options,
  1763. .load_risc = qla82xx_load_risc,
  1764. .pci_info_str = qla24xx_pci_info_str,
  1765. .fw_version_str = qla24xx_fw_version_str,
  1766. .intr_handler = qla82xx_intr_handler,
  1767. .enable_intrs = qla82xx_enable_intrs,
  1768. .disable_intrs = qla82xx_disable_intrs,
  1769. .abort_command = qla24xx_abort_command,
  1770. .target_reset = qla24xx_abort_target,
  1771. .lun_reset = qla24xx_lun_reset,
  1772. .fabric_login = qla24xx_login_fabric,
  1773. .fabric_logout = qla24xx_fabric_logout,
  1774. .calc_req_entries = NULL,
  1775. .build_iocbs = NULL,
  1776. .prep_ms_iocb = qla24xx_prep_ms_iocb,
  1777. .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
  1778. .read_nvram = qla24xx_read_nvram_data,
  1779. .write_nvram = qla24xx_write_nvram_data,
  1780. .fw_dump = qla24xx_fw_dump,
  1781. .beacon_on = qla82xx_beacon_on,
  1782. .beacon_off = qla82xx_beacon_off,
  1783. .beacon_blink = NULL,
  1784. .read_optrom = qla82xx_read_optrom_data,
  1785. .write_optrom = qla82xx_write_optrom_data,
  1786. .get_flash_version = qla24xx_get_flash_version,
  1787. .start_scsi = qla82xx_start_scsi,
  1788. .abort_isp = qla82xx_abort_isp,
  1789. .iospace_config = qla82xx_iospace_config,
  1790. };
  1791. static struct isp_operations qla83xx_isp_ops = {
  1792. .pci_config = qla25xx_pci_config,
  1793. .reset_chip = qla24xx_reset_chip,
  1794. .chip_diag = qla24xx_chip_diag,
  1795. .config_rings = qla24xx_config_rings,
  1796. .reset_adapter = qla24xx_reset_adapter,
  1797. .nvram_config = qla81xx_nvram_config,
  1798. .update_fw_options = qla81xx_update_fw_options,
  1799. .load_risc = qla81xx_load_risc,
  1800. .pci_info_str = qla24xx_pci_info_str,
  1801. .fw_version_str = qla24xx_fw_version_str,
  1802. .intr_handler = qla24xx_intr_handler,
  1803. .enable_intrs = qla24xx_enable_intrs,
  1804. .disable_intrs = qla24xx_disable_intrs,
  1805. .abort_command = qla24xx_abort_command,
  1806. .target_reset = qla24xx_abort_target,
  1807. .lun_reset = qla24xx_lun_reset,
  1808. .fabric_login = qla24xx_login_fabric,
  1809. .fabric_logout = qla24xx_fabric_logout,
  1810. .calc_req_entries = NULL,
  1811. .build_iocbs = NULL,
  1812. .prep_ms_iocb = qla24xx_prep_ms_iocb,
  1813. .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
  1814. .read_nvram = NULL,
  1815. .write_nvram = NULL,
  1816. .fw_dump = qla83xx_fw_dump,
  1817. .beacon_on = qla24xx_beacon_on,
  1818. .beacon_off = qla24xx_beacon_off,
  1819. .beacon_blink = qla83xx_beacon_blink,
  1820. .read_optrom = qla25xx_read_optrom_data,
  1821. .write_optrom = qla24xx_write_optrom_data,
  1822. .get_flash_version = qla24xx_get_flash_version,
  1823. .start_scsi = qla24xx_dif_start_scsi,
  1824. .abort_isp = qla2x00_abort_isp,
  1825. .iospace_config = qla83xx_iospace_config,
  1826. };
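/*
 * qla2x00_set_isp_flags() - translate the PCI device ID into device_type
 * feature bits (ZIO, FWI2, IIDMA, T10-PI, ...) and the firmware RISC start
 * address, then derive the physical port number (from the interrupt pin
 * register on most parts, from ha->portnum on ISP82xx).
 */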
  1827. static inline void
  1828. qla2x00_set_isp_flags(struct qla_hw_data *ha)
  1829. {
  1830. ha->device_type = DT_EXTENDED_IDS;
  1831. switch (ha->pdev->device) {
  1832. case PCI_DEVICE_ID_QLOGIC_ISP2100:
  1833. ha->device_type |= DT_ISP2100;
  1834. ha->device_type &= ~DT_EXTENDED_IDS;
  1835. ha->fw_srisc_address = RISC_START_ADDRESS_2100;
  1836. break;
  1837. case PCI_DEVICE_ID_QLOGIC_ISP2200:
  1838. ha->device_type |= DT_ISP2200;
  1839. ha->device_type &= ~DT_EXTENDED_IDS;
  1840. ha->fw_srisc_address = RISC_START_ADDRESS_2100;
  1841. break;
  1842. case PCI_DEVICE_ID_QLOGIC_ISP2300:
  1843. ha->device_type |= DT_ISP2300;
  1844. ha->device_type |= DT_ZIO_SUPPORTED;
  1845. ha->fw_srisc_address = RISC_START_ADDRESS_2300;
  1846. break;
  1847. case PCI_DEVICE_ID_QLOGIC_ISP2312:
  1848. ha->device_type |= DT_ISP2312;
  1849. ha->device_type |= DT_ZIO_SUPPORTED;
  1850. ha->fw_srisc_address = RISC_START_ADDRESS_2300;
  1851. break;
  1852. case PCI_DEVICE_ID_QLOGIC_ISP2322:
  1853. ha->device_type |= DT_ISP2322;
  1854. ha->device_type |= DT_ZIO_SUPPORTED;
  1855. if (ha->pdev->subsystem_vendor == 0x1028 &&
  1856. ha->pdev->subsystem_device == 0x0170)
  1857. ha->device_type |= DT_OEM_001;
  1858. ha->fw_srisc_address = RISC_START_ADDRESS_2300;
  1859. break;
  1860. case PCI_DEVICE_ID_QLOGIC_ISP6312:
  1861. ha->device_type |= DT_ISP6312;
  1862. ha->fw_srisc_address = RISC_START_ADDRESS_2300;
  1863. break;
  1864. case PCI_DEVICE_ID_QLOGIC_ISP6322:
  1865. ha->device_type |= DT_ISP6322;
  1866. ha->fw_srisc_address = RISC_START_ADDRESS_2300;
  1867. break;
  1868. case PCI_DEVICE_ID_QLOGIC_ISP2422:
  1869. ha->device_type |= DT_ISP2422;
  1870. ha->device_type |= DT_ZIO_SUPPORTED;
  1871. ha->device_type |= DT_FWI2;
  1872. ha->device_type |= DT_IIDMA;
  1873. ha->fw_srisc_address = RISC_START_ADDRESS_2400;
  1874. break;
  1875. case PCI_DEVICE_ID_QLOGIC_ISP2432:
  1876. ha->device_type |= DT_ISP2432;
  1877. ha->device_type |= DT_ZIO_SUPPORTED;
  1878. ha->device_type |= DT_FWI2;
  1879. ha->device_type |= DT_IIDMA;
  1880. ha->fw_srisc_address = RISC_START_ADDRESS_2400;
  1881. break;
  1882. case PCI_DEVICE_ID_QLOGIC_ISP8432:
  1883. ha->device_type |= DT_ISP8432;
  1884. ha->device_type |= DT_ZIO_SUPPORTED;
  1885. ha->device_type |= DT_FWI2;
  1886. ha->device_type |= DT_IIDMA;
  1887. ha->fw_srisc_address = RISC_START_ADDRESS_2400;
  1888. break;
  1889. case PCI_DEVICE_ID_QLOGIC_ISP5422:
  1890. ha->device_type |= DT_ISP5422;
  1891. ha->device_type |= DT_FWI2;
  1892. ha->fw_srisc_address = RISC_START_ADDRESS_2400;
  1893. break;
  1894. case PCI_DEVICE_ID_QLOGIC_ISP5432:
  1895. ha->device_type |= DT_ISP5432;
  1896. ha->device_type |= DT_FWI2;
  1897. ha->fw_srisc_address = RISC_START_ADDRESS_2400;
  1898. break;
  1899. case PCI_DEVICE_ID_QLOGIC_ISP2532:
  1900. ha->device_type |= DT_ISP2532;
  1901. ha->device_type |= DT_ZIO_SUPPORTED;
  1902. ha->device_type |= DT_FWI2;
  1903. ha->device_type |= DT_IIDMA;
  1904. ha->fw_srisc_address = RISC_START_ADDRESS_2400;
  1905. break;
  1906. case PCI_DEVICE_ID_QLOGIC_ISP8001:
  1907. ha->device_type |= DT_ISP8001;
  1908. ha->device_type |= DT_ZIO_SUPPORTED;
  1909. ha->device_type |= DT_FWI2;
  1910. ha->device_type |= DT_IIDMA;
  1911. ha->fw_srisc_address = RISC_START_ADDRESS_2400;
  1912. break;
  1913. case PCI_DEVICE_ID_QLOGIC_ISP8021:
  1914. ha->device_type |= DT_ISP8021;
  1915. ha->device_type |= DT_ZIO_SUPPORTED;
  1916. ha->device_type |= DT_FWI2;
  1917. ha->fw_srisc_address = RISC_START_ADDRESS_2400;
  1918. /* Initialize 82XX ISP flags */
  1919. qla82xx_init_flags(ha);
  1920. break;
  1921. case PCI_DEVICE_ID_QLOGIC_ISP2031:
  1922. ha->device_type |= DT_ISP2031;
  1923. ha->device_type |= DT_ZIO_SUPPORTED;
  1924. ha->device_type |= DT_FWI2;
  1925. ha->device_type |= DT_IIDMA;
  1926. ha->device_type |= DT_T10_PI;
  1927. ha->fw_srisc_address = RISC_START_ADDRESS_2400;
  1928. break;
  1929. case PCI_DEVICE_ID_QLOGIC_ISP8031:
  1930. ha->device_type |= DT_ISP8031;
  1931. ha->device_type |= DT_ZIO_SUPPORTED;
  1932. ha->device_type |= DT_FWI2;
  1933. ha->device_type |= DT_IIDMA;
  1934. ha->device_type |= DT_T10_PI;
  1935. ha->fw_srisc_address = RISC_START_ADDRESS_2400;
  1936. break;
  1937. }
  1938. if (IS_QLA82XX(ha))
  1939. ha->port_no = !(ha->portnum & 1);
  1940. else
  1941. /* Get adapter physical port no from interrupt pin register. */
  1942. pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
  1943. if (ha->port_no & 1)
  1944. ha->flags.port0 = 1;
  1945. else
  1946. ha->flags.port0 = 0;
  1947. ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b,
  1948. "device_type=0x%x port=%d fw_srisc_address=0x%x.\n",
  1949. ha->device_type, ha->flags.port0, ha->fw_srisc_address);
  1950. }
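/*
 * Async scan hooks: scan_start schedules a loop resync through the DPC
 * flags; scan_finished declares the scan done once the loop is ready or
 * loop_reset_delay seconds have elapsed.
 */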
  1951. static void
  1952. qla2xxx_scan_start(struct Scsi_Host *shost)
  1953. {
  1954. scsi_qla_host_t *vha = shost_priv(shost);
  1955. if (vha->hw->flags.running_gold_fw)
  1956. return;
  1957. set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
  1958. set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
  1959. set_bit(RSCN_UPDATE, &vha->dpc_flags);
  1960. set_bit(NPIV_CONFIG_NEEDED, &vha->dpc_flags);
  1961. }
  1962. static int
  1963. qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
  1964. {
  1965. scsi_qla_host_t *vha = shost_priv(shost);
  1966. if (!vha->host)
  1967. return 1;
  1968. if (time > vha->hw->loop_reset_delay * HZ)
  1969. return 1;
  1970. return atomic_read(&vha->loop_state) == LOOP_READY;
  1971. }
  1972. /*
  1973. * PCI driver interface
  1974. */
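/*
 * qla2x00_probe_one() - the bulk of adapter bring-up: enable the PCI
 * device, allocate and initialize qla_hw_data, pick the isp_ops table and
 * per-chip sizing, map I/O space, allocate the base request/response
 * queues, request IRQs, initialize the adapter, start the DPC thread and
 * timer, and finally register the Scsi_Host and (optionally) scan it.
 */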
  1975. static int
  1976. qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
  1977. {
  1978. int ret = -ENODEV;
  1979. struct Scsi_Host *host;
  1980. scsi_qla_host_t *base_vha = NULL;
  1981. struct qla_hw_data *ha;
  1982. char pci_info[30];
  1983. char fw_str[30], wq_name[30];
  1984. struct scsi_host_template *sht;
  1985. int bars, mem_only = 0;
  1986. uint16_t req_length = 0, rsp_length = 0;
  1987. struct req_que *req = NULL;
  1988. struct rsp_que *rsp = NULL;
  1989. bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
  1990. sht = &qla2xxx_driver_template;
  1991. if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 ||
  1992. pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 ||
  1993. pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8432 ||
  1994. pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 ||
  1995. pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 ||
  1996. pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 ||
  1997. pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 ||
  1998. pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021 ||
  1999. pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 ||
  2000. pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031) {
  2001. bars = pci_select_bars(pdev, IORESOURCE_MEM);
  2002. mem_only = 1;
  2003. ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
  2004. "Mem only adapter.\n");
  2005. }
  2006. ql_dbg_pci(ql_dbg_init, pdev, 0x0008,
  2007. "Bars=%d.\n", bars);
  2008. if (mem_only) {
  2009. if (pci_enable_device_mem(pdev))
  2010. goto probe_out;
  2011. } else {
  2012. if (pci_enable_device(pdev))
  2013. goto probe_out;
  2014. }
  2015. /* This may fail but that's ok */
  2016. pci_enable_pcie_error_reporting(pdev);
  2017. ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
  2018. if (!ha) {
  2019. ql_log_pci(ql_log_fatal, pdev, 0x0009,
  2020. "Unable to allocate memory for ha.\n");
  2021. goto probe_out;
  2022. }
  2023. ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
  2024. "Memory allocated for ha=%p.\n", ha);
  2025. ha->pdev = pdev;
  2026. ha->tgt.enable_class_2 = ql2xenableclass2;
  2027. /* Clear our data area */
  2028. ha->bars = bars;
  2029. ha->mem_only = mem_only;
  2030. spin_lock_init(&ha->hardware_lock);
  2031. spin_lock_init(&ha->vport_slock);
  2032. mutex_init(&ha->selflogin_lock);
  2033. /* Set ISP-type information. */
  2034. qla2x00_set_isp_flags(ha);
  2035. /* Set EEH reset type to fundamental if required by hba */
  2036. if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha) ||
  2037. IS_QLA83XX(ha))
  2038. pdev->needs_freset = 1;
  2039. ha->prev_topology = 0;
  2040. ha->init_cb_size = sizeof(init_cb_t);
  2041. ha->link_data_rate = PORT_SPEED_UNKNOWN;
  2042. ha->optrom_size = OPTROM_SIZE_2300;
  2043. ha->cfg_lun_q_depth = ql2xmaxqdepth;
  2044. /* Assign ISP specific operations. */
  2045. if (IS_QLA2100(ha)) {
  2046. ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
  2047. ha->mbx_count = MAILBOX_REGISTER_COUNT_2100;
  2048. req_length = REQUEST_ENTRY_CNT_2100;
  2049. rsp_length = RESPONSE_ENTRY_CNT_2100;
  2050. ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
  2051. ha->gid_list_info_size = 4;
  2052. ha->flash_conf_off = ~0;
  2053. ha->flash_data_off = ~0;
  2054. ha->nvram_conf_off = ~0;
  2055. ha->nvram_data_off = ~0;
  2056. ha->isp_ops = &qla2100_isp_ops;
  2057. } else if (IS_QLA2200(ha)) {
  2058. ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
  2059. ha->mbx_count = MAILBOX_REGISTER_COUNT_2200;
  2060. req_length = REQUEST_ENTRY_CNT_2200;
  2061. rsp_length = RESPONSE_ENTRY_CNT_2100;
  2062. ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
  2063. ha->gid_list_info_size = 4;
  2064. ha->flash_conf_off = ~0;
  2065. ha->flash_data_off = ~0;
  2066. ha->nvram_conf_off = ~0;
  2067. ha->nvram_data_off = ~0;
  2068. ha->isp_ops = &qla2100_isp_ops;
  2069. } else if (IS_QLA23XX(ha)) {
  2070. ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
  2071. ha->mbx_count = MAILBOX_REGISTER_COUNT;
  2072. req_length = REQUEST_ENTRY_CNT_2200;
  2073. rsp_length = RESPONSE_ENTRY_CNT_2300;
  2074. ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
  2075. ha->gid_list_info_size = 6;
  2076. if (IS_QLA2322(ha) || IS_QLA6322(ha))
  2077. ha->optrom_size = OPTROM_SIZE_2322;
  2078. ha->flash_conf_off = ~0;
  2079. ha->flash_data_off = ~0;
  2080. ha->nvram_conf_off = ~0;
  2081. ha->nvram_data_off = ~0;
  2082. ha->isp_ops = &qla2300_isp_ops;
  2083. } else if (IS_QLA24XX_TYPE(ha)) {
  2084. ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
  2085. ha->mbx_count = MAILBOX_REGISTER_COUNT;
  2086. req_length = REQUEST_ENTRY_CNT_24XX;
  2087. rsp_length = RESPONSE_ENTRY_CNT_2300;
  2088. ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
  2089. ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
  2090. ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
  2091. ha->gid_list_info_size = 8;
  2092. ha->optrom_size = OPTROM_SIZE_24XX;
  2093. ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA24XX;
  2094. ha->isp_ops = &qla24xx_isp_ops;
  2095. ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
  2096. ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
  2097. ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
  2098. ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
  2099. } else if (IS_QLA25XX(ha)) {
  2100. ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
  2101. ha->mbx_count = MAILBOX_REGISTER_COUNT;
  2102. req_length = REQUEST_ENTRY_CNT_24XX;
  2103. rsp_length = RESPONSE_ENTRY_CNT_2300;
  2104. ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
  2105. ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
  2106. ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
  2107. ha->gid_list_info_size = 8;
  2108. ha->optrom_size = OPTROM_SIZE_25XX;
  2109. ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
  2110. ha->isp_ops = &qla25xx_isp_ops;
  2111. ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
  2112. ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
  2113. ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
  2114. ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
  2115. } else if (IS_QLA81XX(ha)) {
  2116. ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
  2117. ha->mbx_count = MAILBOX_REGISTER_COUNT;
  2118. req_length = REQUEST_ENTRY_CNT_24XX;
  2119. rsp_length = RESPONSE_ENTRY_CNT_2300;
  2120. ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
  2121. ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
  2122. ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
  2123. ha->gid_list_info_size = 8;
  2124. ha->optrom_size = OPTROM_SIZE_81XX;
  2125. ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
  2126. ha->isp_ops = &qla81xx_isp_ops;
  2127. ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
  2128. ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
  2129. ha->nvram_conf_off = ~0;
  2130. ha->nvram_data_off = ~0;
  2131. } else if (IS_QLA82XX(ha)) {
  2132. ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
  2133. ha->mbx_count = MAILBOX_REGISTER_COUNT;
  2134. req_length = REQUEST_ENTRY_CNT_82XX;
  2135. rsp_length = RESPONSE_ENTRY_CNT_82XX;
  2136. ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
  2137. ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
  2138. ha->gid_list_info_size = 8;
  2139. ha->optrom_size = OPTROM_SIZE_82XX;
  2140. ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
  2141. ha->isp_ops = &qla82xx_isp_ops;
  2142. ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
  2143. ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
  2144. ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
  2145. ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
  2146. } else if (IS_QLA83XX(ha)) {
  2147. ha->portnum = PCI_FUNC(ha->pdev->devfn);
  2148. ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
  2149. ha->mbx_count = MAILBOX_REGISTER_COUNT;
  2150. req_length = REQUEST_ENTRY_CNT_24XX;
  2151. rsp_length = RESPONSE_ENTRY_CNT_2300;
  2152. ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
  2153. ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
  2154. ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
  2155. ha->gid_list_info_size = 8;
  2156. ha->optrom_size = OPTROM_SIZE_83XX;
  2157. ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
  2158. ha->isp_ops = &qla83xx_isp_ops;
  2159. ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
  2160. ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
  2161. ha->nvram_conf_off = ~0;
  2162. ha->nvram_data_off = ~0;
  2163. }
  2164. ql_dbg_pci(ql_dbg_init, pdev, 0x001e,
  2165. "mbx_count=%d, req_length=%d, "
  2166. "rsp_length=%d, max_loop_id=%d, init_cb_size=%d, "
  2167. "gid_list_info_size=%d, optrom_size=%d, nvram_npiv_size=%d, "
  2168. "max_fibre_devices=%d.\n",
  2169. ha->mbx_count, req_length, rsp_length, ha->max_loop_id,
  2170. ha->init_cb_size, ha->gid_list_info_size, ha->optrom_size,
  2171. ha->nvram_npiv_size, ha->max_fibre_devices);
  2172. ql_dbg_pci(ql_dbg_init, pdev, 0x001f,
  2173. "isp_ops=%p, flash_conf_off=%d, "
  2174. "flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n",
  2175. ha->isp_ops, ha->flash_conf_off, ha->flash_data_off,
  2176. ha->nvram_conf_off, ha->nvram_data_off);
  2177. /* Configure PCI I/O space */
  2178. ret = ha->isp_ops->iospace_config(ha);
  2179. if (ret)
  2180. goto iospace_config_failed;
  2181. ql_log_pci(ql_log_info, pdev, 0x001d,
  2182. "Found an ISP%04X irq %d iobase 0x%p.\n",
  2183. pdev->device, pdev->irq, ha->iobase);
  2184. mutex_init(&ha->vport_lock);
  2185. init_completion(&ha->mbx_cmd_comp);
  2186. complete(&ha->mbx_cmd_comp);
  2187. init_completion(&ha->mbx_intr_comp);
  2188. init_completion(&ha->dcbx_comp);
  2189. init_completion(&ha->lb_portup_comp);
  2190. set_bit(0, (unsigned long *) ha->vp_idx_map);
  2191. qla2x00_config_dma_addressing(ha);
  2192. ql_dbg_pci(ql_dbg_init, pdev, 0x0020,
  2193. "64 Bit addressing is %s.\n",
2194. ha->flags.enable_64bit_addressing ? "enabled" :
2195. "disabled");
  2196. ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp);
  2197. if (!ret) {
  2198. ql_log_pci(ql_log_fatal, pdev, 0x0031,
  2199. "Failed to allocate memory for adapter, aborting.\n");
  2200. goto probe_hw_failed;
  2201. }
  2202. req->max_q_depth = MAX_Q_DEPTH;
  2203. if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU)
  2204. req->max_q_depth = ql2xmaxqdepth;
  2205. base_vha = qla2x00_create_host(sht, ha);
  2206. if (!base_vha) {
  2207. ret = -ENOMEM;
  2208. qla2x00_mem_free(ha);
  2209. qla2x00_free_req_que(ha, req);
  2210. qla2x00_free_rsp_que(ha, rsp);
  2211. goto probe_hw_failed;
  2212. }
  2213. pci_set_drvdata(pdev, base_vha);
  2214. host = base_vha->host;
  2215. base_vha->req = req;
  2216. host->can_queue = req->length + 128;
  2217. if (IS_QLA2XXX_MIDTYPE(ha))
  2218. base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
  2219. else
  2220. base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER +
  2221. base_vha->vp_idx;
  2222. /* Set the SG table size based on ISP type */
  2223. if (!IS_FWI2_CAPABLE(ha)) {
  2224. if (IS_QLA2100(ha))
  2225. host->sg_tablesize = 32;
  2226. } else {
  2227. if (!IS_QLA82XX(ha))
  2228. host->sg_tablesize = QLA_SG_ALL;
  2229. }
  2230. ql_dbg(ql_dbg_init, base_vha, 0x0032,
  2231. "can_queue=%d, req=%p, "
  2232. "mgmt_svr_loop_id=%d, sg_tablesize=%d.\n",
  2233. host->can_queue, base_vha->req,
  2234. base_vha->mgmt_svr_loop_id, host->sg_tablesize);
  2235. host->max_id = ha->max_fibre_devices;
  2236. host->cmd_per_lun = 3;
  2237. host->unique_id = host->host_no;
  2238. if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
  2239. host->max_cmd_len = 32;
  2240. else
  2241. host->max_cmd_len = MAX_CMDSZ;
  2242. host->max_channel = MAX_BUSES - 1;
  2243. host->max_lun = ql2xmaxlun;
  2244. host->transportt = qla2xxx_transport_template;
  2245. sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC);
  2246. ql_dbg(ql_dbg_init, base_vha, 0x0033,
  2247. "max_id=%d this_id=%d "
  2248. "cmd_per_len=%d unique_id=%d max_cmd_len=%d max_channel=%d "
  2249. "max_lun=%d transportt=%p, vendor_id=%llu.\n", host->max_id,
  2250. host->this_id, host->cmd_per_lun, host->unique_id,
  2251. host->max_cmd_len, host->max_channel, host->max_lun,
  2252. host->transportt, sht->vendor_id);
  2253. que_init:
  2254. /* Alloc arrays of request and response ring ptrs */
  2255. if (!qla2x00_alloc_queues(ha, req, rsp)) {
  2256. ql_log(ql_log_fatal, base_vha, 0x003d,
  2257. "Failed to allocate memory for queue pointers..."
  2258. "aborting.\n");
  2259. goto probe_init_failed;
  2260. }
  2261. qlt_probe_one_stage1(base_vha, ha);
  2262. /* Set up the irqs */
  2263. ret = qla2x00_request_irqs(ha, rsp);
  2264. if (ret)
  2265. goto probe_init_failed;
  2266. pci_save_state(pdev);
  2267. /* Assign back pointers */
  2268. rsp->req = req;
  2269. req->rsp = rsp;
  2270. /* FWI2-capable only. */
  2271. req->req_q_in = &ha->iobase->isp24.req_q_in;
  2272. req->req_q_out = &ha->iobase->isp24.req_q_out;
  2273. rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in;
  2274. rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out;
  2275. if (ha->mqenable || IS_QLA83XX(ha)) {
  2276. req->req_q_in = &ha->mqiobase->isp25mq.req_q_in;
  2277. req->req_q_out = &ha->mqiobase->isp25mq.req_q_out;
  2278. rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in;
  2279. rsp->rsp_q_out = &ha->mqiobase->isp25mq.rsp_q_out;
  2280. }
  2281. if (IS_QLA82XX(ha)) {
  2282. req->req_q_out = &ha->iobase->isp82.req_q_out[0];
  2283. rsp->rsp_q_in = &ha->iobase->isp82.rsp_q_in[0];
  2284. rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0];
  2285. }
  2286. ql_dbg(ql_dbg_multiq, base_vha, 0xc009,
  2287. "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n",
  2288. ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp);
  2289. ql_dbg(ql_dbg_multiq, base_vha, 0xc00a,
  2290. "req->req_q_in=%p req->req_q_out=%p "
  2291. "rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
  2292. req->req_q_in, req->req_q_out,
  2293. rsp->rsp_q_in, rsp->rsp_q_out);
  2294. ql_dbg(ql_dbg_init, base_vha, 0x003e,
  2295. "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n",
  2296. ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp);
  2297. ql_dbg(ql_dbg_init, base_vha, 0x003f,
  2298. "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
  2299. req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);
  2300. if (qla2x00_initialize_adapter(base_vha)) {
  2301. ql_log(ql_log_fatal, base_vha, 0x00d6,
  2302. "Failed to initialize adapter - Adapter flags %x.\n",
  2303. base_vha->device_flags);
  2304. if (IS_QLA82XX(ha)) {
  2305. qla82xx_idc_lock(ha);
  2306. qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
  2307. QLA8XXX_DEV_FAILED);
  2308. qla82xx_idc_unlock(ha);
  2309. ql_log(ql_log_fatal, base_vha, 0x00d7,
  2310. "HW State: FAILED.\n");
  2311. }
  2312. ret = -ENODEV;
  2313. goto probe_failed;
  2314. }
  2315. if (ha->mqenable) {
  2316. if (qla25xx_setup_mode(base_vha)) {
  2317. ql_log(ql_log_warn, base_vha, 0x00ec,
  2318. "Failed to create queues, falling back to single queue mode.\n");
  2319. goto que_init;
  2320. }
  2321. }
  2322. if (ha->flags.running_gold_fw)
  2323. goto skip_dpc;
  2324. /*
  2325. * Startup the kernel thread for this host adapter
  2326. */
  2327. ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha,
  2328. "%s_dpc", base_vha->host_str);
  2329. if (IS_ERR(ha->dpc_thread)) {
  2330. ql_log(ql_log_fatal, base_vha, 0x00ed,
  2331. "Failed to start DPC thread.\n");
  2332. ret = PTR_ERR(ha->dpc_thread);
  2333. goto probe_failed;
  2334. }
  2335. ql_dbg(ql_dbg_init, base_vha, 0x00ee,
  2336. "DPC thread started successfully.\n");
  2337. /*
  2338. * If we're not coming up in initiator mode, we might sit for
  2339. * a while without waking up the dpc thread, which leads to a
  2340. * stuck process warning. So just kick the dpc once here and
  2341. * let the kthread start (and go back to sleep in qla2x00_do_dpc).
  2342. */
  2343. qla2xxx_wake_dpc(base_vha);
  2344. if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) {
  2345. sprintf(wq_name, "qla2xxx_%lu_dpc_lp_wq", base_vha->host_no);
  2346. ha->dpc_lp_wq = create_singlethread_workqueue(wq_name);
  2347. INIT_WORK(&ha->idc_aen, qla83xx_service_idc_aen);
  2348. sprintf(wq_name, "qla2xxx_%lu_dpc_hp_wq", base_vha->host_no);
  2349. ha->dpc_hp_wq = create_singlethread_workqueue(wq_name);
  2350. INIT_WORK(&ha->nic_core_reset, qla83xx_nic_core_reset_work);
  2351. INIT_WORK(&ha->idc_state_handler,
  2352. qla83xx_idc_state_handler_work);
  2353. INIT_WORK(&ha->nic_core_unrecoverable,
  2354. qla83xx_nic_core_unrecoverable_work);
  2355. }
  2356. skip_dpc:
  2357. list_add_tail(&base_vha->list, &ha->vp_list);
  2358. base_vha->host->irq = ha->pdev->irq;
2359. /* Initialize the timer. */
  2360. qla2x00_start_timer(base_vha, qla2x00_timer, WATCH_INTERVAL);
  2361. ql_dbg(ql_dbg_init, base_vha, 0x00ef,
  2362. "Started qla2x00_timer with "
  2363. "interval=%d.\n", WATCH_INTERVAL);
  2364. ql_dbg(ql_dbg_init, base_vha, 0x00f0,
  2365. "Detected hba at address=%p.\n",
  2366. ha);
  2367. if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
  2368. if (ha->fw_attributes & BIT_4) {
  2369. int prot = 0, guard;
  2370. base_vha->flags.difdix_supported = 1;
  2371. ql_dbg(ql_dbg_init, base_vha, 0x00f1,
  2372. "Registering for DIF/DIX type 1 and 3 protection.\n");
  2373. if (ql2xenabledif == 1)
  2374. prot = SHOST_DIX_TYPE0_PROTECTION;
  2375. scsi_host_set_prot(host,
  2376. prot | SHOST_DIF_TYPE1_PROTECTION
  2377. | SHOST_DIF_TYPE2_PROTECTION
  2378. | SHOST_DIF_TYPE3_PROTECTION
  2379. | SHOST_DIX_TYPE1_PROTECTION
  2380. | SHOST_DIX_TYPE2_PROTECTION
  2381. | SHOST_DIX_TYPE3_PROTECTION);
  2382. guard = SHOST_DIX_GUARD_CRC;
  2383. if (IS_PI_IPGUARD_CAPABLE(ha) &&
  2384. (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
  2385. guard |= SHOST_DIX_GUARD_IP;
  2386. scsi_host_set_guard(host, guard);
  2387. } else
  2388. base_vha->flags.difdix_supported = 0;
  2389. }
  2390. ha->isp_ops->enable_intrs(ha);
  2391. ret = scsi_add_host(host, &pdev->dev);
  2392. if (ret)
  2393. goto probe_failed;
  2394. base_vha->flags.init_done = 1;
  2395. base_vha->flags.online = 1;
  2396. ql_dbg(ql_dbg_init, base_vha, 0x00f2,
  2397. "Init done and hba is online.\n");
  2398. if (qla_ini_mode_enabled(base_vha))
  2399. scsi_scan_host(host);
  2400. else
  2401. ql_dbg(ql_dbg_init, base_vha, 0x0122,
  2402. "skipping scsi_scan_host() for non-initiator port\n");
  2403. qla2x00_alloc_sysfs_attr(base_vha);
  2404. qla2x00_init_host_attr(base_vha);
  2405. qla2x00_dfs_setup(base_vha);
  2406. ql_log(ql_log_info, base_vha, 0x00fb,
  2407. "QLogic %s - %s.\n",
  2408. ha->model_number, ha->model_desc ? ha->model_desc : "");
  2409. ql_log(ql_log_info, base_vha, 0x00fc,
  2410. "ISP%04X: %s @ %s hdma%c host#=%ld fw=%s.\n",
  2411. pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info),
  2412. pci_name(pdev), ha->flags.enable_64bit_addressing ? '+' : '-',
  2413. base_vha->host_no,
  2414. ha->isp_ops->fw_version_str(base_vha, fw_str));
  2415. qlt_add_target(ha, base_vha);
  2416. return 0;
  2417. probe_init_failed:
  2418. qla2x00_free_req_que(ha, req);
  2419. ha->req_q_map[0] = NULL;
  2420. clear_bit(0, ha->req_qid_map);
  2421. qla2x00_free_rsp_que(ha, rsp);
  2422. ha->rsp_q_map[0] = NULL;
  2423. clear_bit(0, ha->rsp_qid_map);
  2424. ha->max_req_queues = ha->max_rsp_queues = 0;
  2425. probe_failed:
  2426. if (base_vha->timer_active)
  2427. qla2x00_stop_timer(base_vha);
  2428. base_vha->flags.online = 0;
  2429. if (ha->dpc_thread) {
  2430. struct task_struct *t = ha->dpc_thread;
  2431. ha->dpc_thread = NULL;
  2432. kthread_stop(t);
  2433. }
  2434. qla2x00_free_device(base_vha);
  2435. scsi_host_put(base_vha->host);
  2436. probe_hw_failed:
  2437. if (IS_QLA82XX(ha)) {
  2438. qla82xx_idc_lock(ha);
  2439. qla82xx_clear_drv_active(ha);
  2440. qla82xx_idc_unlock(ha);
  2441. }
  2442. iospace_config_failed:
  2443. if (IS_QLA82XX(ha)) {
if (ha->nx_pcibase)
iounmap((device_reg_t __iomem *)ha->nx_pcibase);
  2446. if (!ql2xdbwr)
  2447. iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr);
  2448. } else {
  2449. if (ha->iobase)
  2450. iounmap(ha->iobase);
  2451. }
  2452. pci_release_selected_regions(ha->pdev, ha->bars);
  2453. kfree(ha);
  2454. ha = NULL;
  2455. probe_out:
  2456. pci_disable_device(pdev);
  2457. return ret;
  2458. }
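/*
 * qla2x00_stop_dpc_thread - Stop the per-adapter DPC kthread.
 *
 * Clears ha->dpc_thread before calling kthread_stop() so that
 * qla2xxx_wake_dpc() can no longer wake the exiting thread.
 */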
  2459. static void
  2460. qla2x00_stop_dpc_thread(scsi_qla_host_t *vha)
  2461. {
  2462. struct qla_hw_data *ha = vha->hw;
  2463. struct task_struct *t = ha->dpc_thread;
  2464. if (ha->dpc_thread == NULL)
  2465. return;
  2466. /*
  2467. * qla2xxx_wake_dpc checks for ->dpc_thread
  2468. * so we need to zero it out.
  2469. */
  2470. ha->dpc_thread = NULL;
  2471. kthread_stop(t);
  2472. }
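/*
 * qla2x00_shutdown - PCI shutdown callback.
 *
 * Quiesce the adapter without a full teardown: stop the FCE/EFT
 * traces, stop the firmware, disable and free interrupts, and
 * release the firmware dump memory.
 */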
  2473. static void
  2474. qla2x00_shutdown(struct pci_dev *pdev)
  2475. {
  2476. scsi_qla_host_t *vha;
  2477. struct qla_hw_data *ha;
  2478. if (!atomic_read(&pdev->enable_cnt))
  2479. return;
  2480. vha = pci_get_drvdata(pdev);
  2481. ha = vha->hw;
  2482. /* Turn-off FCE trace */
  2483. if (ha->flags.fce_enabled) {
  2484. qla2x00_disable_fce_trace(vha, NULL, NULL);
  2485. ha->flags.fce_enabled = 0;
  2486. }
  2487. /* Turn-off EFT trace */
  2488. if (ha->eft)
  2489. qla2x00_disable_eft_trace(vha);
  2490. /* Stop currently executing firmware. */
  2491. qla2x00_try_to_stop_firmware(vha);
/* Turn adapter offline */
  2493. vha->flags.online = 0;
  2494. /* turn-off interrupts on the card */
  2495. if (ha->interrupts_on) {
  2496. vha->flags.init_done = 0;
  2497. ha->isp_ops->disable_intrs(ha);
  2498. }
  2499. qla2x00_free_irqs(vha);
  2500. qla2x00_free_fw_dump(ha);
  2501. }
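/*
 * qla2x00_remove_one - PCI remove callback.
 *
 * Tears the adapter down in roughly the reverse order of probe:
 * delete all vports, flush and destroy the workqueues, stop the
 * DPC kthread, unregister from the SCSI and FC transport layers,
 * and finally release the I/O regions and the qla_hw_data.
 */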
  2502. static void
  2503. qla2x00_remove_one(struct pci_dev *pdev)
  2504. {
  2505. scsi_qla_host_t *base_vha, *vha;
  2506. struct qla_hw_data *ha;
  2507. unsigned long flags;
/*
 * If the PCI device is disabled, that means probe failed and any
 * resources should have been cleaned up on probe exit.
 */
  2512. if (!atomic_read(&pdev->enable_cnt))
  2513. return;
  2514. base_vha = pci_get_drvdata(pdev);
  2515. ha = base_vha->hw;
  2516. ha->flags.host_shutting_down = 1;
  2517. set_bit(UNLOADING, &base_vha->dpc_flags);
  2518. mutex_lock(&ha->vport_lock);
  2519. while (ha->cur_vport_count) {
  2520. struct Scsi_Host *scsi_host;
  2521. spin_lock_irqsave(&ha->vport_slock, flags);
  2522. BUG_ON(base_vha->list.next == &ha->vp_list);
  2523. /* This assumes first entry in ha->vp_list is always base vha */
  2524. vha = list_first_entry(&base_vha->list, scsi_qla_host_t, list);
  2525. scsi_host = scsi_host_get(vha->host);
  2526. spin_unlock_irqrestore(&ha->vport_slock, flags);
  2527. mutex_unlock(&ha->vport_lock);
  2528. fc_vport_terminate(vha->fc_vport);
  2529. scsi_host_put(vha->host);
  2530. mutex_lock(&ha->vport_lock);
  2531. }
  2532. mutex_unlock(&ha->vport_lock);
  2533. if (IS_QLA8031(ha)) {
  2534. ql_dbg(ql_dbg_p3p, base_vha, 0xb07e,
  2535. "Clearing fcoe driver presence.\n");
  2536. if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS)
  2537. ql_dbg(ql_dbg_p3p, base_vha, 0xb079,
  2538. "Error while clearing DRV-Presence.\n");
  2539. }
  2540. qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
  2541. qla2x00_dfs_remove(base_vha);
  2542. qla84xx_put_chip(base_vha);
  2543. /* Disable timer */
  2544. if (base_vha->timer_active)
  2545. qla2x00_stop_timer(base_vha);
  2546. base_vha->flags.online = 0;
  2547. /* Flush the work queue and remove it */
  2548. if (ha->wq) {
  2549. flush_workqueue(ha->wq);
  2550. destroy_workqueue(ha->wq);
  2551. ha->wq = NULL;
  2552. }
  2553. /* Cancel all work and destroy DPC workqueues */
  2554. if (ha->dpc_lp_wq) {
  2555. cancel_work_sync(&ha->idc_aen);
  2556. destroy_workqueue(ha->dpc_lp_wq);
  2557. ha->dpc_lp_wq = NULL;
  2558. }
  2559. if (ha->dpc_hp_wq) {
  2560. cancel_work_sync(&ha->nic_core_reset);
  2561. cancel_work_sync(&ha->idc_state_handler);
  2562. cancel_work_sync(&ha->nic_core_unrecoverable);
  2563. destroy_workqueue(ha->dpc_hp_wq);
  2564. ha->dpc_hp_wq = NULL;
  2565. }
  2566. /* Kill the kernel thread for this host */
  2567. if (ha->dpc_thread) {
  2568. struct task_struct *t = ha->dpc_thread;
  2569. /*
  2570. * qla2xxx_wake_dpc checks for ->dpc_thread
  2571. * so we need to zero it out.
  2572. */
  2573. ha->dpc_thread = NULL;
  2574. kthread_stop(t);
  2575. }
  2576. qlt_remove_target(ha, base_vha);
  2577. qla2x00_free_sysfs_attr(base_vha);
  2578. fc_remove_host(base_vha->host);
  2579. scsi_remove_host(base_vha->host);
  2580. qla2x00_free_device(base_vha);
  2581. scsi_host_put(base_vha->host);
  2582. if (IS_QLA82XX(ha)) {
  2583. qla82xx_idc_lock(ha);
  2584. qla82xx_clear_drv_active(ha);
  2585. qla82xx_idc_unlock(ha);
  2586. iounmap((device_reg_t __iomem *)ha->nx_pcibase);
  2587. if (!ql2xdbwr)
  2588. iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr);
  2589. } else {
  2590. if (ha->iobase)
  2591. iounmap(ha->iobase);
  2592. if (ha->mqiobase)
  2593. iounmap(ha->mqiobase);
  2594. if (IS_QLA83XX(ha) && ha->msixbase)
  2595. iounmap(ha->msixbase);
  2596. }
  2597. pci_release_selected_regions(ha->pdev, ha->bars);
  2598. kfree(ha);
  2599. ha = NULL;
  2600. pci_disable_pcie_error_reporting(pdev);
  2601. pci_disable_device(pdev);
  2602. pci_set_drvdata(pdev, NULL);
  2603. }
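/*
 * qla2x00_free_device - Free all per-adapter resources for a host.
 *
 * Aborts outstanding commands, stops the timer and DPC thread,
 * stops the firmware, disables and frees interrupts, and releases
 * fcports, adapter memory and the request/response queues.
 */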
  2604. static void
  2605. qla2x00_free_device(scsi_qla_host_t *vha)
  2606. {
  2607. struct qla_hw_data *ha = vha->hw;
  2608. qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
  2609. /* Disable timer */
  2610. if (vha->timer_active)
  2611. qla2x00_stop_timer(vha);
  2612. qla2x00_stop_dpc_thread(vha);
  2613. qla25xx_delete_queues(vha);
  2614. if (ha->flags.fce_enabled)
  2615. qla2x00_disable_fce_trace(vha, NULL, NULL);
  2616. if (ha->eft)
  2617. qla2x00_disable_eft_trace(vha);
  2618. /* Stop currently executing firmware. */
  2619. qla2x00_try_to_stop_firmware(vha);
  2620. vha->flags.online = 0;
  2621. /* turn-off interrupts on the card */
  2622. if (ha->interrupts_on) {
  2623. vha->flags.init_done = 0;
  2624. ha->isp_ops->disable_intrs(ha);
  2625. }
  2626. qla2x00_free_irqs(vha);
  2627. qla2x00_free_fcports(vha);
  2628. qla2x00_mem_free(ha);
  2629. qla82xx_md_free(vha);
  2630. qla2x00_free_queues(ha);
  2631. }
  2632. void qla2x00_free_fcports(struct scsi_qla_host *vha)
  2633. {
  2634. fc_port_t *fcport, *tfcport;
  2635. list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) {
  2636. list_del(&fcport->list);
  2637. qla2x00_clear_loop_id(fcport);
  2638. kfree(fcport);
  2639. fcport = NULL;
  2640. }
  2641. }
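/*
 * qla2x00_schedule_rport_del - Remove an fcport's rport.
 *
 * With @defer set, the rport is handed to the DPC thread via
 * fcport->drport and FCPORT_UPDATE_NEEDED; otherwise it is deleted
 * immediately with fc_remote_port_delete().
 */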
  2642. static inline void
  2643. qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
  2644. int defer)
  2645. {
  2646. struct fc_rport *rport;
  2647. scsi_qla_host_t *base_vha;
  2648. unsigned long flags;
  2649. if (!fcport->rport)
  2650. return;
  2651. rport = fcport->rport;
  2652. if (defer) {
  2653. base_vha = pci_get_drvdata(vha->hw->pdev);
  2654. spin_lock_irqsave(vha->host->host_lock, flags);
  2655. fcport->drport = rport;
  2656. spin_unlock_irqrestore(vha->host->host_lock, flags);
  2657. set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
  2658. qla2xxx_wake_dpc(base_vha);
  2659. } else {
  2660. fc_remote_port_delete(rport);
  2661. qlt_fc_port_deleted(vha, fcport);
  2662. }
  2663. }
/*
 * qla2x00_mark_device_lost - Updates fcport state when a device goes offline.
 *
 * Input: vha = adapter block pointer. fcport = port structure pointer.
 *
 * Return: None.
 *
 * Context:
 */
  2673. void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
  2674. int do_login, int defer)
  2675. {
  2676. if (atomic_read(&fcport->state) == FCS_ONLINE &&
  2677. vha->vp_idx == fcport->vha->vp_idx) {
  2678. qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
  2679. qla2x00_schedule_rport_del(vha, fcport, defer);
  2680. }
  2681. /*
  2682. * We may need to retry the login, so don't change the state of the
  2683. * port but do the retries.
  2684. */
  2685. if (atomic_read(&fcport->state) != FCS_DEVICE_DEAD)
  2686. qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
  2687. if (!do_login)
  2688. return;
  2689. if (fcport->login_retry == 0) {
  2690. fcport->login_retry = vha->hw->login_retry_count;
  2691. set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
  2692. ql_dbg(ql_dbg_disc, vha, 0x2067,
  2693. "Port login retry "
  2694. "%02x%02x%02x%02x%02x%02x%02x%02x, "
  2695. "id = 0x%04x retry cnt=%d.\n",
  2696. fcport->port_name[0], fcport->port_name[1],
  2697. fcport->port_name[2], fcport->port_name[3],
  2698. fcport->port_name[4], fcport->port_name[5],
  2699. fcport->port_name[6], fcport->port_name[7],
  2700. fcport->loop_id, fcport->login_retry);
  2701. }
  2702. }
/*
 * qla2x00_mark_all_devices_lost
 *	Updates fcport states when the adapter goes offline.
 *
 * Input:
 *	vha = adapter block pointer.
 *	defer = defer rport removal to the DPC thread.
 *
 * Return:
 *	None.
 *
 * Context:
 */
  2716. void
  2717. qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
  2718. {
  2719. fc_port_t *fcport;
  2720. list_for_each_entry(fcport, &vha->vp_fcports, list) {
  2721. if (vha->vp_idx != 0 && vha->vp_idx != fcport->vha->vp_idx)
  2722. continue;
  2723. /*
  2724. * No point in marking the device as lost, if the device is
  2725. * already DEAD.
  2726. */
  2727. if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
  2728. continue;
  2729. if (atomic_read(&fcport->state) == FCS_ONLINE) {
  2730. qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
  2731. if (defer)
  2732. qla2x00_schedule_rport_del(vha, fcport, defer);
  2733. else if (vha->vp_idx == fcport->vha->vp_idx)
  2734. qla2x00_schedule_rport_del(vha, fcport, defer);
  2735. }
  2736. }
  2737. }
/*
 * qla2x00_mem_alloc
 *	Allocates adapter memory.
 *
 * Returns:
 *	1 = success.
 *	-ENOMEM = failure.
 */
  2746. static int
  2747. qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
  2748. struct req_que **req, struct rsp_que **rsp)
  2749. {
  2750. char name[16];
  2751. ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size,
  2752. &ha->init_cb_dma, GFP_KERNEL);
  2753. if (!ha->init_cb)
  2754. goto fail;
  2755. if (qlt_mem_alloc(ha) < 0)
  2756. goto fail_free_init_cb;
  2757. ha->gid_list = dma_alloc_coherent(&ha->pdev->dev,
  2758. qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL);
  2759. if (!ha->gid_list)
  2760. goto fail_free_tgt_mem;
  2761. ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
  2762. if (!ha->srb_mempool)
  2763. goto fail_free_gid_list;
  2764. if (IS_QLA82XX(ha)) {
  2765. /* Allocate cache for CT6 Ctx. */
  2766. if (!ctx_cachep) {
  2767. ctx_cachep = kmem_cache_create("qla2xxx_ctx",
  2768. sizeof(struct ct6_dsd), 0,
  2769. SLAB_HWCACHE_ALIGN, NULL);
  2770. if (!ctx_cachep)
  2771. goto fail_free_gid_list;
  2772. }
  2773. ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ,
  2774. ctx_cachep);
  2775. if (!ha->ctx_mempool)
  2776. goto fail_free_srb_mempool;
  2777. ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0021,
  2778. "ctx_cachep=%p ctx_mempool=%p.\n",
  2779. ctx_cachep, ha->ctx_mempool);
  2780. }
  2781. /* Get memory for cached NVRAM */
  2782. ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL);
  2783. if (!ha->nvram)
  2784. goto fail_free_ctx_mempool;
  2785. snprintf(name, sizeof(name), "%s_%d", QLA2XXX_DRIVER_NAME,
  2786. ha->pdev->device);
  2787. ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev,
  2788. DMA_POOL_SIZE, 8, 0);
  2789. if (!ha->s_dma_pool)
  2790. goto fail_free_nvram;
  2791. ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0022,
  2792. "init_cb=%p gid_list=%p, srb_mempool=%p s_dma_pool=%p.\n",
  2793. ha->init_cb, ha->gid_list, ha->srb_mempool, ha->s_dma_pool);
  2794. if (IS_QLA82XX(ha) || ql2xenabledif) {
  2795. ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev,
  2796. DSD_LIST_DMA_POOL_SIZE, 8, 0);
  2797. if (!ha->dl_dma_pool) {
  2798. ql_log_pci(ql_log_fatal, ha->pdev, 0x0023,
  2799. "Failed to allocate memory for dl_dma_pool.\n");
  2800. goto fail_s_dma_pool;
  2801. }
  2802. ha->fcp_cmnd_dma_pool = dma_pool_create(name, &ha->pdev->dev,
  2803. FCP_CMND_DMA_POOL_SIZE, 8, 0);
  2804. if (!ha->fcp_cmnd_dma_pool) {
  2805. ql_log_pci(ql_log_fatal, ha->pdev, 0x0024,
  2806. "Failed to allocate memory for fcp_cmnd_dma_pool.\n");
  2807. goto fail_dl_dma_pool;
  2808. }
  2809. ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0025,
  2810. "dl_dma_pool=%p fcp_cmnd_dma_pool=%p.\n",
  2811. ha->dl_dma_pool, ha->fcp_cmnd_dma_pool);
  2812. }
  2813. /* Allocate memory for SNS commands */
  2814. if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
  2815. /* Get consistent memory allocated for SNS commands */
  2816. ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev,
  2817. sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL);
  2818. if (!ha->sns_cmd)
  2819. goto fail_dma_pool;
  2820. ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0026,
  2821. "sns_cmd: %p.\n", ha->sns_cmd);
  2822. } else {
  2823. /* Get consistent memory allocated for MS IOCB */
  2824. ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
  2825. &ha->ms_iocb_dma);
  2826. if (!ha->ms_iocb)
  2827. goto fail_dma_pool;
  2828. /* Get consistent memory allocated for CT SNS commands */
  2829. ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev,
  2830. sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL);
  2831. if (!ha->ct_sns)
  2832. goto fail_free_ms_iocb;
  2833. ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0027,
  2834. "ms_iocb=%p ct_sns=%p.\n",
  2835. ha->ms_iocb, ha->ct_sns);
  2836. }
  2837. /* Allocate memory for request ring */
  2838. *req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
  2839. if (!*req) {
  2840. ql_log_pci(ql_log_fatal, ha->pdev, 0x0028,
  2841. "Failed to allocate memory for req.\n");
  2842. goto fail_req;
  2843. }
  2844. (*req)->length = req_len;
  2845. (*req)->ring = dma_alloc_coherent(&ha->pdev->dev,
  2846. ((*req)->length + 1) * sizeof(request_t),
  2847. &(*req)->dma, GFP_KERNEL);
  2848. if (!(*req)->ring) {
  2849. ql_log_pci(ql_log_fatal, ha->pdev, 0x0029,
  2850. "Failed to allocate memory for req_ring.\n");
  2851. goto fail_req_ring;
  2852. }
  2853. /* Allocate memory for response ring */
  2854. *rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
  2855. if (!*rsp) {
  2856. ql_log_pci(ql_log_fatal, ha->pdev, 0x002a,
  2857. "Failed to allocate memory for rsp.\n");
  2858. goto fail_rsp;
  2859. }
  2860. (*rsp)->hw = ha;
  2861. (*rsp)->length = rsp_len;
  2862. (*rsp)->ring = dma_alloc_coherent(&ha->pdev->dev,
  2863. ((*rsp)->length + 1) * sizeof(response_t),
  2864. &(*rsp)->dma, GFP_KERNEL);
  2865. if (!(*rsp)->ring) {
  2866. ql_log_pci(ql_log_fatal, ha->pdev, 0x002b,
  2867. "Failed to allocate memory for rsp_ring.\n");
  2868. goto fail_rsp_ring;
  2869. }
  2870. (*req)->rsp = *rsp;
  2871. (*rsp)->req = *req;
  2872. ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002c,
  2873. "req=%p req->length=%d req->ring=%p rsp=%p "
  2874. "rsp->length=%d rsp->ring=%p.\n",
  2875. *req, (*req)->length, (*req)->ring, *rsp, (*rsp)->length,
  2876. (*rsp)->ring);
  2877. /* Allocate memory for NVRAM data for vports */
  2878. if (ha->nvram_npiv_size) {
  2879. ha->npiv_info = kzalloc(sizeof(struct qla_npiv_entry) *
  2880. ha->nvram_npiv_size, GFP_KERNEL);
  2881. if (!ha->npiv_info) {
  2882. ql_log_pci(ql_log_fatal, ha->pdev, 0x002d,
  2883. "Failed to allocate memory for npiv_info.\n");
  2884. goto fail_npiv_info;
  2885. }
  2886. } else
  2887. ha->npiv_info = NULL;
  2888. /* Get consistent memory allocated for EX-INIT-CB. */
  2889. if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)) {
  2890. ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
  2891. &ha->ex_init_cb_dma);
  2892. if (!ha->ex_init_cb)
  2893. goto fail_ex_init_cb;
  2894. ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002e,
  2895. "ex_init_cb=%p.\n", ha->ex_init_cb);
  2896. }
  2897. INIT_LIST_HEAD(&ha->gbl_dsd_list);
  2898. /* Get consistent memory allocated for Async Port-Database. */
  2899. if (!IS_FWI2_CAPABLE(ha)) {
  2900. ha->async_pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
  2901. &ha->async_pd_dma);
  2902. if (!ha->async_pd)
  2903. goto fail_async_pd;
  2904. ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002f,
  2905. "async_pd=%p.\n", ha->async_pd);
  2906. }
  2907. INIT_LIST_HEAD(&ha->vp_list);
  2908. /* Allocate memory for our loop_id bitmap */
  2909. ha->loop_id_map = kzalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE) * sizeof(long),
  2910. GFP_KERNEL);
  2911. if (!ha->loop_id_map)
  2912. goto fail_async_pd;
  2913. else {
  2914. qla2x00_set_reserved_loop_ids(ha);
  2915. ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123,
  2916. "loop_id_map=%p. \n", ha->loop_id_map);
  2917. }
  2918. return 1;
  2919. fail_async_pd:
  2920. dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
  2921. fail_ex_init_cb:
  2922. kfree(ha->npiv_info);
  2923. fail_npiv_info:
  2924. dma_free_coherent(&ha->pdev->dev, ((*rsp)->length + 1) *
  2925. sizeof(response_t), (*rsp)->ring, (*rsp)->dma);
  2926. (*rsp)->ring = NULL;
  2927. (*rsp)->dma = 0;
  2928. fail_rsp_ring:
  2929. kfree(*rsp);
  2930. fail_rsp:
  2931. dma_free_coherent(&ha->pdev->dev, ((*req)->length + 1) *
  2932. sizeof(request_t), (*req)->ring, (*req)->dma);
  2933. (*req)->ring = NULL;
  2934. (*req)->dma = 0;
  2935. fail_req_ring:
  2936. kfree(*req);
  2937. fail_req:
  2938. dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
  2939. ha->ct_sns, ha->ct_sns_dma);
  2940. ha->ct_sns = NULL;
  2941. ha->ct_sns_dma = 0;
  2942. fail_free_ms_iocb:
  2943. dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
  2944. ha->ms_iocb = NULL;
  2945. ha->ms_iocb_dma = 0;
  2946. fail_dma_pool:
  2947. if (IS_QLA82XX(ha) || ql2xenabledif) {
  2948. dma_pool_destroy(ha->fcp_cmnd_dma_pool);
  2949. ha->fcp_cmnd_dma_pool = NULL;
  2950. }
  2951. fail_dl_dma_pool:
  2952. if (IS_QLA82XX(ha) || ql2xenabledif) {
  2953. dma_pool_destroy(ha->dl_dma_pool);
  2954. ha->dl_dma_pool = NULL;
  2955. }
  2956. fail_s_dma_pool:
  2957. dma_pool_destroy(ha->s_dma_pool);
  2958. ha->s_dma_pool = NULL;
  2959. fail_free_nvram:
  2960. kfree(ha->nvram);
  2961. ha->nvram = NULL;
  2962. fail_free_ctx_mempool:
  2963. mempool_destroy(ha->ctx_mempool);
  2964. ha->ctx_mempool = NULL;
  2965. fail_free_srb_mempool:
  2966. mempool_destroy(ha->srb_mempool);
  2967. ha->srb_mempool = NULL;
  2968. fail_free_gid_list:
  2969. dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
  2970. ha->gid_list,
  2971. ha->gid_list_dma);
  2972. ha->gid_list = NULL;
  2973. ha->gid_list_dma = 0;
  2974. fail_free_tgt_mem:
  2975. qlt_mem_free(ha);
  2976. fail_free_init_cb:
  2977. dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
  2978. ha->init_cb_dma);
  2979. ha->init_cb = NULL;
  2980. ha->init_cb_dma = 0;
  2981. fail:
  2982. ql_log(ql_log_fatal, NULL, 0x0030,
  2983. "Memory allocation failure.\n");
  2984. return -ENOMEM;
  2985. }
/*
 * qla2x00_free_fw_dump
 *	Frees the FCE, EFT and firmware dump buffers.
 *
 * Input:
 *	ha = adapter block pointer.
 */
  2993. static void
  2994. qla2x00_free_fw_dump(struct qla_hw_data *ha)
  2995. {
  2996. if (ha->fce)
  2997. dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
  2998. ha->fce_dma);
  2999. if (ha->fw_dump) {
  3000. if (ha->eft)
  3001. dma_free_coherent(&ha->pdev->dev,
  3002. ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma);
  3003. vfree(ha->fw_dump);
  3004. }
  3005. ha->fce = NULL;
  3006. ha->fce_dma = 0;
  3007. ha->eft = NULL;
  3008. ha->eft_dma = 0;
  3009. ha->fw_dump = NULL;
  3010. ha->fw_dumped = 0;
  3011. ha->fw_dump_reading = 0;
  3012. }
/*
 * qla2x00_mem_free
 *	Frees all adapter allocated memory.
 *
 * Input:
 *	ha = adapter block pointer.
 */
  3020. static void
  3021. qla2x00_mem_free(struct qla_hw_data *ha)
  3022. {
  3023. qla2x00_free_fw_dump(ha);
  3024. if (ha->mctp_dump)
  3025. dma_free_coherent(&ha->pdev->dev, MCTP_DUMP_SIZE, ha->mctp_dump,
  3026. ha->mctp_dump_dma);
  3027. if (ha->srb_mempool)
  3028. mempool_destroy(ha->srb_mempool);
  3029. if (ha->dcbx_tlv)
  3030. dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
  3031. ha->dcbx_tlv, ha->dcbx_tlv_dma);
  3032. if (ha->xgmac_data)
  3033. dma_free_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
  3034. ha->xgmac_data, ha->xgmac_data_dma);
  3035. if (ha->sns_cmd)
  3036. dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
  3037. ha->sns_cmd, ha->sns_cmd_dma);
  3038. if (ha->ct_sns)
  3039. dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
  3040. ha->ct_sns, ha->ct_sns_dma);
  3041. if (ha->sfp_data)
  3042. dma_pool_free(ha->s_dma_pool, ha->sfp_data, ha->sfp_data_dma);
  3043. if (ha->ms_iocb)
  3044. dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
  3045. if (ha->ex_init_cb)
  3046. dma_pool_free(ha->s_dma_pool,
  3047. ha->ex_init_cb, ha->ex_init_cb_dma);
  3048. if (ha->async_pd)
  3049. dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
  3050. if (ha->s_dma_pool)
  3051. dma_pool_destroy(ha->s_dma_pool);
  3052. if (ha->gid_list)
  3053. dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
  3054. ha->gid_list, ha->gid_list_dma);
  3055. if (IS_QLA82XX(ha)) {
  3056. if (!list_empty(&ha->gbl_dsd_list)) {
  3057. struct dsd_dma *dsd_ptr, *tdsd_ptr;
  3058. /* clean up allocated prev pool */
  3059. list_for_each_entry_safe(dsd_ptr,
  3060. tdsd_ptr, &ha->gbl_dsd_list, list) {
  3061. dma_pool_free(ha->dl_dma_pool,
  3062. dsd_ptr->dsd_addr, dsd_ptr->dsd_list_dma);
  3063. list_del(&dsd_ptr->list);
  3064. kfree(dsd_ptr);
  3065. }
  3066. }
  3067. }
  3068. if (ha->dl_dma_pool)
  3069. dma_pool_destroy(ha->dl_dma_pool);
  3070. if (ha->fcp_cmnd_dma_pool)
  3071. dma_pool_destroy(ha->fcp_cmnd_dma_pool);
  3072. if (ha->ctx_mempool)
  3073. mempool_destroy(ha->ctx_mempool);
  3074. qlt_mem_free(ha);
  3075. if (ha->init_cb)
  3076. dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
  3077. ha->init_cb, ha->init_cb_dma);
  3078. vfree(ha->optrom_buffer);
  3079. kfree(ha->nvram);
  3080. kfree(ha->npiv_info);
  3081. kfree(ha->swl);
  3082. kfree(ha->loop_id_map);
  3083. ha->srb_mempool = NULL;
  3084. ha->ctx_mempool = NULL;
  3085. ha->sns_cmd = NULL;
  3086. ha->sns_cmd_dma = 0;
  3087. ha->ct_sns = NULL;
  3088. ha->ct_sns_dma = 0;
  3089. ha->ms_iocb = NULL;
  3090. ha->ms_iocb_dma = 0;
  3091. ha->init_cb = NULL;
  3092. ha->init_cb_dma = 0;
  3093. ha->ex_init_cb = NULL;
  3094. ha->ex_init_cb_dma = 0;
  3095. ha->async_pd = NULL;
  3096. ha->async_pd_dma = 0;
  3097. ha->s_dma_pool = NULL;
  3098. ha->dl_dma_pool = NULL;
  3099. ha->fcp_cmnd_dma_pool = NULL;
  3100. ha->gid_list = NULL;
  3101. ha->gid_list_dma = 0;
  3102. ha->tgt.atio_ring = NULL;
  3103. ha->tgt.atio_dma = 0;
  3104. ha->tgt.tgt_vp_map = NULL;
  3105. }
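/*
 * qla2x00_create_host - Allocate a Scsi_Host and its scsi_qla_host.
 *
 * Returns the initialized scsi_qla_host on success, or NULL if the
 * midlayer allocation fails.
 */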
  3106. struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
  3107. struct qla_hw_data *ha)
  3108. {
  3109. struct Scsi_Host *host;
  3110. struct scsi_qla_host *vha = NULL;
  3111. host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t));
  3112. if (host == NULL) {
  3113. ql_log_pci(ql_log_fatal, ha->pdev, 0x0107,
  3114. "Failed to allocate host from the scsi layer, aborting.\n");
  3115. goto fail;
  3116. }
  3117. /* Clear our data area */
  3118. vha = shost_priv(host);
  3119. memset(vha, 0, sizeof(scsi_qla_host_t));
  3120. vha->host = host;
  3121. vha->host_no = host->host_no;
  3122. vha->hw = ha;
  3123. INIT_LIST_HEAD(&vha->vp_fcports);
  3124. INIT_LIST_HEAD(&vha->work_list);
  3125. INIT_LIST_HEAD(&vha->list);
  3126. spin_lock_init(&vha->work_lock);
  3127. sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
  3128. ql_dbg(ql_dbg_init, vha, 0x0041,
  3129. "Allocated the host=%p hw=%p vha=%p dev_name=%s",
  3130. vha->host, vha->hw, vha,
  3131. dev_name(&(ha->pdev->dev)));
  3132. return vha;
  3133. fail:
  3134. return vha;
  3135. }
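/*
 * Work-list helpers: qla2x00_alloc_work() allocates a qla_work_evt
 * (taking a vha busy reference), qla2x00_post_work() queues it on
 * vha->work_list and wakes the DPC thread, and qla2x00_do_work()
 * later consumes the list.
 */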
  3136. static struct qla_work_evt *
  3137. qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
  3138. {
  3139. struct qla_work_evt *e;
  3140. uint8_t bail;
  3141. QLA_VHA_MARK_BUSY(vha, bail);
  3142. if (bail)
  3143. return NULL;
  3144. e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC);
  3145. if (!e) {
  3146. QLA_VHA_MARK_NOT_BUSY(vha);
  3147. return NULL;
  3148. }
  3149. INIT_LIST_HEAD(&e->list);
  3150. e->type = type;
  3151. e->flags = QLA_EVT_FLAG_FREE;
  3152. return e;
  3153. }
  3154. static int
  3155. qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
  3156. {
  3157. unsigned long flags;
  3158. spin_lock_irqsave(&vha->work_lock, flags);
  3159. list_add_tail(&e->list, &vha->work_list);
  3160. spin_unlock_irqrestore(&vha->work_lock, flags);
  3161. qla2xxx_wake_dpc(vha);
  3162. return QLA_SUCCESS;
  3163. }
  3164. int
  3165. qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code,
  3166. u32 data)
  3167. {
  3168. struct qla_work_evt *e;
  3169. e = qla2x00_alloc_work(vha, QLA_EVT_AEN);
  3170. if (!e)
  3171. return QLA_FUNCTION_FAILED;
  3172. e->u.aen.code = code;
  3173. e->u.aen.data = data;
  3174. return qla2x00_post_work(vha, e);
  3175. }
  3176. int
  3177. qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb)
  3178. {
  3179. struct qla_work_evt *e;
  3180. e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK);
  3181. if (!e)
  3182. return QLA_FUNCTION_FAILED;
  3183. memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
  3184. return qla2x00_post_work(vha, e);
  3185. }
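/*
 * qla2x00_post_async_work() generates the async logio post helpers.
 * For example, qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN)
 * defines qla2x00_post_async_login_work(vha, fcport, data), which
 * queues a QLA_EVT_ASYNC_LOGIN event for the DPC thread.
 */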
  3186. #define qla2x00_post_async_work(name, type) \
  3187. int qla2x00_post_async_##name##_work( \
  3188. struct scsi_qla_host *vha, \
  3189. fc_port_t *fcport, uint16_t *data) \
  3190. { \
  3191. struct qla_work_evt *e; \
  3192. \
  3193. e = qla2x00_alloc_work(vha, type); \
  3194. if (!e) \
  3195. return QLA_FUNCTION_FAILED; \
  3196. \
  3197. e->u.logio.fcport = fcport; \
  3198. if (data) { \
  3199. e->u.logio.data[0] = data[0]; \
  3200. e->u.logio.data[1] = data[1]; \
  3201. } \
  3202. return qla2x00_post_work(vha, e); \
  3203. }
  3204. qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN);
  3205. qla2x00_post_async_work(login_done, QLA_EVT_ASYNC_LOGIN_DONE);
  3206. qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
  3207. qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE);
  3208. qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC);
  3209. qla2x00_post_async_work(adisc_done, QLA_EVT_ASYNC_ADISC_DONE);
  3210. int
  3211. qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code)
  3212. {
  3213. struct qla_work_evt *e;
  3214. e = qla2x00_alloc_work(vha, QLA_EVT_UEVENT);
  3215. if (!e)
  3216. return QLA_FUNCTION_FAILED;
  3217. e->u.uevent.code = code;
  3218. return qla2x00_post_work(vha, e);
  3219. }
  3220. static void
  3221. qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code)
  3222. {
  3223. char event_string[40];
  3224. char *envp[] = { event_string, NULL };
  3225. switch (code) {
  3226. case QLA_UEVENT_CODE_FW_DUMP:
  3227. snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld",
  3228. vha->host_no);
  3229. break;
  3230. default:
  3231. /* do nothing */
  3232. break;
  3233. }
  3234. kobject_uevent_env(&vha->hw->pdev->dev.kobj, KOBJ_CHANGE, envp);
  3235. }
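/*
 * qla2x00_do_work - Process the deferred work list of a vha.
 *
 * Splices vha->work_list under work_lock and dispatches each event
 * by type; every completed event drops the busy reference taken in
 * qla2x00_alloc_work().
 */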
  3236. void
  3237. qla2x00_do_work(struct scsi_qla_host *vha)
  3238. {
  3239. struct qla_work_evt *e, *tmp;
  3240. unsigned long flags;
  3241. LIST_HEAD(work);
  3242. spin_lock_irqsave(&vha->work_lock, flags);
  3243. list_splice_init(&vha->work_list, &work);
  3244. spin_unlock_irqrestore(&vha->work_lock, flags);
  3245. list_for_each_entry_safe(e, tmp, &work, list) {
  3246. list_del_init(&e->list);
  3247. switch (e->type) {
  3248. case QLA_EVT_AEN:
  3249. fc_host_post_event(vha->host, fc_get_event_number(),
  3250. e->u.aen.code, e->u.aen.data);
  3251. break;
  3252. case QLA_EVT_IDC_ACK:
  3253. qla81xx_idc_ack(vha, e->u.idc_ack.mb);
  3254. break;
  3255. case QLA_EVT_ASYNC_LOGIN:
  3256. qla2x00_async_login(vha, e->u.logio.fcport,
  3257. e->u.logio.data);
  3258. break;
  3259. case QLA_EVT_ASYNC_LOGIN_DONE:
  3260. qla2x00_async_login_done(vha, e->u.logio.fcport,
  3261. e->u.logio.data);
  3262. break;
  3263. case QLA_EVT_ASYNC_LOGOUT:
  3264. qla2x00_async_logout(vha, e->u.logio.fcport);
  3265. break;
  3266. case QLA_EVT_ASYNC_LOGOUT_DONE:
  3267. qla2x00_async_logout_done(vha, e->u.logio.fcport,
  3268. e->u.logio.data);
  3269. break;
  3270. case QLA_EVT_ASYNC_ADISC:
  3271. qla2x00_async_adisc(vha, e->u.logio.fcport,
  3272. e->u.logio.data);
  3273. break;
  3274. case QLA_EVT_ASYNC_ADISC_DONE:
  3275. qla2x00_async_adisc_done(vha, e->u.logio.fcport,
  3276. e->u.logio.data);
  3277. break;
  3278. case QLA_EVT_UEVENT:
  3279. qla2x00_uevent_emit(vha, e->u.uevent.code);
  3280. break;
  3281. }
  3282. if (e->flags & QLA_EVT_FLAG_FREE)
  3283. kfree(e);
  3284. /* For each work completed decrement vha ref count */
  3285. QLA_VHA_MARK_NOT_BUSY(vha);
  3286. }
  3287. }
/*
 * Relogin to all the fcports of a vport that are not yet online.
 * Context: DPC thread
 */
  3291. void qla2x00_relogin(struct scsi_qla_host *vha)
  3292. {
  3293. fc_port_t *fcport;
  3294. int status;
  3295. uint16_t next_loopid = 0;
  3296. struct qla_hw_data *ha = vha->hw;
  3297. uint16_t data[2];
  3298. list_for_each_entry(fcport, &vha->vp_fcports, list) {
  3299. /*
  3300. * If the port is not ONLINE then try to login
  3301. * to it if we haven't run out of retries.
  3302. */
  3303. if (atomic_read(&fcport->state) != FCS_ONLINE &&
  3304. fcport->login_retry && !(fcport->flags & FCF_ASYNC_SENT)) {
  3305. fcport->login_retry--;
  3306. if (fcport->flags & FCF_FABRIC_DEVICE) {
  3307. if (fcport->flags & FCF_FCP2_DEVICE)
  3308. ha->isp_ops->fabric_logout(vha,
  3309. fcport->loop_id,
  3310. fcport->d_id.b.domain,
  3311. fcport->d_id.b.area,
  3312. fcport->d_id.b.al_pa);
  3313. if (fcport->loop_id == FC_NO_LOOP_ID) {
  3314. fcport->loop_id = next_loopid =
  3315. ha->min_external_loopid;
  3316. status = qla2x00_find_new_loop_id(
  3317. vha, fcport);
  3318. if (status != QLA_SUCCESS) {
  3319. /* Ran out of IDs to use */
  3320. break;
  3321. }
  3322. }
  3323. if (IS_ALOGIO_CAPABLE(ha)) {
  3324. fcport->flags |= FCF_ASYNC_SENT;
  3325. data[0] = 0;
  3326. data[1] = QLA_LOGIO_LOGIN_RETRIED;
  3327. status = qla2x00_post_async_login_work(
  3328. vha, fcport, data);
  3329. if (status == QLA_SUCCESS)
  3330. continue;
  3331. /* Attempt a retry. */
  3332. status = 1;
  3333. } else {
  3334. status = qla2x00_fabric_login(vha,
  3335. fcport, &next_loopid);
  3336. if (status == QLA_SUCCESS) {
  3337. int status2;
  3338. uint8_t opts;
  3339. opts = 0;
  3340. if (fcport->flags &
  3341. FCF_FCP2_DEVICE)
  3342. opts |= BIT_1;
  3343. status2 =
  3344. qla2x00_get_port_database(
  3345. vha, fcport, opts);
  3346. if (status2 != QLA_SUCCESS)
  3347. status = 1;
  3348. }
  3349. }
  3350. } else
  3351. status = qla2x00_local_device_login(vha,
  3352. fcport);
  3353. if (status == QLA_SUCCESS) {
  3354. fcport->old_loop_id = fcport->loop_id;
  3355. ql_dbg(ql_dbg_disc, vha, 0x2003,
  3356. "Port login OK: logged in ID 0x%x.\n",
  3357. fcport->loop_id);
  3358. qla2x00_update_fcport(vha, fcport);
  3359. } else if (status == 1) {
  3360. set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
  3361. /* retry the login again */
  3362. ql_dbg(ql_dbg_disc, vha, 0x2007,
  3363. "Retrying %d login again loop_id 0x%x.\n",
  3364. fcport->login_retry, fcport->loop_id);
  3365. } else {
  3366. fcport->login_retry = 0;
  3367. }
  3368. if (fcport->login_retry == 0 && status != QLA_SUCCESS)
  3369. qla2x00_clear_loop_id(fcport);
  3370. }
  3371. if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
  3372. break;
  3373. }
  3374. }
  3375. /* Schedule work on any of the dpc-workqueues */
  3376. void
  3377. qla83xx_schedule_work(scsi_qla_host_t *base_vha, int work_code)
  3378. {
  3379. struct qla_hw_data *ha = base_vha->hw;
  3380. switch (work_code) {
  3381. case MBA_IDC_AEN: /* 0x8200 */
  3382. if (ha->dpc_lp_wq)
  3383. queue_work(ha->dpc_lp_wq, &ha->idc_aen);
  3384. break;
  3385. case QLA83XX_NIC_CORE_RESET: /* 0x1 */
  3386. if (!ha->flags.nic_core_reset_hdlr_active) {
  3387. if (ha->dpc_hp_wq)
  3388. queue_work(ha->dpc_hp_wq, &ha->nic_core_reset);
  3389. } else
  3390. ql_dbg(ql_dbg_p3p, base_vha, 0xb05e,
  3391. "NIC Core reset is already active. Skip "
  3392. "scheduling it again.\n");
  3393. break;
  3394. case QLA83XX_IDC_STATE_HANDLER: /* 0x2 */
  3395. if (ha->dpc_hp_wq)
  3396. queue_work(ha->dpc_hp_wq, &ha->idc_state_handler);
  3397. break;
  3398. case QLA83XX_NIC_CORE_UNRECOVERABLE: /* 0x3 */
  3399. if (ha->dpc_hp_wq)
  3400. queue_work(ha->dpc_hp_wq, &ha->nic_core_unrecoverable);
  3401. break;
  3402. default:
  3403. ql_log(ql_log_warn, base_vha, 0xb05f,
  3404. "Unknow work-code=0x%x.\n", work_code);
  3405. }
  3406. return;
  3407. }
  3408. /* Work: Perform NIC Core Unrecoverable state handling */
  3409. void
  3410. qla83xx_nic_core_unrecoverable_work(struct work_struct *work)
  3411. {
  3412. struct qla_hw_data *ha =
  3413. container_of(work, struct qla_hw_data, nic_core_unrecoverable);
  3414. scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
  3415. uint32_t dev_state = 0;
  3416. qla83xx_idc_lock(base_vha, 0);
  3417. qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
  3418. qla83xx_reset_ownership(base_vha);
  3419. if (ha->flags.nic_core_reset_owner) {
  3420. ha->flags.nic_core_reset_owner = 0;
  3421. qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE,
  3422. QLA8XXX_DEV_FAILED);
  3423. ql_log(ql_log_info, base_vha, 0xb060, "HW State: FAILED.\n");
  3424. qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER);
  3425. }
  3426. qla83xx_idc_unlock(base_vha, 0);
  3427. }
  3428. /* Work: Execute IDC state handler */
  3429. void
  3430. qla83xx_idc_state_handler_work(struct work_struct *work)
  3431. {
  3432. struct qla_hw_data *ha =
  3433. container_of(work, struct qla_hw_data, idc_state_handler);
  3434. scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
  3435. uint32_t dev_state = 0;
  3436. qla83xx_idc_lock(base_vha, 0);
  3437. qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
  3438. if (dev_state == QLA8XXX_DEV_FAILED ||
  3439. dev_state == QLA8XXX_DEV_NEED_QUIESCENT)
  3440. qla83xx_idc_state_handler(base_vha);
  3441. qla83xx_idc_unlock(base_vha, 0);
  3442. }
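/*
 * qla83xx_check_nic_core_fw_alive - Check that the NIC core firmware
 * heartbeat is advancing.
 *
 * Samples QLA83XX_FW_HEARTBEAT twice, ~100ms apart, for up to one
 * second; returns QLA_FUNCTION_FAILED if the counter never changes.
 */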
  3443. static int
  3444. qla83xx_check_nic_core_fw_alive(scsi_qla_host_t *base_vha)
  3445. {
  3446. int rval = QLA_SUCCESS;
  3447. unsigned long heart_beat_wait = jiffies + (1 * HZ);
  3448. uint32_t heart_beat_counter1, heart_beat_counter2;
  3449. do {
  3450. if (time_after(jiffies, heart_beat_wait)) {
  3451. ql_dbg(ql_dbg_p3p, base_vha, 0xb07c,
  3452. "Nic Core f/w is not alive.\n");
  3453. rval = QLA_FUNCTION_FAILED;
  3454. break;
  3455. }
  3456. qla83xx_idc_lock(base_vha, 0);
  3457. qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT,
  3458. &heart_beat_counter1);
  3459. qla83xx_idc_unlock(base_vha, 0);
  3460. msleep(100);
  3461. qla83xx_idc_lock(base_vha, 0);
  3462. qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT,
  3463. &heart_beat_counter2);
  3464. qla83xx_idc_unlock(base_vha, 0);
  3465. } while (heart_beat_counter1 == heart_beat_counter2);
  3466. return rval;
  3467. }
  3468. /* Work: Perform NIC Core Reset handling */
  3469. void
  3470. qla83xx_nic_core_reset_work(struct work_struct *work)
  3471. {
  3472. struct qla_hw_data *ha =
  3473. container_of(work, struct qla_hw_data, nic_core_reset);
  3474. scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
  3475. uint32_t dev_state = 0;
  3476. if (IS_QLA2031(ha)) {
  3477. if (qla2xxx_mctp_dump(base_vha) != QLA_SUCCESS)
  3478. ql_log(ql_log_warn, base_vha, 0xb081,
  3479. "Failed to dump mctp\n");
  3480. return;
  3481. }
  3482. if (!ha->flags.nic_core_reset_hdlr_active) {
  3483. if (qla83xx_check_nic_core_fw_alive(base_vha) == QLA_SUCCESS) {
  3484. qla83xx_idc_lock(base_vha, 0);
  3485. qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE,
  3486. &dev_state);
  3487. qla83xx_idc_unlock(base_vha, 0);
  3488. if (dev_state != QLA8XXX_DEV_NEED_RESET) {
  3489. ql_dbg(ql_dbg_p3p, base_vha, 0xb07a,
  3490. "Nic Core f/w is alive.\n");
  3491. return;
  3492. }
  3493. }
  3494. ha->flags.nic_core_reset_hdlr_active = 1;
  3495. if (qla83xx_nic_core_reset(base_vha)) {
  3496. /* NIC Core reset failed. */
  3497. ql_dbg(ql_dbg_p3p, base_vha, 0xb061,
  3498. "NIC Core reset failed.\n");
  3499. }
  3500. ha->flags.nic_core_reset_hdlr_active = 0;
  3501. }
  3502. }
  3503. /* Work: Handle 8200 IDC aens */
  3504. void
  3505. qla83xx_service_idc_aen(struct work_struct *work)
  3506. {
  3507. struct qla_hw_data *ha =
  3508. container_of(work, struct qla_hw_data, idc_aen);
  3509. scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
  3510. uint32_t dev_state, idc_control;
  3511. qla83xx_idc_lock(base_vha, 0);
  3512. qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
  3513. qla83xx_rd_reg(base_vha, QLA83XX_IDC_CONTROL, &idc_control);
  3514. qla83xx_idc_unlock(base_vha, 0);
  3515. if (dev_state == QLA8XXX_DEV_NEED_RESET) {
  3516. if (idc_control & QLA83XX_IDC_GRACEFUL_RESET) {
  3517. ql_dbg(ql_dbg_p3p, base_vha, 0xb062,
  3518. "Application requested NIC Core Reset.\n");
  3519. qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET);
  3520. } else if (qla83xx_check_nic_core_fw_alive(base_vha) ==
  3521. QLA_SUCCESS) {
  3522. ql_dbg(ql_dbg_p3p, base_vha, 0xb07b,
  3523. "Other protocol driver requested NIC Core Reset.\n");
  3524. qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET);
  3525. }
  3526. } else if (dev_state == QLA8XXX_DEV_FAILED ||
  3527. dev_state == QLA8XXX_DEV_NEED_QUIESCENT) {
  3528. qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER);
  3529. }
  3530. }
  3531. static void
  3532. qla83xx_wait_logic(void)
  3533. {
  3534. int i;
  3535. /* Yield CPU */
  3536. if (!in_interrupt()) {
/*
 * Wait about 100ms before retrying again.
 * This controls the number of retries for single
 * lock operation.
 */
  3542. msleep(100);
  3543. schedule();
  3544. } else {
  3545. for (i = 0; i < 20; i++)
cpu_relax(); /* This is a nop instr on i386 */
  3547. }
  3548. }
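/*
 * qla83xx_force_lock_recovery - Forcefully recover a stuck IDC lock.
 *
 * Walks the lock-recovery stages in QLA83XX_IDC_LOCK_RECOVERY: claim
 * recovery ownership for this port, then force an IDC unlock and
 * clear the lock-id if ownership was won.
 */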
  3549. static int
  3550. qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha)
  3551. {
  3552. int rval;
  3553. uint32_t data;
  3554. uint32_t idc_lck_rcvry_stage_mask = 0x3;
  3555. uint32_t idc_lck_rcvry_owner_mask = 0x3c;
  3556. struct qla_hw_data *ha = base_vha->hw;
  3557. ql_dbg(ql_dbg_p3p, base_vha, 0xb086,
  3558. "Trying force recovery of the IDC lock.\n");
  3559. rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, &data);
  3560. if (rval)
  3561. return rval;
  3562. if ((data & idc_lck_rcvry_stage_mask) > 0) {
  3563. return QLA_SUCCESS;
  3564. } else {
  3565. data = (IDC_LOCK_RECOVERY_STAGE1) | (ha->portnum << 2);
  3566. rval = qla83xx_wr_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY,
  3567. data);
  3568. if (rval)
  3569. return rval;
  3570. msleep(200);
  3571. rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY,
  3572. &data);
  3573. if (rval)
  3574. return rval;
  3575. if (((data & idc_lck_rcvry_owner_mask) >> 2) == ha->portnum) {
  3576. data &= (IDC_LOCK_RECOVERY_STAGE2 |
  3577. ~(idc_lck_rcvry_stage_mask));
  3578. rval = qla83xx_wr_reg(base_vha,
  3579. QLA83XX_IDC_LOCK_RECOVERY, data);
  3580. if (rval)
  3581. return rval;
  3582. /* Forcefully perform IDC UnLock */
  3583. rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK,
  3584. &data);
  3585. if (rval)
  3586. return rval;
  3587. /* Clear lock-id by setting 0xff */
  3588. rval = qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID,
  3589. 0xff);
  3590. if (rval)
  3591. return rval;
  3592. /* Clear lock-recovery by setting 0x0 */
  3593. rval = qla83xx_wr_reg(base_vha,
  3594. QLA83XX_IDC_LOCK_RECOVERY, 0x0);
  3595. if (rval)
  3596. return rval;
  3597. } else
  3598. return QLA_SUCCESS;
  3599. }
  3600. return rval;
  3601. }
  3602. static int
  3603. qla83xx_idc_lock_recovery(scsi_qla_host_t *base_vha)
  3604. {
  3605. int rval = QLA_SUCCESS;
  3606. uint32_t o_drv_lockid, n_drv_lockid;
  3607. unsigned long lock_recovery_timeout;
  3608. lock_recovery_timeout = jiffies + QLA83XX_MAX_LOCK_RECOVERY_WAIT;
  3609. retry_lockid:
  3610. rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &o_drv_lockid);
  3611. if (rval)
  3612. goto exit;
  3613. /* MAX wait time before forcing IDC Lock recovery = 2 secs */
  3614. if (time_after_eq(jiffies, lock_recovery_timeout)) {
  3615. if (qla83xx_force_lock_recovery(base_vha) == QLA_SUCCESS)
  3616. return QLA_SUCCESS;
  3617. else
  3618. return QLA_FUNCTION_FAILED;
  3619. }
  3620. rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &n_drv_lockid);
  3621. if (rval)
  3622. goto exit;
  3623. if (o_drv_lockid == n_drv_lockid) {
  3624. qla83xx_wait_logic();
  3625. goto retry_lockid;
  3626. } else
  3627. return QLA_SUCCESS;
  3628. exit:
  3629. return rval;
  3630. }
  3631. void
  3632. qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id)
  3633. {
  3634. uint16_t options = (requester_id << 15) | BIT_6;
  3635. uint32_t data;
  3636. uint32_t lock_owner;
  3637. struct qla_hw_data *ha = base_vha->hw;
  3638. /* IDC-lock implementation using driver-lock/lock-id remote registers */
  3639. retry_lock:
  3640. if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCK, &data)
  3641. == QLA_SUCCESS) {
  3642. if (data) {
  3643. /* Setting lock-id to our function-number */
  3644. qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID,
  3645. ha->portnum);
  3646. } else {
  3647. qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID,
  3648. &lock_owner);
  3649. ql_dbg(ql_dbg_p3p, base_vha, 0xb063,
  3650. "Failed to acquire IDC lock, acquired by %d, "
  3651. "retrying...\n", lock_owner);
  3652. /* Retry/Perform IDC-Lock recovery */
  3653. if (qla83xx_idc_lock_recovery(base_vha)
  3654. == QLA_SUCCESS) {
  3655. qla83xx_wait_logic();
  3656. goto retry_lock;
  3657. } else
  3658. ql_log(ql_log_warn, base_vha, 0xb075,
  3659. "IDC Lock recovery FAILED.\n");
  3660. }
  3661. }
  3662. return;
  3663. /* XXX: IDC-lock implementation using access-control mbx */
  3664. retry_lock2:
  3665. if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) {
  3666. ql_dbg(ql_dbg_p3p, base_vha, 0xb072,
  3667. "Failed to acquire IDC lock. retrying...\n");
  3668. /* Retry/Perform IDC-Lock recovery */
  3669. if (qla83xx_idc_lock_recovery(base_vha) == QLA_SUCCESS) {
  3670. qla83xx_wait_logic();
  3671. goto retry_lock2;
  3672. } else
  3673. ql_log(ql_log_warn, base_vha, 0xb076,
  3674. "IDC Lock recovery FAILED.\n");
  3675. }
  3676. return;
  3677. }
  3678. void
  3679. qla83xx_idc_unlock(scsi_qla_host_t *base_vha, uint16_t requester_id)
  3680. {
  3681. uint16_t options = (requester_id << 15) | BIT_7, retry;
  3682. uint32_t data;
  3683. struct qla_hw_data *ha = base_vha->hw;
  3684. /* IDC-unlock implementation using driver-unlock/lock-id
  3685. * remote registers
  3686. */
  3687. retry = 0;
  3688. retry_unlock:
  3689. if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &data)
  3690. == QLA_SUCCESS) {
  3691. if (data == ha->portnum) {
  3692. qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK, &data);
  3693. /* Clearing lock-id by setting 0xff */
  3694. qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 0xff);
  3695. } else if (retry < 10) {
  3696. /* SV: XXX: IDC unlock retrying needed here? */
  3697. /* Retry for IDC-unlock */
  3698. qla83xx_wait_logic();
  3699. retry++;
  3700. ql_dbg(ql_dbg_p3p, base_vha, 0xb064,
  3701. "Failed to release IDC lock, retyring=%d\n", retry);
  3702. goto retry_unlock;
  3703. }
  3704. } else if (retry < 10) {
  3705. /* Retry for IDC-unlock */
  3706. qla83xx_wait_logic();
  3707. retry++;
  3708. ql_dbg(ql_dbg_p3p, base_vha, 0xb065,
  3709. "Failed to read drv-lockid, retyring=%d\n", retry);
  3710. goto retry_unlock;
  3711. }
  3712. return;
  3713. /* XXX: IDC-unlock implementation using access-control mbx */
  3714. retry = 0;
  3715. retry_unlock2:
  3716. if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) {
  3717. if (retry < 10) {
  3718. /* Retry for IDC-unlock */
  3719. qla83xx_wait_logic();
  3720. retry++;
  3721. ql_dbg(ql_dbg_p3p, base_vha, 0xb066,
  3722. "Failed to release IDC lock, retyring=%d\n", retry);
  3723. goto retry_unlock2;
  3724. }
  3725. }
  3726. return;
  3727. }
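/*
 * DRV-PRESENCE helpers: each function advertises itself by setting
 * bit (1 << ha->portnum) in the QLA83XX_IDC_DRV_PRESENCE register.
 * The __qla83xx_* variants assume the IDC lock is already held; the
 * plain variants take and release it around the update.
 */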
  3728. int
  3729. __qla83xx_set_drv_presence(scsi_qla_host_t *vha)
  3730. {
  3731. int rval = QLA_SUCCESS;
  3732. struct qla_hw_data *ha = vha->hw;
  3733. uint32_t drv_presence;
  3734. rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
  3735. if (rval == QLA_SUCCESS) {
  3736. drv_presence |= (1 << ha->portnum);
  3737. rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
  3738. drv_presence);
  3739. }
  3740. return rval;
  3741. }
  3742. int
  3743. qla83xx_set_drv_presence(scsi_qla_host_t *vha)
  3744. {
  3745. int rval = QLA_SUCCESS;
  3746. qla83xx_idc_lock(vha, 0);
  3747. rval = __qla83xx_set_drv_presence(vha);
  3748. qla83xx_idc_unlock(vha, 0);
  3749. return rval;
  3750. }
  3751. int
  3752. __qla83xx_clear_drv_presence(scsi_qla_host_t *vha)
  3753. {
  3754. int rval = QLA_SUCCESS;
  3755. struct qla_hw_data *ha = vha->hw;
  3756. uint32_t drv_presence;
  3757. rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
  3758. if (rval == QLA_SUCCESS) {
  3759. drv_presence &= ~(1 << ha->portnum);
  3760. rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
  3761. drv_presence);
  3762. }
  3763. return rval;
  3764. }
  3765. int
  3766. qla83xx_clear_drv_presence(scsi_qla_host_t *vha)
  3767. {
  3768. int rval = QLA_SUCCESS;
  3769. qla83xx_idc_lock(vha, 0);
  3770. rval = __qla83xx_clear_drv_presence(vha);
  3771. qla83xx_idc_unlock(vha, 0);
  3772. return rval;
  3773. }
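/*
 * qla83xx_need_reset_handler - Handle the NEED_RESET IDC state.
 *
 * Waits up to ha->fcoe_reset_timeout seconds for every function in
 * DRV-PRESENCE to set its DRV-ACK bit; functions that do not ack in
 * time are dropped from DRV-PRESENCE. The device state is then moved
 * to COLD so the reset owner can bootstrap the NIC core firmware.
 */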
  3774. static void
  3775. qla83xx_need_reset_handler(scsi_qla_host_t *vha)
  3776. {
  3777. struct qla_hw_data *ha = vha->hw;
  3778. uint32_t drv_ack, drv_presence;
  3779. unsigned long ack_timeout;
  3780. /* Wait for IDC ACK from all functions (DRV-ACK == DRV-PRESENCE) */
  3781. ack_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
  3782. while (1) {
  3783. qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
  3784. qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
  3785. if ((drv_ack & drv_presence) == drv_presence)
  3786. break;
  3787. if (time_after_eq(jiffies, ack_timeout)) {
  3788. ql_log(ql_log_warn, vha, 0xb067,
  3789. "RESET ACK TIMEOUT! drv_presence=0x%x "
  3790. "drv_ack=0x%x\n", drv_presence, drv_ack);
  3791. /*
  3792. * The function(s) which did not ack in time are forced
  3793. * to withdraw any further participation in the IDC
  3794. * reset.
  3795. */
  3796. if (drv_ack != drv_presence)
  3797. qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
  3798. drv_ack);
  3799. break;
  3800. }
  3801. qla83xx_idc_unlock(vha, 0);
  3802. msleep(1000);
  3803. qla83xx_idc_lock(vha, 0);
  3804. }
  3805. qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_COLD);
  3806. ql_log(ql_log_info, vha, 0xb068, "HW State: COLD/RE-INIT.\n");
  3807. }
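/*
 * qla83xx_device_bootstrap - Restart the NIC core firmware.
 *
 * Moves the IDC device state to INITIALIZING, clears the graceful
 * reset request in IDC-Control, restarts the NIC firmware (dropping
 * the IDC lock around the mailbox call), and sets the final state
 * to READY or FAILED based on the result.
 */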
  3808. static int
  3809. qla83xx_device_bootstrap(scsi_qla_host_t *vha)
  3810. {
  3811. int rval = QLA_SUCCESS;
  3812. uint32_t idc_control;
  3813. qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_INITIALIZING);
  3814. ql_log(ql_log_info, vha, 0xb069, "HW State: INITIALIZING.\n");
  3815. /* Clearing IDC-Control Graceful-Reset Bit before resetting f/w */
  3816. __qla83xx_get_idc_control(vha, &idc_control);
  3817. idc_control &= ~QLA83XX_IDC_GRACEFUL_RESET;
  3818. __qla83xx_set_idc_control(vha, 0);
  3819. qla83xx_idc_unlock(vha, 0);
  3820. rval = qla83xx_restart_nic_firmware(vha);
  3821. qla83xx_idc_lock(vha, 0);
  3822. if (rval != QLA_SUCCESS) {
  3823. ql_log(ql_log_fatal, vha, 0xb06a,
  3824. "Failed to restart NIC f/w.\n");
  3825. qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_FAILED);
  3826. ql_log(ql_log_info, vha, 0xb06b, "HW State: FAILED.\n");
  3827. } else {
  3828. ql_dbg(ql_dbg_p3p, vha, 0xb06c,
  3829. "Success in restarting nic f/w.\n");
  3830. qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_READY);
  3831. ql_log(ql_log_info, vha, 0xb06d, "HW State: READY.\n");
  3832. }
  3833. return rval;
  3834. }
  3835. /* Assumes idc_lock always held on entry */
  3836. int
  3837. qla83xx_idc_state_handler(scsi_qla_host_t *base_vha)
  3838. {
  3839. struct qla_hw_data *ha = base_vha->hw;
  3840. int rval = QLA_SUCCESS;
  3841. unsigned long dev_init_timeout;
  3842. uint32_t dev_state;
  3843. /* Wait for MAX-INIT-TIMEOUT for the device to go ready */
  3844. dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ);
  3845. while (1) {
  3846. if (time_after_eq(jiffies, dev_init_timeout)) {
  3847. ql_log(ql_log_warn, base_vha, 0xb06e,
  3848. "Initialization TIMEOUT!\n");
  3849. /* Init timeout. Disable further NIC Core
  3850. * communication.
  3851. */
  3852. qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE,
  3853. QLA8XXX_DEV_FAILED);
  3854. ql_log(ql_log_info, base_vha, 0xb06f,
  3855. "HW State: FAILED.\n");
  3856. }
  3857. qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
  3858. switch (dev_state) {
  3859. case QLA8XXX_DEV_READY:
  3860. if (ha->flags.nic_core_reset_owner)
  3861. qla83xx_idc_audit(base_vha,
  3862. IDC_AUDIT_COMPLETION);
  3863. ha->flags.nic_core_reset_owner = 0;
  3864. ql_dbg(ql_dbg_p3p, base_vha, 0xb070,
  3865. "Reset_owner reset by 0x%x.\n",
  3866. ha->portnum);
  3867. goto exit;
  3868. case QLA8XXX_DEV_COLD:
  3869. if (ha->flags.nic_core_reset_owner)
  3870. rval = qla83xx_device_bootstrap(base_vha);
  3871. else {
  3872. /* Wait for AEN to change device-state */
  3873. qla83xx_idc_unlock(base_vha, 0);
  3874. msleep(1000);
  3875. qla83xx_idc_lock(base_vha, 0);
  3876. }
  3877. break;
  3878. case QLA8XXX_DEV_INITIALIZING:
  3879. /* Wait for AEN to change device-state */
  3880. qla83xx_idc_unlock(base_vha, 0);
  3881. msleep(1000);
  3882. qla83xx_idc_lock(base_vha, 0);
  3883. break;
  3884. case QLA8XXX_DEV_NEED_RESET:
  3885. if (!ql2xdontresethba && ha->flags.nic_core_reset_owner)
  3886. qla83xx_need_reset_handler(base_vha);
  3887. else {
  3888. /* Wait for AEN to change device-state */
  3889. qla83xx_idc_unlock(base_vha, 0);
  3890. msleep(1000);
  3891. qla83xx_idc_lock(base_vha, 0);
  3892. }
  3893. /* reset timeout value after need reset handler */
  3894. dev_init_timeout = jiffies +
  3895. (ha->fcoe_dev_init_timeout * HZ);
  3896. break;
  3897. case QLA8XXX_DEV_NEED_QUIESCENT:
  3898. /* XXX: DEBUG for now */
  3899. qla83xx_idc_unlock(base_vha, 0);
  3900. msleep(1000);
  3901. qla83xx_idc_lock(base_vha, 0);
  3902. break;
  3903. case QLA8XXX_DEV_QUIESCENT:
  3904. /* XXX: DEBUG for now */
  3905. if (ha->flags.quiesce_owner)
  3906. goto exit;
  3907. qla83xx_idc_unlock(base_vha, 0);
  3908. msleep(1000);
  3909. qla83xx_idc_lock(base_vha, 0);
  3910. dev_init_timeout = jiffies +
  3911. (ha->fcoe_dev_init_timeout * HZ);
  3912. break;
  3913. case QLA8XXX_DEV_FAILED:
  3914. if (ha->flags.nic_core_reset_owner)
  3915. qla83xx_idc_audit(base_vha,
  3916. IDC_AUDIT_COMPLETION);
  3917. ha->flags.nic_core_reset_owner = 0;
  3918. __qla83xx_clear_drv_presence(base_vha);
  3919. qla83xx_idc_unlock(base_vha, 0);
  3920. qla8xxx_dev_failed_handler(base_vha);
  3921. rval = QLA_FUNCTION_FAILED;
  3922. qla83xx_idc_lock(base_vha, 0);
  3923. goto exit;
  3924. case QLA8XXX_BAD_VALUE:
  3925. qla83xx_idc_unlock(base_vha, 0);
  3926. msleep(1000);
  3927. qla83xx_idc_lock(base_vha, 0);
  3928. break;
  3929. default:
  3930. ql_log(ql_log_warn, base_vha, 0xb071,
  3931. "Unknow Device State: %x.\n", dev_state);
  3932. qla83xx_idc_unlock(base_vha, 0);
  3933. qla8xxx_dev_failed_handler(base_vha);
  3934. rval = QLA_FUNCTION_FAILED;
  3935. qla83xx_idc_lock(base_vha, 0);
  3936. goto exit;
  3937. }
  3938. }
  3939. exit:
  3940. return rval;
  3941. }
/**************************************************************************
* qla2x00_do_dpc
*	This kernel thread is a task that is scheduled by the interrupt
*	handler to perform the background processing for interrupts.
*
* Notes:
*	This task always runs in the context of a kernel thread. It is
*	kicked off by the driver's detect code and starts up one per
*	adapter. It immediately goes to sleep and waits for some fibre
*	event. When either the interrupt handler or the timer routine
*	detects an event, it will set one of the task bits and then
*	wake us up.
**************************************************************************/
  3955. static int
  3956. qla2x00_do_dpc(void *data)
  3957. {
  3958. int rval;
  3959. scsi_qla_host_t *base_vha;
  3960. struct qla_hw_data *ha;
  3961. ha = (struct qla_hw_data *)data;
  3962. base_vha = pci_get_drvdata(ha->pdev);
  3963. set_user_nice(current, -20);
  3964. set_current_state(TASK_INTERRUPTIBLE);
  3965. while (!kthread_should_stop()) {
  3966. ql_dbg(ql_dbg_dpc, base_vha, 0x4000,
  3967. "DPC handler sleeping.\n");
  3968. schedule();
  3969. __set_current_state(TASK_RUNNING);
  3970. if (!base_vha->flags.init_done || ha->flags.mbox_busy)
  3971. goto end_loop;
  3972. if (ha->flags.eeh_busy) {
  3973. ql_dbg(ql_dbg_dpc, base_vha, 0x4003,
  3974. "eeh_busy=%d.\n", ha->flags.eeh_busy);
  3975. goto end_loop;
  3976. }
  3977. ha->dpc_active = 1;
  3978. ql_dbg(ql_dbg_dpc + ql_dbg_verbose, base_vha, 0x4001,
  3979. "DPC handler waking up, dpc_flags=0x%lx.\n",
  3980. base_vha->dpc_flags);
  3981. qla2x00_do_work(base_vha);
  3982. if (IS_QLA82XX(ha)) {
  3983. if (test_and_clear_bit(ISP_UNRECOVERABLE,
  3984. &base_vha->dpc_flags)) {
  3985. qla82xx_idc_lock(ha);
  3986. qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
  3987. QLA8XXX_DEV_FAILED);
  3988. qla82xx_idc_unlock(ha);
  3989. ql_log(ql_log_info, base_vha, 0x4004,
  3990. "HW State: FAILED.\n");
  3991. qla82xx_device_state_handler(base_vha);
  3992. continue;
  3993. }
  3994. if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED,
  3995. &base_vha->dpc_flags)) {
  3996. ql_dbg(ql_dbg_dpc, base_vha, 0x4005,
  3997. "FCoE context reset scheduled.\n");
  3998. if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
  3999. &base_vha->dpc_flags))) {
  4000. if (qla82xx_fcoe_ctx_reset(base_vha)) {
  4001. /* FCoE-ctx reset failed.
  4002. * Escalate to chip-reset
  4003. */
  4004. set_bit(ISP_ABORT_NEEDED,
  4005. &base_vha->dpc_flags);
  4006. }
  4007. clear_bit(ABORT_ISP_ACTIVE,
  4008. &base_vha->dpc_flags);
  4009. }
  4010. ql_dbg(ql_dbg_dpc, base_vha, 0x4006,
  4011. "FCoE context reset end.\n");
  4012. }
  4013. }
		if (test_and_clear_bit(ISP_ABORT_NEEDED,
						&base_vha->dpc_flags)) {

			ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
			    "ISP abort scheduled.\n");
			if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
			    &base_vha->dpc_flags))) {

				if (ha->isp_ops->abort_isp(base_vha)) {
					/* failed. retry later */
					set_bit(ISP_ABORT_NEEDED,
					    &base_vha->dpc_flags);
				}
				clear_bit(ABORT_ISP_ACTIVE,
						&base_vha->dpc_flags);
			}

			ql_dbg(ql_dbg_dpc, base_vha, 0x4008,
			    "ISP abort end.\n");
		}

		if (test_and_clear_bit(FCPORT_UPDATE_NEEDED,
		    &base_vha->dpc_flags)) {
			qla2x00_update_fcports(base_vha);
		}

		if (test_bit(SCR_PENDING, &base_vha->dpc_flags)) {
			int ret;

			ret = qla2x00_send_change_request(base_vha, 0x3, 0);
			if (ret != QLA_SUCCESS)
				ql_log(ql_log_warn, base_vha, 0x121,
				    "Failed to enable receiving of RSCN "
				    "requests: 0x%x.\n", ret);
			clear_bit(SCR_PENDING, &base_vha->dpc_flags);
		}

		if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
			ql_dbg(ql_dbg_dpc, base_vha, 0x4009,
			    "Quiescence mode scheduled.\n");
			if (IS_QLA82XX(ha)) {
				qla82xx_device_state_handler(base_vha);
				clear_bit(ISP_QUIESCE_NEEDED,
				    &base_vha->dpc_flags);
				if (!ha->flags.quiesce_owner) {
					qla2x00_perform_loop_resync(base_vha);

					qla82xx_idc_lock(ha);
					qla82xx_clear_qsnt_ready(base_vha);
					qla82xx_idc_unlock(ha);
				}
			} else {
				clear_bit(ISP_QUIESCE_NEEDED,
				    &base_vha->dpc_flags);
				qla2x00_quiesce_io(base_vha);
			}
			ql_dbg(ql_dbg_dpc, base_vha, 0x400a,
			    "Quiescence mode end.\n");
		}

		if (test_and_clear_bit(RESET_MARKER_NEEDED,
				&base_vha->dpc_flags) &&
		    (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) {

			ql_dbg(ql_dbg_dpc, base_vha, 0x400b,
			    "Reset marker scheduled.\n");
			qla2x00_rst_aen(base_vha);
			clear_bit(RESET_ACTIVE, &base_vha->dpc_flags);
			ql_dbg(ql_dbg_dpc, base_vha, 0x400c,
			    "Reset marker end.\n");
		}

		/* Retry each device up to login retry count */
		if ((test_and_clear_bit(RELOGIN_NEEDED,
						&base_vha->dpc_flags)) &&
		    !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) &&
		    atomic_read(&base_vha->loop_state) != LOOP_DOWN) {

			ql_dbg(ql_dbg_dpc, base_vha, 0x400d,
			    "Relogin scheduled.\n");
			qla2x00_relogin(base_vha);
			ql_dbg(ql_dbg_dpc, base_vha, 0x400e,
			    "Relogin end.\n");
		}

		if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
							&base_vha->dpc_flags)) {

			ql_dbg(ql_dbg_dpc, base_vha, 0x400f,
			    "Loop resync scheduled.\n");

			if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE,
			    &base_vha->dpc_flags))) {

				rval = qla2x00_loop_resync(base_vha);

				clear_bit(LOOP_RESYNC_ACTIVE,
						&base_vha->dpc_flags);
			}

			ql_dbg(ql_dbg_dpc, base_vha, 0x4010,
			    "Loop resync end.\n");
		}

		if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) &&
		    atomic_read(&base_vha->loop_state) == LOOP_READY) {
			clear_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags);
			qla2xxx_flash_npiv_conf(base_vha);
		}
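
		/*
		 * Remaining per-pass housekeeping: host queue-depth ramping,
		 * re-enabling interrupts if they were left disabled, beacon
		 * LED blinking, and DPC work for any virtual ports.
		 */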
		if (test_and_clear_bit(HOST_RAMP_DOWN_QUEUE_DEPTH,
		    &base_vha->dpc_flags)) {
			/* Prevents simultaneous ramp up and down */
			clear_bit(HOST_RAMP_UP_QUEUE_DEPTH,
			    &base_vha->dpc_flags);
			qla2x00_host_ramp_down_queuedepth(base_vha);
		}

		if (test_and_clear_bit(HOST_RAMP_UP_QUEUE_DEPTH,
		    &base_vha->dpc_flags))
			qla2x00_host_ramp_up_queuedepth(base_vha);

		if (!ha->interrupts_on)
			ha->isp_ops->enable_intrs(ha);

		if (test_and_clear_bit(BEACON_BLINK_NEEDED,
					&base_vha->dpc_flags))
			ha->isp_ops->beacon_blink(base_vha);

		qla2x00_do_dpc_all_vps(base_vha);

		ha->dpc_active = 0;
end_loop:
		set_current_state(TASK_INTERRUPTIBLE);
	} /* End of while(1) */
	__set_current_state(TASK_RUNNING);

	ql_dbg(ql_dbg_dpc, base_vha, 0x4011,
	    "DPC handler exiting.\n");

	/*
	 * Make sure that nobody tries to wake us up again.
	 */
	ha->dpc_active = 0;

	/* Cleanup any residual CTX SRBs. */
	qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);

	return 0;
}
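
/*
 * Wake the per-adapter DPC thread so it can service the dpc_flags set by
 * the interrupt handler or the timer.  Skipped once UNLOADING is set so the
 * thread is left alone while the driver tears down.
 */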
void
qla2xxx_wake_dpc(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct task_struct *t = ha->dpc_thread;

	if (!test_bit(UNLOADING, &vha->dpc_flags) && t)
		wake_up_process(t);
}

/*
*  qla2x00_rst_aen
*      Processes asynchronous reset.
*
* Input:
*      vha = adapter block pointer.
*/
static void
qla2x00_rst_aen(scsi_qla_host_t *vha)
{
	if (vha->flags.online && !vha->flags.reset_active &&
	    !atomic_read(&vha->loop_down_timer) &&
	    !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) {
		do {
			clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

			/*
			 * Issue marker command only when we are going to start
			 * the I/O.
			 */
			vha->marker_needed = 1;
		} while (!atomic_read(&vha->loop_down_timer) &&
		    (test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags)));
	}
}

/**************************************************************************
*   qla2x00_timer
*
* Description:
*   One second timer
*
* Context: Interrupt
***************************************************************************/
void
qla2x00_timer(scsi_qla_host_t *vha)
{
	unsigned long cpu_flags = 0;
	int start_dpc = 0;
	int index;
	srb_t *sp;
	uint16_t w;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;

	if (ha->flags.eeh_busy) {
		ql_dbg(ql_dbg_timer, vha, 0x6000,
		    "EEH = %d, restarting timer.\n",
		    ha->flags.eeh_busy);
		qla2x00_restart_timer(vha, WATCH_INTERVAL);
		return;
	}

	/* Hardware read to raise pending EEH errors during mailbox waits. */
	if (!pci_channel_offline(ha->pdev))
		pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);

	/* Make sure qla82xx_watchdog is run only for physical port */
	if (!vha->vp_idx && IS_QLA82XX(ha)) {
		if (test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags))
			start_dpc++;
		qla82xx_watchdog(vha);
	}

	/* Loop down handler. */
	if (atomic_read(&vha->loop_down_timer) > 0 &&
	    !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
	    !(test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags))
	    && vha->flags.online) {

		if (atomic_read(&vha->loop_down_timer) ==
		    vha->loop_down_abort_time) {

			ql_log(ql_log_info, vha, 0x6008,
			    "Loop down - aborting the queues before time expires.\n");

			if (!IS_QLA2100(ha) && vha->link_down_timeout)
				atomic_set(&vha->loop_state, LOOP_DEAD);

			/*
			 * Schedule an ISP abort to return any FCP2-device
			 * commands.
			 */
			/* NPIV - scan physical port only */
			if (!vha->vp_idx) {
				spin_lock_irqsave(&ha->hardware_lock,
				    cpu_flags);
				req = ha->req_q_map[0];
				for (index = 1;
				    index < req->num_outstanding_cmds;
				    index++) {
					fc_port_t *sfcp;

					sp = req->outstanding_cmds[index];
					if (!sp)
						continue;
					if (sp->type != SRB_SCSI_CMD)
						continue;
					sfcp = sp->fcport;
					if (!(sfcp->flags & FCF_FCP2_DEVICE))
						continue;

					if (IS_QLA82XX(ha))
						set_bit(FCOE_CTX_RESET_NEEDED,
							&vha->dpc_flags);
					else
						set_bit(ISP_ABORT_NEEDED,
							&vha->dpc_flags);
					break;
				}
				spin_unlock_irqrestore(&ha->hardware_lock,
								cpu_flags);
			}
			start_dpc++;
		}

		/* if the loop has been down for 4 minutes, reinit adapter */
		if (atomic_dec_and_test(&vha->loop_down_timer) != 0) {
			if (!(vha->device_flags & DFLG_NO_CABLE)) {
				ql_log(ql_log_warn, vha, 0x6009,
				    "Loop down - aborting ISP.\n");

				if (IS_QLA82XX(ha))
					set_bit(FCOE_CTX_RESET_NEEDED,
						&vha->dpc_flags);
				else
					set_bit(ISP_ABORT_NEEDED,
						&vha->dpc_flags);
			}
		}
		ql_dbg(ql_dbg_timer, vha, 0x600a,
		    "Loop down - seconds remaining %d.\n",
		    atomic_read(&vha->loop_down_timer));
	}

	/* Check if beacon LED needs to be blinked for physical host only */
	if (!vha->vp_idx && (ha->beacon_blink_led == 1)) {
		/* There is no beacon_blink function for ISP82xx */
		if (!IS_QLA82XX(ha)) {
			set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags);
			start_dpc++;
		}
	}

	/* Process any deferred work. */
	if (!list_empty(&vha->work_list))
		start_dpc++;

	/* Schedule the DPC routine if needed */
	if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
	    test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags) ||
	    start_dpc ||
	    test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) ||
	    test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) ||
	    test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) ||
	    test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
	    test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
	    test_bit(RELOGIN_NEEDED, &vha->dpc_flags) ||
	    test_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, &vha->dpc_flags) ||
	    test_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags))) {
		ql_dbg(ql_dbg_timer, vha, 0x600b,
		    "isp_abort_needed=%d loop_resync_needed=%d "
		    "fcport_update_needed=%d start_dpc=%d "
		    "reset_marker_needed=%d",
		    test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags),
		    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags),
		    test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags),
		    start_dpc,
		    test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags));
		ql_dbg(ql_dbg_timer, vha, 0x600c,
		    "beacon_blink_needed=%d isp_unrecoverable=%d "
		    "fcoe_ctx_reset_needed=%d vp_dpc_needed=%d "
		    "relogin_needed=%d, host_ramp_down_needed=%d "
		    "host_ramp_up_needed=%d.\n",
		    test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags),
		    test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags),
		    test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags),
		    test_bit(VP_DPC_NEEDED, &vha->dpc_flags),
		    test_bit(RELOGIN_NEEDED, &vha->dpc_flags),
		    test_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, &vha->dpc_flags),
		    test_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags));
		qla2xxx_wake_dpc(vha);
	}

	qla2x00_restart_timer(vha, WATCH_INTERVAL);
}

/* Firmware interface routines. */

#define FW_BLOBS	10
#define FW_ISP21XX	0
#define FW_ISP22XX	1
#define FW_ISP2300	2
#define FW_ISP2322	3
#define FW_ISP24XX	4
#define FW_ISP25XX	5
#define FW_ISP81XX	6
#define FW_ISP82XX	7
#define FW_ISP2031	8
#define FW_ISP8031	9

#define FW_FILE_ISP21XX	"ql2100_fw.bin"
#define FW_FILE_ISP22XX	"ql2200_fw.bin"
#define FW_FILE_ISP2300	"ql2300_fw.bin"
#define FW_FILE_ISP2322	"ql2322_fw.bin"
#define FW_FILE_ISP24XX	"ql2400_fw.bin"
#define FW_FILE_ISP25XX	"ql2500_fw.bin"
#define FW_FILE_ISP81XX	"ql8100_fw.bin"
#define FW_FILE_ISP82XX	"ql8200_fw.bin"
#define FW_FILE_ISP2031	"ql2600_fw.bin"
#define FW_FILE_ISP8031	"ql8300_fw.bin"

static DEFINE_MUTEX(qla_fw_lock);

static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
	{ .name = FW_FILE_ISP21XX, .segs = { 0x1000, 0 }, },
	{ .name = FW_FILE_ISP22XX, .segs = { 0x1000, 0 }, },
	{ .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, },
	{ .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, },
	{ .name = FW_FILE_ISP24XX, },
	{ .name = FW_FILE_ISP25XX, },
	{ .name = FW_FILE_ISP81XX, },
	{ .name = FW_FILE_ISP82XX, },
	{ .name = FW_FILE_ISP2031, },
	{ .name = FW_FILE_ISP8031, },
};
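
/*
 * Map the adapter type to its firmware blob, loading the image from
 * userspace with request_firmware() on first use.  Loaded blobs are cached
 * in qla_fw_blobs[] under qla_fw_lock and shared by all adapters of the
 * same type.
 */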
struct fw_blob *
qla2x00_request_firmware(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct fw_blob *blob;

	if (IS_QLA2100(ha)) {
		blob = &qla_fw_blobs[FW_ISP21XX];
	} else if (IS_QLA2200(ha)) {
		blob = &qla_fw_blobs[FW_ISP22XX];
	} else if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
		blob = &qla_fw_blobs[FW_ISP2300];
	} else if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
		blob = &qla_fw_blobs[FW_ISP2322];
	} else if (IS_QLA24XX_TYPE(ha)) {
		blob = &qla_fw_blobs[FW_ISP24XX];
	} else if (IS_QLA25XX(ha)) {
		blob = &qla_fw_blobs[FW_ISP25XX];
	} else if (IS_QLA81XX(ha)) {
		blob = &qla_fw_blobs[FW_ISP81XX];
	} else if (IS_QLA82XX(ha)) {
		blob = &qla_fw_blobs[FW_ISP82XX];
	} else if (IS_QLA2031(ha)) {
		blob = &qla_fw_blobs[FW_ISP2031];
	} else if (IS_QLA8031(ha)) {
		blob = &qla_fw_blobs[FW_ISP8031];
	} else {
		return NULL;
	}

	mutex_lock(&qla_fw_lock);
	if (blob->fw)
		goto out;

	if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) {
		ql_log(ql_log_warn, vha, 0x0063,
		    "Failed to load firmware image (%s).\n", blob->name);
		blob->fw = NULL;
		blob = NULL;
		goto out;
	}

out:
	mutex_unlock(&qla_fw_lock);
	return blob;
}
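
/* release_firmware() treats a NULL pointer as a no-op, so blobs that were
 * never requested are safe to pass here. */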
static void
qla2x00_release_firmware(void)
{
	int idx;

	mutex_lock(&qla_fw_lock);
	for (idx = 0; idx < FW_BLOBS; idx++)
		release_firmware(qla_fw_blobs[idx].fw);
	mutex_unlock(&qla_fw_lock);
}
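
/*
 * PCI error recovery (AER/EEH) callbacks.  The PCI core calls
 * error_detected() first; depending on the channel state it may then call
 * mmio_enabled() and/or slot_reset(), and finally resume() once normal
 * traffic can flow again.
 */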
static pci_ers_result_t
qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	scsi_qla_host_t *vha = pci_get_drvdata(pdev);
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_aer, vha, 0x9000,
	    "PCI error detected, state %x.\n", state);

	switch (state) {
	case pci_channel_io_normal:
		ha->flags.eeh_busy = 0;
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		ha->flags.eeh_busy = 1;
		/* For ISP82XX complete any pending mailbox cmd */
		if (IS_QLA82XX(ha)) {
			ha->flags.isp82xx_fw_hung = 1;
			ql_dbg(ql_dbg_aer, vha, 0x9001, "Pci channel io frozen\n");
			qla82xx_clear_pending_mbx(vha);
		}
		qla2x00_free_irqs(vha);
		pci_disable_device(pdev);
		/* Return back all IOs */
		qla2x00_abort_all_cmds(vha, DID_RESET << 16);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		ha->flags.pci_channel_io_perm_failure = 1;
		qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
		return PCI_ERS_RESULT_DISCONNECT;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t
qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
{
	int risc_paused = 0;
	uint32_t stat;
	unsigned long flags;
	scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
	struct qla_hw_data *ha = base_vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;

	if (IS_QLA82XX(ha))
		return PCI_ERS_RESULT_RECOVERED;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		stat = RD_REG_DWORD(&reg->hccr);
		if (stat & HCCR_RISC_PAUSE)
			risc_paused = 1;
	} else if (IS_QLA23XX(ha)) {
		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
		if (stat & HSR_RISC_PAUSED)
			risc_paused = 1;
	} else if (IS_FWI2_CAPABLE(ha)) {
		stat = RD_REG_DWORD(&reg24->host_status);
		if (stat & HSRX_RISC_PAUSED)
			risc_paused = 1;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (risc_paused) {
		ql_log(ql_log_info, base_vha, 0x9003,
		    "RISC paused -- mmio_enabled, Dumping firmware.\n");
		ha->isp_ops->fw_dump(base_vha, 0);

		return PCI_ERS_RESULT_NEED_RESET;
	} else
		return PCI_ERS_RESULT_RECOVERED;
}
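
/*
 * ISP82xx recovery after a slot reset.  If no lower-numbered PCI function
 * is enabled, this function becomes the IDC reset owner and restarts the
 * firmware; otherwise it only restarts its own ISP once the device state
 * already reports READY.
 */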
static uint32_t
qla82xx_error_recovery(scsi_qla_host_t *base_vha)
{
	uint32_t rval = QLA_FUNCTION_FAILED;
	uint32_t drv_active = 0;
	struct qla_hw_data *ha = base_vha->hw;
	int fn;
	struct pci_dev *other_pdev = NULL;

	ql_dbg(ql_dbg_aer, base_vha, 0x9006,
	    "Entered %s.\n", __func__);

	set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);

	if (base_vha->flags.online) {
		/* Abort all outstanding commands,
		 * so as to be requeued later */
		qla2x00_abort_isp_cleanup(base_vha);
	}

	fn = PCI_FUNC(ha->pdev->devfn);
	while (fn > 0) {
		fn--;
		ql_dbg(ql_dbg_aer, base_vha, 0x9007,
		    "Finding pci device at function = 0x%x.\n", fn);
		other_pdev =
		    pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
		    ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
		    fn));

		if (!other_pdev)
			continue;
		if (atomic_read(&other_pdev->enable_cnt)) {
			ql_dbg(ql_dbg_aer, base_vha, 0x9008,
			    "Found PCI func available and enable at 0x%x.\n",
			    fn);
			pci_dev_put(other_pdev);
			break;
		}
		pci_dev_put(other_pdev);
	}

	if (!fn) {
		/* Reset owner */
		ql_dbg(ql_dbg_aer, base_vha, 0x9009,
		    "This devfn is reset owner = 0x%x.\n",
		    ha->pdev->devfn);
		qla82xx_idc_lock(ha);

		qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
		    QLA8XXX_DEV_INITIALIZING);

		qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION,
		    QLA82XX_IDC_VERSION);

		drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
		ql_dbg(ql_dbg_aer, base_vha, 0x900a,
		    "drv_active = 0x%x.\n", drv_active);

		qla82xx_idc_unlock(ha);
		/* Reset if device is not already reset
		 * drv_active would be 0 if a reset has already been done
		 */
		if (drv_active)
			rval = qla82xx_start_firmware(base_vha);
		else
			rval = QLA_SUCCESS;
		qla82xx_idc_lock(ha);

		if (rval != QLA_SUCCESS) {
			ql_log(ql_log_info, base_vha, 0x900b,
			    "HW State: FAILED.\n");
			qla82xx_clear_drv_active(ha);
			qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
			    QLA8XXX_DEV_FAILED);
		} else {
			ql_log(ql_log_info, base_vha, 0x900c,
			    "HW State: READY.\n");
			qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
			    QLA8XXX_DEV_READY);
			qla82xx_idc_unlock(ha);
			ha->flags.isp82xx_fw_hung = 0;
			rval = qla82xx_restart_isp(base_vha);
			qla82xx_idc_lock(ha);
			/* Clear driver state register */
			qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0);
			qla82xx_set_drv_active(base_vha);
		}
		qla82xx_idc_unlock(ha);
	} else {
		ql_dbg(ql_dbg_aer, base_vha, 0x900d,
		    "This devfn is not reset owner = 0x%x.\n",
		    ha->pdev->devfn);
		if ((qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
		    QLA8XXX_DEV_READY)) {
			ha->flags.isp82xx_fw_hung = 0;
			rval = qla82xx_restart_isp(base_vha);
			qla82xx_idc_lock(ha);
			qla82xx_set_drv_active(base_vha);
			qla82xx_idc_unlock(ha);
		}
	}
	clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);

	return rval;
}
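
/*
 * slot_reset: the PCI slot has been reset.  Re-enable the device, reclaim
 * the interrupt vectors, and try to bring the ISP back online before I/O
 * is resumed.
 */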
static pci_ers_result_t
qla2xxx_pci_slot_reset(struct pci_dev *pdev)
{
	pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
	scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
	struct qla_hw_data *ha = base_vha->hw;
	struct rsp_que *rsp;
	int rc, retries = 10;

	ql_dbg(ql_dbg_aer, base_vha, 0x9004,
	    "Slot Reset.\n");

	/* Workaround: qla2xxx driver code that accesses the hardware this
	 * early needs the error state to be pci_channel_io_normal.
	 * Otherwise the mailbox command times out.
	 */
	pdev->error_state = pci_channel_io_normal;

	pci_restore_state(pdev);

	/* pci_restore_state() clears the saved_state flag of the device,
	 * so save the state again to allow a later restore.
	 */
	pci_save_state(pdev);

	if (ha->mem_only)
		rc = pci_enable_device_mem(pdev);
	else
		rc = pci_enable_device(pdev);

	if (rc) {
		ql_log(ql_log_warn, base_vha, 0x9005,
		    "Can't re-enable PCI device after reset.\n");
		goto exit_slot_reset;
	}

	rsp = ha->rsp_q_map[0];
	if (qla2x00_request_irqs(ha, rsp))
		goto exit_slot_reset;

	if (ha->isp_ops->pci_config(base_vha))
		goto exit_slot_reset;

	if (IS_QLA82XX(ha)) {
		if (qla82xx_error_recovery(base_vha) == QLA_SUCCESS) {
			ret = PCI_ERS_RESULT_RECOVERED;
			goto exit_slot_reset;
		} else
			goto exit_slot_reset;
	}

	while (ha->flags.mbox_busy && retries--)
		msleep(1000);

	set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
	if (ha->isp_ops->abort_isp(base_vha) == QLA_SUCCESS)
		ret = PCI_ERS_RESULT_RECOVERED;
	clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);

exit_slot_reset:
	ql_dbg(ql_dbg_aer, base_vha, 0x900e,
	    "slot_reset return %x.\n", ret);

	return ret;
}

static void
qla2xxx_pci_resume(struct pci_dev *pdev)
{
	scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
	struct qla_hw_data *ha = base_vha->hw;
	int ret;

	ql_dbg(ql_dbg_aer, base_vha, 0x900f,
	    "pci_resume.\n");

	ret = qla2x00_wait_for_hba_online(base_vha);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_fatal, base_vha, 0x9002,
		    "The device failed to resume I/O from slot/link_reset.\n");
	}

	pci_cleanup_aer_uncorrect_error_status(pdev);

	ha->flags.eeh_busy = 0;
}

static const struct pci_error_handlers qla2xxx_err_handler = {
	.error_detected = qla2xxx_pci_error_detected,
	.mmio_enabled = qla2xxx_pci_mmio_enabled,
	.slot_reset = qla2xxx_pci_slot_reset,
	.resume = qla2xxx_pci_resume,
};

static struct pci_device_id qla2xxx_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2100) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2200) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2300) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2312) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2322) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6312) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6322) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2422) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2432) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8432) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2031) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) },
	{ 0 },
};
MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);

static struct pci_driver qla2xxx_pci_driver = {
	.name		= QLA2XXX_DRIVER_NAME,
	.driver		= {
		.owner		= THIS_MODULE,
	},
	.id_table	= qla2xxx_pci_tbl,
	.probe		= qla2x00_probe_one,
	.remove		= qla2x00_remove_one,
	.shutdown	= qla2x00_shutdown,
	.err_handler	= &qla2xxx_err_handler,
};

static struct file_operations apidev_fops = {
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};

/**
 * qla2x00_module_init - Module initialization.
 **/
static int __init
qla2x00_module_init(void)
{
	int ret = 0;

	/* Allocate cache for SRBs. */
	srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
	    SLAB_HWCACHE_ALIGN, NULL);
	if (srb_cachep == NULL) {
		ql_log(ql_log_fatal, NULL, 0x0001,
		    "Unable to allocate SRB cache...Failing load!.\n");
		return -ENOMEM;
	}

	/* Initialize target kmem_cache and mem_pools */
	ret = qlt_init();
	if (ret < 0) {
		kmem_cache_destroy(srb_cachep);
		return ret;
	} else if (ret > 0) {
		/*
		 * If initiator mode is explicitly disabled by qlt_init(),
		 * prevent scsi_transport_fc.c:fc_scsi_scan_rport() from
		 * performing scsi_scan_target() during LOOP UP event.
		 */
		qla2xxx_transport_functions.disable_target_scan = 1;
		qla2xxx_transport_vport_functions.disable_target_scan = 1;
	}

	/* Derive version string. */
	strcpy(qla2x00_version_str, QLA2XXX_VERSION);
	if (ql2xextended_error_logging)
		strcat(qla2x00_version_str, "-debug");

	qla2xxx_transport_template =
	    fc_attach_transport(&qla2xxx_transport_functions);
	if (!qla2xxx_transport_template) {
		kmem_cache_destroy(srb_cachep);
		ql_log(ql_log_fatal, NULL, 0x0002,
		    "fc_attach_transport failed...Failing load!.\n");
		qlt_exit();
		return -ENODEV;
	}

	apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops);
	if (apidev_major < 0) {
		ql_log(ql_log_fatal, NULL, 0x0003,
		    "Unable to register char device %s.\n", QLA2XXX_APIDEV);
	}

	qla2xxx_transport_vport_template =
	    fc_attach_transport(&qla2xxx_transport_vport_functions);
	if (!qla2xxx_transport_vport_template) {
		kmem_cache_destroy(srb_cachep);
		qlt_exit();
		fc_release_transport(qla2xxx_transport_template);
		ql_log(ql_log_fatal, NULL, 0x0004,
		    "fc_attach_transport vport failed...Failing load!.\n");
		return -ENODEV;
	}
	ql_log(ql_log_info, NULL, 0x0005,
	    "QLogic Fibre Channel HBA Driver: %s.\n",
	    qla2x00_version_str);
	ret = pci_register_driver(&qla2xxx_pci_driver);
	if (ret) {
		kmem_cache_destroy(srb_cachep);
		qlt_exit();
		fc_release_transport(qla2xxx_transport_template);
		fc_release_transport(qla2xxx_transport_vport_template);
		ql_log(ql_log_fatal, NULL, 0x0006,
		    "pci_register_driver failed...ret=%d Failing load!.\n",
		    ret);
	}
	return ret;
}

/**
 * qla2x00_module_exit - Module cleanup.
 **/
static void __exit
qla2x00_module_exit(void)
{
	unregister_chrdev(apidev_major, QLA2XXX_APIDEV);
	pci_unregister_driver(&qla2xxx_pci_driver);
	qla2x00_release_firmware();
	kmem_cache_destroy(srb_cachep);
	qlt_exit();
	if (ctx_cachep)
		kmem_cache_destroy(ctx_cachep);
	fc_release_transport(qla2xxx_transport_template);
	fc_release_transport(qla2xxx_transport_vport_template);
}

module_init(qla2x00_module_init);
module_exit(qla2x00_module_exit);

MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic Fibre Channel HBA Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(QLA2XXX_VERSION);
MODULE_FIRMWARE(FW_FILE_ISP21XX);
MODULE_FIRMWARE(FW_FILE_ISP22XX);
MODULE_FIRMWARE(FW_FILE_ISP2300);
MODULE_FIRMWARE(FW_FILE_ISP2322);
MODULE_FIRMWARE(FW_FILE_ISP24XX);
MODULE_FIRMWARE(FW_FILE_ISP25XX);