skd_main.c

/* Copyright 2012 STEC, Inc.
 *
 * This file is licensed under the terms of the 3-clause
 * BSD License (http://opensource.org/licenses/BSD-3-Clause)
 * or the GNU GPL-2.0 (http://www.gnu.org/licenses/gpl-2.0.html),
 * at your option. Both licenses are also available in the LICENSE file
 * distributed with this project. This file may not be copied, modified,
 * or distributed except in accordance with those terms.
 * Gordoni Waidhofer <gwaidhofer@stec-inc.com>
 * Initial Driver Design!
 * Thomas Swann <tswann@stec-inc.com>
 * Interrupt handling.
 * Ramprasad Chinthekindi <rchinthekindi@stec-inc.com>
 * biomode implementation.
 * Akhil Bhansali <abhansali@stec-inc.com>
 * Added support for DISCARD / FLUSH and FUA.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/hdreg.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <linux/version.h>
#include <linux/err.h>
#include <linux/aer.h>
#include <linux/ctype.h>
#include <linux/wait.h>
#include <linux/uio.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/sg.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <asm-generic/unaligned.h>

#include "skd_s1120.h"

static int skd_dbg_level;
static int skd_isr_comp_limit = 4;

enum {
        STEC_LINK_2_5GTS = 0,
        STEC_LINK_5GTS = 1,
        STEC_LINK_8GTS = 2,
        STEC_LINK_UNKNOWN = 0xFF
};

enum {
        SKD_FLUSH_INITIALIZER,
        SKD_FLUSH_ZERO_SIZE_FIRST,
        SKD_FLUSH_DATA_SECOND,
};

#define SKD_ASSERT(expr) \
        do { \
                if (unlikely(!(expr))) { \
                        pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
                               # expr, __FILE__, __func__, __LINE__); \
                } \
        } while (0)

#define DRV_NAME "skd"
#define DRV_VERSION "2.2.1"
#define DRV_BUILD_ID "0260"
#define PFX DRV_NAME ": "
#define DRV_BIN_VERSION 0x100
#define DRV_VER_COMPL "2.2.1." DRV_BUILD_ID

MODULE_AUTHOR("bug-reports: support@stec-inc.com");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("STEC s1120 PCIe SSD block/BIO driver (b" DRV_BUILD_ID ")");
MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);

#define PCI_VENDOR_ID_STEC 0x1B39
#define PCI_DEVICE_ID_S1120 0x0001

#define SKD_FUA_NV (1 << 1)
#define SKD_MINORS_PER_DEVICE 16

#define SKD_MAX_QUEUE_DEPTH 200u

#define SKD_PAUSE_TIMEOUT (5 * 1000)

#define SKD_N_FITMSG_BYTES (512u)

#define SKD_N_SPECIAL_CONTEXT 32u
#define SKD_N_SPECIAL_FITMSG_BYTES (128u)

/* SG elements are 32 bytes, so we can make this 4096 and still be under the
 * 128KB limit. That allows 4096*4K = 16M xfer size
 */
#define SKD_N_SG_PER_REQ_DEFAULT 256u
#define SKD_N_SG_PER_SPECIAL 256u

#define SKD_N_COMPLETION_ENTRY 256u
#define SKD_N_READ_CAP_BYTES (8u)

#define SKD_N_INTERNAL_BYTES (512u)

/* 5 bits of uniquifier, 0xF800 */
#define SKD_ID_INCR (0x400)
#define SKD_ID_TABLE_MASK (3u << 8u)
#define SKD_ID_RW_REQUEST (0u << 8u)
#define SKD_ID_INTERNAL (1u << 8u)
#define SKD_ID_SPECIAL_REQUEST (2u << 8u)
#define SKD_ID_FIT_MSG (3u << 8u)
#define SKD_ID_SLOT_MASK 0x00FFu
#define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu
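
/*
 * Editor's illustrative helpers, not part of the original driver: a context
 * id combines a table selector (SKD_ID_TABLE_MASK), a slot index
 * (SKD_ID_SLOT_MASK), and a uniquifier that the driver bumps by SKD_ID_INCR
 * each time an id is reused.
 */
static inline u32 skd_id_table_example(u32 id)
{
        return id & SKD_ID_TABLE_MASK;
}

static inline u32 skd_id_slot_example(u32 id)
{
        return id & SKD_ID_SLOT_MASK;
}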
#define SKD_N_TIMEOUT_SLOT 4u
#define SKD_TIMEOUT_SLOT_MASK 3u

#define SKD_N_MAX_SECTORS 2048u

#define SKD_MAX_RETRIES 2u

#define SKD_TIMER_SECONDS(seconds) (seconds)
#define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))

#define INQ_STD_NBYTES 36
#define SKD_DISCARD_CDB_LENGTH 24

enum skd_drvr_state {
        SKD_DRVR_STATE_LOAD,
        SKD_DRVR_STATE_IDLE,
        SKD_DRVR_STATE_BUSY,
        SKD_DRVR_STATE_STARTING,
        SKD_DRVR_STATE_ONLINE,
        SKD_DRVR_STATE_PAUSING,
        SKD_DRVR_STATE_PAUSED,
        SKD_DRVR_STATE_DRAINING_TIMEOUT,
        SKD_DRVR_STATE_RESTARTING,
        SKD_DRVR_STATE_RESUMING,
        SKD_DRVR_STATE_STOPPING,
        SKD_DRVR_STATE_FAULT,
        SKD_DRVR_STATE_DISAPPEARED,
        SKD_DRVR_STATE_PROTOCOL_MISMATCH,
        SKD_DRVR_STATE_BUSY_ERASE,
        SKD_DRVR_STATE_BUSY_SANITIZE,
        SKD_DRVR_STATE_BUSY_IMMINENT,
        SKD_DRVR_STATE_WAIT_BOOT,
        SKD_DRVR_STATE_SYNCING,
};

#define SKD_WAIT_BOOT_TIMO SKD_TIMER_SECONDS(90u)
#define SKD_STARTING_TIMO SKD_TIMER_SECONDS(8u)
#define SKD_RESTARTING_TIMO SKD_TIMER_MINUTES(4u)
#define SKD_DRAINING_TIMO SKD_TIMER_SECONDS(6u)
#define SKD_BUSY_TIMO SKD_TIMER_MINUTES(20u)
#define SKD_STARTED_BUSY_TIMO SKD_TIMER_SECONDS(60u)
#define SKD_START_WAIT_SECONDS 90u

enum skd_req_state {
        SKD_REQ_STATE_IDLE,
        SKD_REQ_STATE_SETUP,
        SKD_REQ_STATE_BUSY,
        SKD_REQ_STATE_COMPLETED,
        SKD_REQ_STATE_TIMEOUT,
        SKD_REQ_STATE_ABORTED,
};

enum skd_fit_msg_state {
        SKD_MSG_STATE_IDLE,
        SKD_MSG_STATE_BUSY,
};

enum skd_check_status_action {
        SKD_CHECK_STATUS_REPORT_GOOD,
        SKD_CHECK_STATUS_REPORT_SMART_ALERT,
        SKD_CHECK_STATUS_REQUEUE_REQUEST,
        SKD_CHECK_STATUS_REPORT_ERROR,
        SKD_CHECK_STATUS_BUSY_IMMINENT,
};

struct skd_fitmsg_context {
        enum skd_fit_msg_state state;

        struct skd_fitmsg_context *next;

        u32 id;
        u16 outstanding;

        u32 length;
        u32 offset;

        u8 *msg_buf;
        dma_addr_t mb_dma_address;
};

struct skd_request_context {
        enum skd_req_state state;

        struct skd_request_context *next;

        u16 id;
        u32 fitmsg_id;

        struct request *req;
        u8 flush_cmd;
        u8 discard_page;

        u32 timeout_stamp;
        u8 sg_data_dir;
        struct scatterlist *sg;
        u32 n_sg;
        u32 sg_byte_count;

        struct fit_sg_descriptor *sksg_list;
        dma_addr_t sksg_dma_address;

        struct fit_completion_entry_v1 completion;

        struct fit_comp_error_info err_info;
};

#define SKD_DATA_DIR_HOST_TO_CARD 1
#define SKD_DATA_DIR_CARD_TO_HOST 2
#define SKD_DATA_DIR_NONE 3 /* especially for DISCARD requests. */

struct skd_special_context {
        struct skd_request_context req;

        u8 orphaned;

        void *data_buf;
        dma_addr_t db_dma_address;

        u8 *msg_buf;
        dma_addr_t mb_dma_address;
};

struct skd_sg_io {
        fmode_t mode;
        void __user *argp;

        struct sg_io_hdr sg;

        u8 cdb[16];

        u32 dxfer_len;
        u32 iovcnt;
        struct sg_iovec *iov;
        struct sg_iovec no_iov_iov;

        struct skd_special_context *skspcl;
};

typedef enum skd_irq_type {
        SKD_IRQ_LEGACY,
        SKD_IRQ_MSI,
        SKD_IRQ_MSIX
} skd_irq_type_t;

#define SKD_MAX_BARS 2

struct skd_device {
        volatile void __iomem *mem_map[SKD_MAX_BARS];
        resource_size_t mem_phys[SKD_MAX_BARS];
        u32 mem_size[SKD_MAX_BARS];

        skd_irq_type_t irq_type;
        u32 msix_count;
        struct skd_msix_entry *msix_entries;

        struct pci_dev *pdev;
        int pcie_error_reporting_is_enabled;

        spinlock_t lock;
        struct gendisk *disk;
        struct request_queue *queue;
        struct device *class_dev;
        int gendisk_on;
        int sync_done;

        atomic_t device_count;
        u32 devno;
        u32 major;
        char name[32];
        char isr_name[30];

        enum skd_drvr_state state;
        u32 drive_state;

        u32 in_flight;
        u32 cur_max_queue_depth;
        u32 queue_low_water_mark;
        u32 dev_max_queue_depth;

        u32 num_fitmsg_context;
        u32 num_req_context;

        u32 timeout_slot[SKD_N_TIMEOUT_SLOT];
        u32 timeout_stamp;
        struct skd_fitmsg_context *skmsg_free_list;
        struct skd_fitmsg_context *skmsg_table;

        struct skd_request_context *skreq_free_list;
        struct skd_request_context *skreq_table;

        struct skd_special_context *skspcl_free_list;
        struct skd_special_context *skspcl_table;

        struct skd_special_context internal_skspcl;
        u32 read_cap_blocksize;
        u32 read_cap_last_lba;
        int read_cap_is_valid;
        int inquiry_is_valid;
        u8 inq_serial_num[13]; /* 12 chars plus null term */
        u8 id_str[80]; /* holds a composite name (pci + sernum) */

        u8 skcomp_cycle;
        u32 skcomp_ix;
        struct fit_completion_entry_v1 *skcomp_table;
        struct fit_comp_error_info *skerr_table;
        dma_addr_t cq_dma_address;

        wait_queue_head_t waitq;

        struct timer_list timer;
        u32 timer_countdown;
        u32 timer_substate;

        int n_special;
        int sgs_per_request;
        u32 last_mtd;

        u32 proto_ver;

        int dbg_level;
        u32 connect_time_stamp;
        int connect_retries;
#define SKD_MAX_CONNECT_RETRIES 16
        u32 drive_jiffies;

        u32 timo_slot;

        struct work_struct completion_worker;
};

#define SKD_FLUSH_JOB "skd-flush-jobs"
struct kmem_cache *skd_flush_slab;

/*
 * These commands hold "nonzero size FLUSH bios",
 * which are enqueued in skdev->flush_list during
 * completion of "zero size FLUSH commands".
 * This path is only active in biomode.
 */
struct skd_flush_cmd {
        void *cmd;
        struct list_head flist;
};
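
/*
 * Editor's sketch, not part of the original driver: one way a deferred
 * non-zero-size FLUSH command could be queued from skd_flush_slab onto a
 * list such as the skdev->flush_list mentioned above. The flush_list
 * argument here is hypothetical.
 */
static inline int skd_queue_flush_cmd_sketch(struct list_head *flush_list,
                                             void *cmd)
{
        struct skd_flush_cmd *item;

        item = kmem_cache_zalloc(skd_flush_slab, GFP_ATOMIC);
        if (!item)
                return -ENOMEM;

        item->cmd = cmd;
        list_add_tail(&item->flist, flush_list);
        return 0;
}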
#define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF)
#define SKD_READL(DEV, OFF) skd_reg_read32(DEV, OFF)
#define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF)

static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
{
        u32 val;

        if (likely(skdev->dbg_level < 2))
                return readl(skdev->mem_map[1] + offset);
        else {
                barrier();
                val = readl(skdev->mem_map[1] + offset);
                barrier();
                pr_debug("%s:%s:%d offset %x = %x\n",
                         skdev->name, __func__, __LINE__, offset, val);
                return val;
        }
}

static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
                                   u32 offset)
{
        if (likely(skdev->dbg_level < 2)) {
                writel(val, skdev->mem_map[1] + offset);
                barrier();
        } else {
                barrier();
                writel(val, skdev->mem_map[1] + offset);
                barrier();
                pr_debug("%s:%s:%d offset %x = %x\n",
                         skdev->name, __func__, __LINE__, offset, val);
        }
}

static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
                                   u32 offset)
{
        if (likely(skdev->dbg_level < 2)) {
                writeq(val, skdev->mem_map[1] + offset);
                barrier();
        } else {
                barrier();
                writeq(val, skdev->mem_map[1] + offset);
                barrier();
                pr_debug("%s:%s:%d offset %x = %016llx\n",
                         skdev->name, __func__, __LINE__, offset, val);
        }
}
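
/*
 * Usage note: the register wrappers above are reached through the
 * SKD_READL/SKD_WRITEL/SKD_WRITEQ macros; for example, skd_timer_tick()
 * later in this file polls the drive state with
 *
 *      state = SKD_READL(skdev, FIT_STATUS);
 *      state &= FIT_SR_DRIVE_STATE_MASK;
 */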
#define SKD_IRQ_DEFAULT SKD_IRQ_MSI
static int skd_isr_type = SKD_IRQ_DEFAULT;
module_param(skd_isr_type, int, 0444);
MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability."
                 " (0==legacy, 1==MSI, 2==MSI-X, default==1)");

#define SKD_MAX_REQ_PER_MSG_DEFAULT 1
static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
module_param(skd_max_req_per_msg, int, 0444);
MODULE_PARM_DESC(skd_max_req_per_msg,
                 "Maximum SCSI requests packed in a single message."
                 " (1-14, default==1)");

#define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
#define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
module_param(skd_max_queue_depth, int, 0444);
MODULE_PARM_DESC(skd_max_queue_depth,
                 "Maximum SCSI requests issued to s1120."
                 " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")");

static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
module_param(skd_sgs_per_request, int, 0444);
MODULE_PARM_DESC(skd_sgs_per_request,
                 "Maximum SG elements per block request."
                 " (1-4096, default==256)");

static int skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
module_param(skd_max_pass_thru, int, 0444);
MODULE_PARM_DESC(skd_max_pass_thru,
                 "Maximum SCSI pass-thru at a time." " (1-50, default==32)");

module_param(skd_dbg_level, int, 0444);
MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");

module_param(skd_isr_comp_limit, int, 0444);
MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");

/* Major device number dynamically assigned. */
static u32 skd_major;

static struct skd_device *skd_construct(struct pci_dev *pdev);
static void skd_destruct(struct skd_device *skdev);
static const struct block_device_operations skd_blockdev_ops;
static void skd_send_fitmsg(struct skd_device *skdev,
                            struct skd_fitmsg_context *skmsg);
static void skd_send_special_fitmsg(struct skd_device *skdev,
                                    struct skd_special_context *skspcl);
static void skd_request_fn(struct request_queue *rq);
static void skd_end_request(struct skd_device *skdev,
                            struct skd_request_context *skreq, int error);
static int skd_preop_sg_list(struct skd_device *skdev,
                             struct skd_request_context *skreq);
static void skd_postop_sg_list(struct skd_device *skdev,
                               struct skd_request_context *skreq);
static void skd_restart_device(struct skd_device *skdev);
static int skd_quiesce_dev(struct skd_device *skdev);
static int skd_unquiesce_dev(struct skd_device *skdev);
static void skd_release_special(struct skd_device *skdev,
                                struct skd_special_context *skspcl);
static void skd_disable_interrupts(struct skd_device *skdev);
static void skd_isr_fwstate(struct skd_device *skdev);
static void skd_recover_requests(struct skd_device *skdev, int requeue);
static void skd_soft_reset(struct skd_device *skdev);
static const char *skd_name(struct skd_device *skdev);
const char *skd_drive_state_to_str(int state);
const char *skd_skdev_state_to_str(enum skd_drvr_state state);
static void skd_log_skdev(struct skd_device *skdev, const char *event);
static void skd_log_skmsg(struct skd_device *skdev,
                          struct skd_fitmsg_context *skmsg, const char *event);
static void skd_log_skreq(struct skd_device *skdev,
                          struct skd_request_context *skreq, const char *event);

/*
 *****************************************************************************
 * READ/WRITE REQUESTS
 *****************************************************************************
 */
static void skd_fail_all_pending(struct skd_device *skdev)
{
        struct request_queue *q = skdev->queue;
        struct request *req;

        for (;;) {
                req = blk_peek_request(q);
                if (req == NULL)
                        break;
                blk_start_request(req);
                __blk_end_request_all(req, -EIO);
        }
}
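
/*
 * Build a 10-byte READ(10)/WRITE(10) CDB: opcode 0x28 for reads, 0x2a for
 * writes, with the LBA in CDB bytes 2-5 and the transfer length in sectors
 * in bytes 7-8, both big-endian.
 */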
static void
skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
                int data_dir, unsigned lba,
                unsigned count)
{
        if (data_dir == READ)
                scsi_req->cdb[0] = 0x28;
        else
                scsi_req->cdb[0] = 0x2a;

        scsi_req->cdb[1] = 0;
        scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
        scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
        scsi_req->cdb[4] = (lba & 0xff00) >> 8;
        scsi_req->cdb[5] = (lba & 0xff);
        scsi_req->cdb[6] = 0;
        scsi_req->cdb[7] = (count & 0xff00) >> 8;
        scsi_req->cdb[8] = count & 0xff;
        scsi_req->cdb[9] = 0;
}

static void
skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
                            struct skd_request_context *skreq)
{
        skreq->flush_cmd = 1;

        scsi_req->cdb[0] = 0x35;
        scsi_req->cdb[1] = 0;
        scsi_req->cdb[2] = 0;
        scsi_req->cdb[3] = 0;
        scsi_req->cdb[4] = 0;
        scsi_req->cdb[5] = 0;
        scsi_req->cdb[6] = 0;
        scsi_req->cdb[7] = 0;
        scsi_req->cdb[8] = 0;
        scsi_req->cdb[9] = 0;
}
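
/*
 * Build an UNMAP CDB plus its parameter list in the supplied page: bytes 0-1
 * of the buffer hold the UNMAP data length (22), bytes 2-3 the block
 * descriptor data length (16), and the single descriptor at offset 8 carries
 * a 64-bit starting LBA followed by a 32-bit block count. The page is then
 * attached to the request as its payload.
 */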
static void
skd_prep_discard_cdb(struct skd_scsi_request *scsi_req,
                     struct skd_request_context *skreq,
                     struct page *page,
                     u32 lba, u32 count)
{
        char *buf;
        unsigned long len;
        struct request *req;

        buf = page_address(page);
        len = SKD_DISCARD_CDB_LENGTH;

        scsi_req->cdb[0] = UNMAP;
        scsi_req->cdb[8] = len;

        put_unaligned_be16(6 + 16, &buf[0]);
        put_unaligned_be16(16, &buf[2]);
        put_unaligned_be64(lba, &buf[8]);
        put_unaligned_be32(count, &buf[16]);

        req = skreq->req;
        blk_add_request_payload(req, page, len);
        req->buffer = buf;
}

static void skd_request_fn_not_online(struct request_queue *q);
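
/*
 * Request-queue strategy function: peel native requests off the block queue,
 * encode each one as a struct skd_scsi_request inside a FIT message (packing
 * up to skd_max_req_per_msg requests per message), and hand full or final
 * messages to the card via skd_send_fitmsg(). The queue is stopped again when
 * a request is left pending because request contexts, FIT message buffers, or
 * queue depth ran out.
 */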
static void skd_request_fn(struct request_queue *q)
{
        struct skd_device *skdev = q->queuedata;
        struct skd_fitmsg_context *skmsg = NULL;
        struct fit_msg_hdr *fmh = NULL;
        struct skd_request_context *skreq;
        struct request *req = NULL;
        struct skd_scsi_request *scsi_req;
        struct page *page;
        unsigned long io_flags;
        int error;
        u32 lba;
        u32 count;
        int data_dir;
        u32 be_lba;
        u32 be_count;
        u64 be_dmaa;
        u64 cmdctxt;
        u32 timo_slot;
        void *cmd_ptr;
        int flush, fua;

        if (skdev->state != SKD_DRVR_STATE_ONLINE) {
                skd_request_fn_not_online(q);
                return;
        }

        if (blk_queue_stopped(skdev->queue)) {
                if (skdev->skmsg_free_list == NULL ||
                    skdev->skreq_free_list == NULL ||
                    skdev->in_flight >= skdev->queue_low_water_mark)
                        /* There is still some kind of shortage */
                        return;

                queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
        }

        /*
         * Stop conditions:
         *  - There are no more native requests
         *  - There are already the maximum number of requests in progress
         *  - There are no more skd_request_context entries
         *  - There are no more FIT msg buffers
         */
        for (;;) {
                flush = fua = 0;

                req = blk_peek_request(q);

                /* Are there any native requests to start? */
                if (req == NULL)
                        break;

                lba = (u32)blk_rq_pos(req);
                count = blk_rq_sectors(req);
                data_dir = rq_data_dir(req);
                io_flags = req->cmd_flags;

                if (io_flags & REQ_FLUSH)
                        flush++;

                if (io_flags & REQ_FUA)
                        fua++;

                pr_debug("%s:%s:%d new req=%p lba=%u(0x%x) "
                         "count=%u(0x%x) dir=%d\n",
                         skdev->name, __func__, __LINE__,
                         req, lba, lba, count, count, data_dir);

                /* At this point we know there is a request
                 * (from our bio q or req q depending on the way
                 * the driver is built); do checks for resources.
                 */

                /* Are too many requests already in progress? */
                if (skdev->in_flight >= skdev->cur_max_queue_depth) {
                        pr_debug("%s:%s:%d qdepth %d, limit %d\n",
                                 skdev->name, __func__, __LINE__,
                                 skdev->in_flight, skdev->cur_max_queue_depth);
                        break;
                }

                /* Is a skd_request_context available? */
                skreq = skdev->skreq_free_list;
                if (skreq == NULL) {
                        pr_debug("%s:%s:%d Out of req=%p\n",
                                 skdev->name, __func__, __LINE__, q);
                        break;
                }
                SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
                SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0);

                /* Now we check to see if we can get a fit msg */
                if (skmsg == NULL) {
                        if (skdev->skmsg_free_list == NULL) {
                                pr_debug("%s:%s:%d Out of msg\n",
                                         skdev->name, __func__, __LINE__);
                                break;
                        }
                }

                skreq->flush_cmd = 0;
                skreq->n_sg = 0;
                skreq->sg_byte_count = 0;
                skreq->discard_page = 0;

                /*
                 * OK to now dequeue request from either bio or q.
                 *
                 * At this point we are committed to either start or reject
                 * the native request. Note that skd_request_context is
                 * available but is still at the head of the free list.
                 */
                blk_start_request(req);
                skreq->req = req;
                skreq->fitmsg_id = 0;

                /* Either a FIT msg is in progress or we have to start one. */
                if (skmsg == NULL) {
                        /* Are there any FIT msg buffers available? */
                        skmsg = skdev->skmsg_free_list;
                        if (skmsg == NULL) {
                                pr_debug("%s:%s:%d Out of msg skdev=%p\n",
                                         skdev->name, __func__, __LINE__,
                                         skdev);
                                break;
                        }
                        SKD_ASSERT(skmsg->state == SKD_MSG_STATE_IDLE);
                        SKD_ASSERT((skmsg->id & SKD_ID_INCR) == 0);

                        skdev->skmsg_free_list = skmsg->next;

                        skmsg->state = SKD_MSG_STATE_BUSY;
                        skmsg->id += SKD_ID_INCR;

                        /* Initialize the FIT msg header */
                        fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
                        memset(fmh, 0, sizeof(*fmh));
                        fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
                        skmsg->length = sizeof(*fmh);
                }

                skreq->fitmsg_id = skmsg->id;

                /*
                 * Note that a FIT msg may have just been started
                 * but contains no SoFIT requests yet.
                 */

                /*
                 * Transcode the request, checking as we go. The outcome of
                 * the transcoding is represented by the error variable.
                 */
                cmd_ptr = &skmsg->msg_buf[skmsg->length];
                memset(cmd_ptr, 0, 32);

                be_lba = cpu_to_be32(lba);
                be_count = cpu_to_be32(count);
                be_dmaa = cpu_to_be64((u64)skreq->sksg_dma_address);
                cmdctxt = skreq->id + SKD_ID_INCR;

                scsi_req = cmd_ptr;
                scsi_req->hdr.tag = cmdctxt;
                scsi_req->hdr.sg_list_dma_address = be_dmaa;

                if (data_dir == READ)
                        skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST;
                else
                        skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;

                if (io_flags & REQ_DISCARD) {
                        page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
                        if (!page) {
                                pr_err("request_fn:Page allocation failed.\n");
                                skd_end_request(skdev, skreq, -ENOMEM);
                                break;
                        }
                        skreq->discard_page = 1;
                        skd_prep_discard_cdb(scsi_req, skreq, page, lba, count);
                } else if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
                        skd_prep_zerosize_flush_cdb(scsi_req, skreq);
                        SKD_ASSERT(skreq->flush_cmd == 1);
                } else {
                        skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
                }

                if (fua)
                        scsi_req->cdb[1] |= SKD_FUA_NV;

                if (!req->bio)
                        goto skip_sg;

                error = skd_preop_sg_list(skdev, skreq);

                if (error != 0) {
                        /*
                         * Complete the native request with error.
                         * Note that the request context is still at the
                         * head of the free list, and that the SoFIT request
                         * was encoded into the FIT msg buffer but the FIT
                         * msg length has not been updated. In short, the
                         * only resource that has been allocated but might
                         * not be used is that the FIT msg could be empty.
                         */
                        pr_debug("%s:%s:%d error Out\n",
                                 skdev->name, __func__, __LINE__);
                        skd_end_request(skdev, skreq, error);
                        continue;
                }

skip_sg:
                scsi_req->hdr.sg_list_len_bytes =
                        cpu_to_be32(skreq->sg_byte_count);

                /* Complete resource allocations. */
                skdev->skreq_free_list = skreq->next;
                skreq->state = SKD_REQ_STATE_BUSY;
                skreq->id += SKD_ID_INCR;

                skmsg->length += sizeof(struct skd_scsi_request);
                fmh->num_protocol_cmds_coalesced++;

                /*
                 * Update the active request counts.
                 * Capture the timeout timestamp.
                 */
                skreq->timeout_stamp = skdev->timeout_stamp;
                timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
                skdev->timeout_slot[timo_slot]++;
                skdev->in_flight++;
                pr_debug("%s:%s:%d req=0x%x busy=%d\n",
                         skdev->name, __func__, __LINE__,
                         skreq->id, skdev->in_flight);

                /*
                 * If the FIT msg buffer is full send it.
                 */
                if (skmsg->length >= SKD_N_FITMSG_BYTES ||
                    fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
                        skd_send_fitmsg(skdev, skmsg);
                        skmsg = NULL;
                        fmh = NULL;
                }
        }

        /*
         * Is a FIT msg in progress? If it is empty put the buffer back
         * on the free list. If it is non-empty send what we got.
         * This minimizes latency when there are fewer requests than
         * what fits in a FIT msg.
         */
        if (skmsg != NULL) {
                /* Bigger than just a FIT msg header? */
                if (skmsg->length > sizeof(struct fit_msg_hdr)) {
                        pr_debug("%s:%s:%d sending msg=%p, len %d\n",
                                 skdev->name, __func__, __LINE__,
                                 skmsg, skmsg->length);
                        skd_send_fitmsg(skdev, skmsg);
                } else {
                        /*
                         * The FIT msg is empty. It means we got started
                         * on the msg, but the requests were rejected.
                         */
                        skmsg->state = SKD_MSG_STATE_IDLE;
                        skmsg->id += SKD_ID_INCR;
                        skmsg->next = skdev->skmsg_free_list;
                        skdev->skmsg_free_list = skmsg;
                }
                skmsg = NULL;
                fmh = NULL;
        }

        /*
         * If req is non-NULL it means there is something to do but
         * we are out of a resource.
         */
        if (req)
                blk_stop_queue(skdev->queue);
}

static void skd_end_request_blk(struct skd_device *skdev,
                                struct skd_request_context *skreq, int error)
{
        struct request *req = skreq->req;
        unsigned int io_flags = req->cmd_flags;

        if ((io_flags & REQ_DISCARD) &&
            (skreq->discard_page == 1)) {
                pr_debug("%s:%s:%d skd_end_request_blk, free the page!",
                         skdev->name, __func__, __LINE__);
                free_page((unsigned long)req->buffer);
                req->buffer = NULL;
        }

        if (unlikely(error)) {
                struct request *req = skreq->req;
                char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
                u32 lba = (u32)blk_rq_pos(req);
                u32 count = blk_rq_sectors(req);

                pr_err("(%s): Error cmd=%s sect=%u count=%u id=0x%x\n",
                       skd_name(skdev), cmd, lba, count, skreq->id);
        } else
                pr_debug("%s:%s:%d id=0x%x error=%d\n",
                         skdev->name, __func__, __LINE__, skreq->id, error);

        __blk_end_request_all(skreq->req, error);
}
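
/*
 * Map the block request into the per-request scatterlist, DMA-map it, and
 * translate each element into a fit_sg_descriptor for the card, marking the
 * final descriptor with FIT_SGD_CONTROL_LAST.
 */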
static int skd_preop_sg_list(struct skd_device *skdev,
                             struct skd_request_context *skreq)
{
        struct request *req = skreq->req;
        int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
        int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
        struct scatterlist *sg = &skreq->sg[0];
        int n_sg;
        int i;

        skreq->sg_byte_count = 0;

        /* SKD_ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD ||
                      skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST); */

        n_sg = blk_rq_map_sg(skdev->queue, req, sg);
        if (n_sg <= 0)
                return -EINVAL;

        /*
         * Map scatterlist to PCI bus addresses.
         * Note PCI might change the number of entries.
         */
        n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir);
        if (n_sg <= 0)
                return -EINVAL;

        SKD_ASSERT(n_sg <= skdev->sgs_per_request);

        skreq->n_sg = n_sg;

        for (i = 0; i < n_sg; i++) {
                struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
                u32 cnt = sg_dma_len(&sg[i]);
                uint64_t dma_addr = sg_dma_address(&sg[i]);

                sgd->control = FIT_SGD_CONTROL_NOT_LAST;
                sgd->byte_count = cnt;
                skreq->sg_byte_count += cnt;
                sgd->host_side_addr = dma_addr;
                sgd->dev_side_addr = 0;
        }

        skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
        skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;

        if (unlikely(skdev->dbg_level > 1)) {
                pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
                         skdev->name, __func__, __LINE__,
                         skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
                for (i = 0; i < n_sg; i++) {
                        struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];

                        pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
                                 "addr=0x%llx next=0x%llx\n",
                                 skdev->name, __func__, __LINE__,
                                 i, sgd->byte_count, sgd->control,
                                 sgd->host_side_addr, sgd->next_desc_ptr);
                }
        }

        return 0;
}

static void skd_postop_sg_list(struct skd_device *skdev,
                               struct skd_request_context *skreq)
{
        int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
        int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;

        /*
         * restore the next ptr for next IO request so we
         * don't have to set it every time.
         */
        skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
                skreq->sksg_dma_address +
                ((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
        pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, pci_dir);
}

static void skd_end_request(struct skd_device *skdev,
                            struct skd_request_context *skreq, int error)
{
        skd_end_request_blk(skdev, skreq, error);
}

static void skd_request_fn_not_online(struct request_queue *q)
{
        struct skd_device *skdev = q->queuedata;
        int error;

        SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);

        skd_log_skdev(skdev, "req_not_online");
        switch (skdev->state) {
        case SKD_DRVR_STATE_PAUSING:
        case SKD_DRVR_STATE_PAUSED:
        case SKD_DRVR_STATE_STARTING:
        case SKD_DRVR_STATE_RESTARTING:
        case SKD_DRVR_STATE_WAIT_BOOT:
                /* In case of starting, we haven't started the queue,
                 * so we can't get here... but requests are
                 * possibly hanging out waiting for us because we
                 * reported the dev/skd0 already. They'll wait
                 * forever if connect doesn't complete.
                 * What to do??? delay dev/skd0 ??
                 */
        case SKD_DRVR_STATE_BUSY:
        case SKD_DRVR_STATE_BUSY_IMMINENT:
        case SKD_DRVR_STATE_BUSY_ERASE:
        case SKD_DRVR_STATE_DRAINING_TIMEOUT:
                return;

        case SKD_DRVR_STATE_BUSY_SANITIZE:
        case SKD_DRVR_STATE_STOPPING:
        case SKD_DRVR_STATE_SYNCING:
        case SKD_DRVR_STATE_FAULT:
        case SKD_DRVR_STATE_DISAPPEARED:
        default:
                error = -EIO;
                break;
        }

        /* If we get here, terminate all pending block requests
         * with EIO and any scsi pass thru with appropriate sense
         */
        skd_fail_all_pending(skdev);
}

/*
 *****************************************************************************
 * TIMER
 *****************************************************************************
 */
static void skd_timer_tick_not_online(struct skd_device *skdev);
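
/*
 * Timeout accounting: each submitted request records the current
 * skdev->timeout_stamp, and its low SKD_TIMEOUT_SLOT_MASK bits select one of
 * the SKD_N_TIMEOUT_SLOT counters. The once-per-second tick below advances
 * the stamp; if the slot it wraps back onto still has outstanding requests,
 * they are treated as overdue and the queue is drained.
 */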
  857. static void skd_timer_tick(ulong arg)
  858. {
  859. struct skd_device *skdev = (struct skd_device *)arg;
  860. u32 timo_slot;
  861. u32 overdue_timestamp;
  862. unsigned long reqflags;
  863. u32 state;
  864. if (skdev->state == SKD_DRVR_STATE_FAULT)
  865. /* The driver has declared fault, and we want it to
  866. * stay that way until driver is reloaded.
  867. */
  868. return;
  869. spin_lock_irqsave(&skdev->lock, reqflags);
  870. state = SKD_READL(skdev, FIT_STATUS);
  871. state &= FIT_SR_DRIVE_STATE_MASK;
  872. if (state != skdev->drive_state)
  873. skd_isr_fwstate(skdev);
  874. if (skdev->state != SKD_DRVR_STATE_ONLINE) {
  875. skd_timer_tick_not_online(skdev);
  876. goto timer_func_out;
  877. }
  878. skdev->timeout_stamp++;
  879. timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
  880. /*
  881. * All requests that happened during the previous use of
  882. * this slot should be done by now. The previous use was
  883. * over 7 seconds ago.
  884. */
  885. if (skdev->timeout_slot[timo_slot] == 0)
  886. goto timer_func_out;
  887. /* Something is overdue */
  888. overdue_timestamp = skdev->timeout_stamp - SKD_N_TIMEOUT_SLOT;
  889. pr_debug("%s:%s:%d found %d timeouts, draining busy=%d\n",
  890. skdev->name, __func__, __LINE__,
  891. skdev->timeout_slot[timo_slot], skdev->in_flight);
  892. pr_err("(%s): Overdue IOs (%d), busy %d\n",
  893. skd_name(skdev), skdev->timeout_slot[timo_slot],
  894. skdev->in_flight);
  895. skdev->timer_countdown = SKD_DRAINING_TIMO;
  896. skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT;
  897. skdev->timo_slot = timo_slot;
  898. blk_stop_queue(skdev->queue);
  899. timer_func_out:
  900. mod_timer(&skdev->timer, (jiffies + HZ));
  901. spin_unlock_irqrestore(&skdev->lock, reqflags);
  902. }
  903. static void skd_timer_tick_not_online(struct skd_device *skdev)
  904. {
  905. switch (skdev->state) {
  906. case SKD_DRVR_STATE_IDLE:
  907. case SKD_DRVR_STATE_LOAD:
  908. break;
  909. case SKD_DRVR_STATE_BUSY_SANITIZE:
  910. pr_debug("%s:%s:%d drive busy sanitize[%x], driver[%x]\n",
  911. skdev->name, __func__, __LINE__,
  912. skdev->drive_state, skdev->state);
  913. /* If we've been in sanitize for 3 seconds, we figure we're not
  914. * going to get anymore completions, so recover requests now
  915. */
  916. if (skdev->timer_countdown > 0) {
  917. skdev->timer_countdown--;
  918. return;
  919. }
  920. skd_recover_requests(skdev, 0);
  921. break;
  922. case SKD_DRVR_STATE_BUSY:
  923. case SKD_DRVR_STATE_BUSY_IMMINENT:
  924. case SKD_DRVR_STATE_BUSY_ERASE:
  925. pr_debug("%s:%s:%d busy[%x], countdown=%d\n",
  926. skdev->name, __func__, __LINE__,
  927. skdev->state, skdev->timer_countdown);
  928. if (skdev->timer_countdown > 0) {
  929. skdev->timer_countdown--;
  930. return;
  931. }
  932. pr_debug("%s:%s:%d busy[%x], timedout=%d, restarting device.",
  933. skdev->name, __func__, __LINE__,
  934. skdev->state, skdev->timer_countdown);
  935. skd_restart_device(skdev);
  936. break;
  937. case SKD_DRVR_STATE_WAIT_BOOT:
  938. case SKD_DRVR_STATE_STARTING:
  939. if (skdev->timer_countdown > 0) {
  940. skdev->timer_countdown--;
  941. return;
  942. }
  943. /* For now, we fault the drive. Could attempt resets to
  944. * revcover at some point. */
  945. skdev->state = SKD_DRVR_STATE_FAULT;
  946. pr_err("(%s): DriveFault Connect Timeout (%x)\n",
  947. skd_name(skdev), skdev->drive_state);
  948. /*start the queue so we can respond with error to requests */
  949. /* wakeup anyone waiting for startup complete */
  950. blk_start_queue(skdev->queue);
  951. skdev->gendisk_on = -1;
  952. wake_up_interruptible(&skdev->waitq);
  953. break;
  954. case SKD_DRVR_STATE_ONLINE:
  955. /* shouldn't get here. */
  956. break;
  957. case SKD_DRVR_STATE_PAUSING:
  958. case SKD_DRVR_STATE_PAUSED:
  959. break;
  960. case SKD_DRVR_STATE_DRAINING_TIMEOUT:
  961. pr_debug("%s:%s:%d "
  962. "draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n",
  963. skdev->name, __func__, __LINE__,
  964. skdev->timo_slot,
  965. skdev->timer_countdown,
  966. skdev->in_flight,
  967. skdev->timeout_slot[skdev->timo_slot]);
  968. /* if the slot has cleared we can let the I/O continue */
  969. if (skdev->timeout_slot[skdev->timo_slot] == 0) {
  970. pr_debug("%s:%s:%d Slot drained, starting queue.\n",
  971. skdev->name, __func__, __LINE__);
  972. skdev->state = SKD_DRVR_STATE_ONLINE;
  973. blk_start_queue(skdev->queue);
  974. return;
  975. }
  976. if (skdev->timer_countdown > 0) {
  977. skdev->timer_countdown--;
  978. return;
  979. }
  980. skd_restart_device(skdev);
  981. break;
  982. case SKD_DRVR_STATE_RESTARTING:
  983. if (skdev->timer_countdown > 0) {
  984. skdev->timer_countdown--;
  985. return;
  986. }
  987. /* For now, we fault the drive. Could attempt resets to
988. * recover at some point. */
  989. skdev->state = SKD_DRVR_STATE_FAULT;
  990. pr_err("(%s): DriveFault Reconnect Timeout (%x)\n",
  991. skd_name(skdev), skdev->drive_state);
  992. /*
  993. * Recovering does two things:
  994. * 1. completes IO with error
  995. * 2. reclaims dma resources
  996. * When is it safe to recover requests?
  997. * - if the drive state is faulted
998. * - if the state is still soft reset after our timeout
  999. * - if the drive registers are dead (state = FF)
  1000. * If it is "unsafe", we still need to recover, so we will
  1001. * disable pci bus mastering and disable our interrupts.
  1002. */
  1003. if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
  1004. (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
  1005. (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK))
  1006. /* It never came out of soft reset. Try to
  1007. * recover the requests and then let them
  1008. * fail. This is to mitigate hung processes. */
  1009. skd_recover_requests(skdev, 0);
  1010. else {
  1011. pr_err("(%s): Disable BusMaster (%x)\n",
  1012. skd_name(skdev), skdev->drive_state);
  1013. pci_disable_device(skdev->pdev);
  1014. skd_disable_interrupts(skdev);
  1015. skd_recover_requests(skdev, 0);
  1016. }
1017. /* start the queue so we can respond with error to requests */
1018. /* wake up anyone waiting for startup complete */
  1019. blk_start_queue(skdev->queue);
  1020. skdev->gendisk_on = -1;
  1021. wake_up_interruptible(&skdev->waitq);
  1022. break;
  1023. case SKD_DRVR_STATE_RESUMING:
  1024. case SKD_DRVR_STATE_STOPPING:
  1025. case SKD_DRVR_STATE_SYNCING:
  1026. case SKD_DRVR_STATE_FAULT:
  1027. case SKD_DRVR_STATE_DISAPPEARED:
  1028. default:
  1029. break;
  1030. }
  1031. }
  1032. static int skd_start_timer(struct skd_device *skdev)
  1033. {
  1034. int rc;
1035. /* setup_timer() also initializes the timer; no separate init_timer() needed */
1036. setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);
  1037. rc = mod_timer(&skdev->timer, (jiffies + HZ));
  1038. if (rc)
  1039. pr_err("%s: failed to start timer %d\n",
  1040. __func__, rc);
  1041. return rc;
  1042. }
  1043. static void skd_kill_timer(struct skd_device *skdev)
  1044. {
  1045. del_timer_sync(&skdev->timer);
  1046. }
  1047. /*
  1048. *****************************************************************************
  1049. * IOCTL
  1050. *****************************************************************************
  1051. */
  1052. static int skd_ioctl_sg_io(struct skd_device *skdev,
  1053. fmode_t mode, void __user *argp);
  1054. static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
  1055. struct skd_sg_io *sksgio);
  1056. static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
  1057. struct skd_sg_io *sksgio);
  1058. static int skd_sg_io_prep_buffering(struct skd_device *skdev,
  1059. struct skd_sg_io *sksgio);
  1060. static int skd_sg_io_copy_buffer(struct skd_device *skdev,
  1061. struct skd_sg_io *sksgio, int dxfer_dir);
  1062. static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
  1063. struct skd_sg_io *sksgio);
  1064. static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio);
  1065. static int skd_sg_io_release_skspcl(struct skd_device *skdev,
  1066. struct skd_sg_io *sksgio);
  1067. static int skd_sg_io_put_status(struct skd_device *skdev,
  1068. struct skd_sg_io *sksgio);
  1069. static void skd_complete_special(struct skd_device *skdev,
  1070. volatile struct fit_completion_entry_v1
  1071. *skcomp,
  1072. volatile struct fit_comp_error_info *skerr,
  1073. struct skd_special_context *skspcl);
  1074. static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode,
  1075. uint cmd_in, ulong arg)
  1076. {
  1077. int rc = 0;
  1078. struct gendisk *disk = bdev->bd_disk;
  1079. struct skd_device *skdev = disk->private_data;
1080. void __user *p = (void __user *)arg;
  1081. pr_debug("%s:%s:%d %s: CMD[%s] ioctl mode 0x%x, cmd 0x%x arg %0lx\n",
  1082. skdev->name, __func__, __LINE__,
  1083. disk->disk_name, current->comm, mode, cmd_in, arg);
  1084. if (!capable(CAP_SYS_ADMIN))
  1085. return -EPERM;
  1086. switch (cmd_in) {
  1087. case SG_SET_TIMEOUT:
  1088. case SG_GET_TIMEOUT:
  1089. case SG_GET_VERSION_NUM:
  1090. rc = scsi_cmd_ioctl(disk->queue, disk, mode, cmd_in, p);
  1091. break;
  1092. case SG_IO:
  1093. rc = skd_ioctl_sg_io(skdev, mode, p);
  1094. break;
  1095. default:
  1096. rc = -ENOTTY;
  1097. break;
  1098. }
  1099. pr_debug("%s:%s:%d %s: completion rc %d\n",
  1100. skdev->name, __func__, __LINE__, disk->disk_name, rc);
  1101. return rc;
  1102. }
  1103. static int skd_ioctl_sg_io(struct skd_device *skdev, fmode_t mode,
  1104. void __user *argp)
  1105. {
  1106. int rc;
  1107. struct skd_sg_io sksgio;
  1108. memset(&sksgio, 0, sizeof(sksgio));
  1109. sksgio.mode = mode;
  1110. sksgio.argp = argp;
  1111. sksgio.iov = &sksgio.no_iov_iov;
  1112. switch (skdev->state) {
  1113. case SKD_DRVR_STATE_ONLINE:
  1114. case SKD_DRVR_STATE_BUSY_IMMINENT:
  1115. break;
  1116. default:
  1117. pr_debug("%s:%s:%d drive not online\n",
  1118. skdev->name, __func__, __LINE__);
  1119. rc = -ENXIO;
  1120. goto out;
  1121. }
  1122. rc = skd_sg_io_get_and_check_args(skdev, &sksgio);
  1123. if (rc)
  1124. goto out;
  1125. rc = skd_sg_io_obtain_skspcl(skdev, &sksgio);
  1126. if (rc)
  1127. goto out;
  1128. rc = skd_sg_io_prep_buffering(skdev, &sksgio);
  1129. if (rc)
  1130. goto out;
  1131. rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_TO_DEV);
  1132. if (rc)
  1133. goto out;
  1134. rc = skd_sg_io_send_fitmsg(skdev, &sksgio);
  1135. if (rc)
  1136. goto out;
  1137. rc = skd_sg_io_await(skdev, &sksgio);
  1138. if (rc)
  1139. goto out;
  1140. rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_FROM_DEV);
  1141. if (rc)
  1142. goto out;
  1143. rc = skd_sg_io_put_status(skdev, &sksgio);
  1144. if (rc)
  1145. goto out;
  1146. rc = 0;
  1147. out:
  1148. skd_sg_io_release_skspcl(skdev, &sksgio);
  1149. if (sksgio.iov != NULL && sksgio.iov != &sksgio.no_iov_iov)
  1150. kfree(sksgio.iov);
  1151. return rc;
  1152. }
  1153. static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
  1154. struct skd_sg_io *sksgio)
  1155. {
  1156. struct sg_io_hdr *sgp = &sksgio->sg;
  1157. int i, acc;
  1158. if (!access_ok(VERIFY_WRITE, sksgio->argp, sizeof(sg_io_hdr_t))) {
  1159. pr_debug("%s:%s:%d access sg failed %p\n",
  1160. skdev->name, __func__, __LINE__, sksgio->argp);
  1161. return -EFAULT;
  1162. }
  1163. if (__copy_from_user(sgp, sksgio->argp, sizeof(sg_io_hdr_t))) {
  1164. pr_debug("%s:%s:%d copy_from_user sg failed %p\n",
  1165. skdev->name, __func__, __LINE__, sksgio->argp);
  1166. return -EFAULT;
  1167. }
  1168. if (sgp->interface_id != SG_INTERFACE_ID_ORIG) {
  1169. pr_debug("%s:%s:%d interface_id invalid 0x%x\n",
  1170. skdev->name, __func__, __LINE__, sgp->interface_id);
  1171. return -EINVAL;
  1172. }
  1173. if (sgp->cmd_len > sizeof(sksgio->cdb)) {
  1174. pr_debug("%s:%s:%d cmd_len invalid %d\n",
  1175. skdev->name, __func__, __LINE__, sgp->cmd_len);
  1176. return -EINVAL;
  1177. }
  1178. if (sgp->iovec_count > 256) {
  1179. pr_debug("%s:%s:%d iovec_count invalid %d\n",
  1180. skdev->name, __func__, __LINE__, sgp->iovec_count);
  1181. return -EINVAL;
  1182. }
  1183. if (sgp->dxfer_len > (PAGE_SIZE * SKD_N_SG_PER_SPECIAL)) {
  1184. pr_debug("%s:%s:%d dxfer_len invalid %d\n",
  1185. skdev->name, __func__, __LINE__, sgp->dxfer_len);
  1186. return -EINVAL;
  1187. }
  1188. switch (sgp->dxfer_direction) {
  1189. case SG_DXFER_NONE:
  1190. acc = -1;
  1191. break;
  1192. case SG_DXFER_TO_DEV:
  1193. acc = VERIFY_READ;
  1194. break;
  1195. case SG_DXFER_FROM_DEV:
  1196. case SG_DXFER_TO_FROM_DEV:
  1197. acc = VERIFY_WRITE;
  1198. break;
  1199. default:
  1200. pr_debug("%s:%s:%d dxfer_dir invalid %d\n",
  1201. skdev->name, __func__, __LINE__, sgp->dxfer_direction);
  1202. return -EINVAL;
  1203. }
  1204. if (copy_from_user(sksgio->cdb, sgp->cmdp, sgp->cmd_len)) {
  1205. pr_debug("%s:%s:%d copy_from_user cmdp failed %p\n",
  1206. skdev->name, __func__, __LINE__, sgp->cmdp);
  1207. return -EFAULT;
  1208. }
  1209. if (sgp->mx_sb_len != 0) {
  1210. if (!access_ok(VERIFY_WRITE, sgp->sbp, sgp->mx_sb_len)) {
  1211. pr_debug("%s:%s:%d access sbp failed %p\n",
  1212. skdev->name, __func__, __LINE__, sgp->sbp);
  1213. return -EFAULT;
  1214. }
  1215. }
  1216. if (sgp->iovec_count == 0) {
  1217. sksgio->iov[0].iov_base = sgp->dxferp;
  1218. sksgio->iov[0].iov_len = sgp->dxfer_len;
  1219. sksgio->iovcnt = 1;
  1220. sksgio->dxfer_len = sgp->dxfer_len;
  1221. } else {
  1222. struct sg_iovec *iov;
  1223. uint nbytes = sizeof(*iov) * sgp->iovec_count;
  1224. size_t iov_data_len;
  1225. iov = kmalloc(nbytes, GFP_KERNEL);
  1226. if (iov == NULL) {
  1227. pr_debug("%s:%s:%d alloc iovec failed %d\n",
  1228. skdev->name, __func__, __LINE__,
  1229. sgp->iovec_count);
  1230. return -ENOMEM;
  1231. }
  1232. sksgio->iov = iov;
  1233. sksgio->iovcnt = sgp->iovec_count;
  1234. if (copy_from_user(iov, sgp->dxferp, nbytes)) {
  1235. pr_debug("%s:%s:%d copy_from_user iovec failed %p\n",
  1236. skdev->name, __func__, __LINE__, sgp->dxferp);
  1237. return -EFAULT;
  1238. }
  1239. /*
  1240. * Sum up the vecs, making sure they don't overflow
  1241. */
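/*
* Worked example of the wrap-around test in the loop below: with a
* 32-bit size_t, a running total of 0xFFFFFFF0 plus an iov_len of 0x20
* wraps to 0x10, which is smaller than the old total -- exactly what
* "iov_data_len + iov[i].iov_len < iov_data_len" rejects with -EINVAL.
*/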
  1242. iov_data_len = 0;
  1243. for (i = 0; i < sgp->iovec_count; i++) {
  1244. if (iov_data_len + iov[i].iov_len < iov_data_len)
  1245. return -EINVAL;
  1246. iov_data_len += iov[i].iov_len;
  1247. }
  1248. /* SG_IO howto says that the shorter of the two wins */
  1249. if (sgp->dxfer_len < iov_data_len) {
  1250. sksgio->iovcnt = iov_shorten((struct iovec *)iov,
  1251. sgp->iovec_count,
  1252. sgp->dxfer_len);
  1253. sksgio->dxfer_len = sgp->dxfer_len;
  1254. } else
  1255. sksgio->dxfer_len = iov_data_len;
  1256. }
  1257. if (sgp->dxfer_direction != SG_DXFER_NONE) {
  1258. struct sg_iovec *iov = sksgio->iov;
  1259. for (i = 0; i < sksgio->iovcnt; i++, iov++) {
  1260. if (!access_ok(acc, iov->iov_base, iov->iov_len)) {
  1261. pr_debug("%s:%s:%d access data failed %p/%d\n",
  1262. skdev->name, __func__, __LINE__,
  1263. iov->iov_base, (int)iov->iov_len);
  1264. return -EFAULT;
  1265. }
  1266. }
  1267. }
  1268. return 0;
  1269. }
  1270. static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
  1271. struct skd_sg_io *sksgio)
  1272. {
  1273. struct skd_special_context *skspcl = NULL;
  1274. int rc;
  1275. for (;; ) {
  1276. ulong flags;
  1277. spin_lock_irqsave(&skdev->lock, flags);
  1278. skspcl = skdev->skspcl_free_list;
  1279. if (skspcl != NULL) {
  1280. skdev->skspcl_free_list =
  1281. (struct skd_special_context *)skspcl->req.next;
  1282. skspcl->req.id += SKD_ID_INCR;
  1283. skspcl->req.state = SKD_REQ_STATE_SETUP;
  1284. skspcl->orphaned = 0;
  1285. skspcl->req.n_sg = 0;
  1286. }
  1287. spin_unlock_irqrestore(&skdev->lock, flags);
  1288. if (skspcl != NULL) {
  1289. rc = 0;
  1290. break;
  1291. }
  1292. pr_debug("%s:%s:%d blocking\n",
  1293. skdev->name, __func__, __LINE__);
  1294. rc = wait_event_interruptible_timeout(
  1295. skdev->waitq,
  1296. (skdev->skspcl_free_list != NULL),
  1297. msecs_to_jiffies(sksgio->sg.timeout));
  1298. pr_debug("%s:%s:%d unblocking, rc=%d\n",
  1299. skdev->name, __func__, __LINE__, rc);
  1300. if (rc <= 0) {
  1301. if (rc == 0)
  1302. rc = -ETIMEDOUT;
  1303. else
  1304. rc = -EINTR;
  1305. break;
  1306. }
  1307. /*
1308. * If we get here, rc > 0, meaning wait_event_interruptible_timeout()
1309. * returned with time to spare, hence the sought event -- a
1310. * non-empty free list -- happened.
1311. * Retry the allocation.
  1312. */
  1313. }
  1314. sksgio->skspcl = skspcl;
  1315. return rc;
  1316. }
  1317. static int skd_skreq_prep_buffering(struct skd_device *skdev,
  1318. struct skd_request_context *skreq,
  1319. u32 dxfer_len)
  1320. {
  1321. u32 resid = dxfer_len;
  1322. /*
  1323. * The DMA engine must have aligned addresses and byte counts.
  1324. */
  1325. resid += (-resid) & 3;
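/*
* Example of the round-up above: resid == 4097 gives (-4097) & 3 == 3,
* so resid becomes 4100, the next multiple of four; an already aligned
* length such as 4096 adds zero.
*/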
  1326. skreq->sg_byte_count = resid;
  1327. skreq->n_sg = 0;
  1328. while (resid > 0) {
  1329. u32 nbytes = PAGE_SIZE;
  1330. u32 ix = skreq->n_sg;
  1331. struct scatterlist *sg = &skreq->sg[ix];
  1332. struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
  1333. struct page *page;
  1334. if (nbytes > resid)
  1335. nbytes = resid;
  1336. page = alloc_page(GFP_KERNEL);
  1337. if (page == NULL)
  1338. return -ENOMEM;
  1339. sg_set_page(sg, page, nbytes, 0);
  1340. /* TODO: This should be going through a pci_???()
  1341. * routine to do proper mapping. */
  1342. sksg->control = FIT_SGD_CONTROL_NOT_LAST;
  1343. sksg->byte_count = nbytes;
  1344. sksg->host_side_addr = sg_phys(sg);
  1345. sksg->dev_side_addr = 0;
  1346. sksg->next_desc_ptr = skreq->sksg_dma_address +
  1347. (ix + 1) * sizeof(*sksg);
  1348. skreq->n_sg++;
  1349. resid -= nbytes;
  1350. }
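/*
* Buffer-splitting example, assuming 4 KiB pages: a rounded-up transfer
* of 10000 bytes is staged in three private pages of 4096 + 4096 + 1808
* bytes, each described by one fit_sg_descriptor chained through
* next_desc_ptr; the final descriptor is patched up just below.
*/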
  1351. if (skreq->n_sg > 0) {
  1352. u32 ix = skreq->n_sg - 1;
  1353. struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
  1354. sksg->control = FIT_SGD_CONTROL_LAST;
  1355. sksg->next_desc_ptr = 0;
  1356. }
  1357. if (unlikely(skdev->dbg_level > 1)) {
  1358. u32 i;
  1359. pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
  1360. skdev->name, __func__, __LINE__,
  1361. skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
  1362. for (i = 0; i < skreq->n_sg; i++) {
  1363. struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
  1364. pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
  1365. "addr=0x%llx next=0x%llx\n",
  1366. skdev->name, __func__, __LINE__,
  1367. i, sgd->byte_count, sgd->control,
  1368. sgd->host_side_addr, sgd->next_desc_ptr);
  1369. }
  1370. }
  1371. return 0;
  1372. }
  1373. static int skd_sg_io_prep_buffering(struct skd_device *skdev,
  1374. struct skd_sg_io *sksgio)
  1375. {
  1376. struct skd_special_context *skspcl = sksgio->skspcl;
  1377. struct skd_request_context *skreq = &skspcl->req;
  1378. u32 dxfer_len = sksgio->dxfer_len;
  1379. int rc;
  1380. rc = skd_skreq_prep_buffering(skdev, skreq, dxfer_len);
  1381. /*
  1382. * Eventually, errors or not, skd_release_special() is called
  1383. * to recover allocations including partial allocations.
  1384. */
  1385. return rc;
  1386. }
  1387. static int skd_sg_io_copy_buffer(struct skd_device *skdev,
  1388. struct skd_sg_io *sksgio, int dxfer_dir)
  1389. {
  1390. struct skd_special_context *skspcl = sksgio->skspcl;
  1391. u32 iov_ix = 0;
  1392. struct sg_iovec curiov;
  1393. u32 sksg_ix = 0;
  1394. u8 *bufp = NULL;
  1395. u32 buf_len = 0;
  1396. u32 resid = sksgio->dxfer_len;
  1397. int rc;
  1398. curiov.iov_len = 0;
  1399. curiov.iov_base = NULL;
  1400. if (dxfer_dir != sksgio->sg.dxfer_direction) {
  1401. if (dxfer_dir != SG_DXFER_TO_DEV ||
  1402. sksgio->sg.dxfer_direction != SG_DXFER_TO_FROM_DEV)
  1403. return 0;
  1404. }
  1405. while (resid > 0) {
  1406. u32 nbytes = PAGE_SIZE;
  1407. if (curiov.iov_len == 0) {
  1408. curiov = sksgio->iov[iov_ix++];
  1409. continue;
  1410. }
  1411. if (buf_len == 0) {
  1412. struct page *page;
  1413. page = sg_page(&skspcl->req.sg[sksg_ix++]);
  1414. bufp = page_address(page);
  1415. buf_len = PAGE_SIZE;
  1416. }
  1417. nbytes = min_t(u32, nbytes, resid);
  1418. nbytes = min_t(u32, nbytes, curiov.iov_len);
  1419. nbytes = min_t(u32, nbytes, buf_len);
  1420. if (dxfer_dir == SG_DXFER_TO_DEV)
  1421. rc = __copy_from_user(bufp, curiov.iov_base, nbytes);
  1422. else
  1423. rc = __copy_to_user(curiov.iov_base, bufp, nbytes);
  1424. if (rc)
  1425. return -EFAULT;
  1426. resid -= nbytes;
  1427. curiov.iov_len -= nbytes;
  1428. curiov.iov_base += nbytes;
  1429. buf_len -= nbytes;
  1430. }
  1431. return 0;
  1432. }
  1433. static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
  1434. struct skd_sg_io *sksgio)
  1435. {
  1436. struct skd_special_context *skspcl = sksgio->skspcl;
  1437. struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
  1438. struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
  1439. memset(skspcl->msg_buf, 0, SKD_N_SPECIAL_FITMSG_BYTES);
  1440. /* Initialize the FIT msg header */
  1441. fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
  1442. fmh->num_protocol_cmds_coalesced = 1;
  1443. /* Initialize the SCSI request */
  1444. if (sksgio->sg.dxfer_direction != SG_DXFER_NONE)
  1445. scsi_req->hdr.sg_list_dma_address =
  1446. cpu_to_be64(skspcl->req.sksg_dma_address);
  1447. scsi_req->hdr.tag = skspcl->req.id;
  1448. scsi_req->hdr.sg_list_len_bytes =
  1449. cpu_to_be32(skspcl->req.sg_byte_count);
  1450. memcpy(scsi_req->cdb, sksgio->cdb, sizeof(scsi_req->cdb));
  1451. skspcl->req.state = SKD_REQ_STATE_BUSY;
  1452. skd_send_special_fitmsg(skdev, skspcl);
  1453. return 0;
  1454. }
  1455. static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio)
  1456. {
  1457. unsigned long flags;
  1458. int rc;
  1459. rc = wait_event_interruptible_timeout(skdev->waitq,
  1460. (sksgio->skspcl->req.state !=
  1461. SKD_REQ_STATE_BUSY),
1462. msecs_to_jiffies(sksgio->sg.timeout));
  1464. spin_lock_irqsave(&skdev->lock, flags);
  1465. if (sksgio->skspcl->req.state == SKD_REQ_STATE_ABORTED) {
  1466. pr_debug("%s:%s:%d skspcl %p aborted\n",
  1467. skdev->name, __func__, __LINE__, sksgio->skspcl);
  1468. /* Build check cond, sense and let command finish. */
  1469. /* For a timeout, we must fabricate completion and sense
  1470. * data to complete the command */
  1471. sksgio->skspcl->req.completion.status =
  1472. SAM_STAT_CHECK_CONDITION;
  1473. memset(&sksgio->skspcl->req.err_info, 0,
  1474. sizeof(sksgio->skspcl->req.err_info));
  1475. sksgio->skspcl->req.err_info.type = 0x70;
  1476. sksgio->skspcl->req.err_info.key = ABORTED_COMMAND;
  1477. sksgio->skspcl->req.err_info.code = 0x44;
  1478. sksgio->skspcl->req.err_info.qual = 0;
  1479. rc = 0;
  1480. } else if (sksgio->skspcl->req.state != SKD_REQ_STATE_BUSY)
  1481. /* No longer on the adapter. We finish. */
  1482. rc = 0;
  1483. else {
  1484. /* Something's gone wrong. Still busy. Timeout or
  1485. * user interrupted (control-C). Mark as an orphan
1486. * so it will be disposed of when completed. */
  1487. sksgio->skspcl->orphaned = 1;
  1488. sksgio->skspcl = NULL;
  1489. if (rc == 0) {
  1490. pr_debug("%s:%s:%d timed out %p (%u ms)\n",
  1491. skdev->name, __func__, __LINE__,
  1492. sksgio, sksgio->sg.timeout);
  1493. rc = -ETIMEDOUT;
  1494. } else {
  1495. pr_debug("%s:%s:%d cntlc %p\n",
  1496. skdev->name, __func__, __LINE__, sksgio);
  1497. rc = -EINTR;
  1498. }
  1499. }
  1500. spin_unlock_irqrestore(&skdev->lock, flags);
  1501. return rc;
  1502. }
  1503. static int skd_sg_io_put_status(struct skd_device *skdev,
  1504. struct skd_sg_io *sksgio)
  1505. {
  1506. struct sg_io_hdr *sgp = &sksgio->sg;
  1507. struct skd_special_context *skspcl = sksgio->skspcl;
  1508. int resid = 0;
  1509. u32 nb = be32_to_cpu(skspcl->req.completion.num_returned_bytes);
  1510. sgp->status = skspcl->req.completion.status;
  1511. resid = sksgio->dxfer_len - nb;
  1512. sgp->masked_status = sgp->status & STATUS_MASK;
  1513. sgp->msg_status = 0;
  1514. sgp->host_status = 0;
  1515. sgp->driver_status = 0;
  1516. sgp->resid = resid;
  1517. if (sgp->masked_status || sgp->host_status || sgp->driver_status)
  1518. sgp->info |= SG_INFO_CHECK;
  1519. pr_debug("%s:%s:%d status %x masked %x resid 0x%x\n",
  1520. skdev->name, __func__, __LINE__,
  1521. sgp->status, sgp->masked_status, sgp->resid);
  1522. if (sgp->masked_status == SAM_STAT_CHECK_CONDITION) {
  1523. if (sgp->mx_sb_len > 0) {
  1524. struct fit_comp_error_info *ei = &skspcl->req.err_info;
  1525. u32 nbytes = sizeof(*ei);
  1526. nbytes = min_t(u32, nbytes, sgp->mx_sb_len);
  1527. sgp->sb_len_wr = nbytes;
  1528. if (__copy_to_user(sgp->sbp, ei, nbytes)) {
  1529. pr_debug("%s:%s:%d copy_to_user sense failed %p\n",
  1530. skdev->name, __func__, __LINE__,
  1531. sgp->sbp);
  1532. return -EFAULT;
  1533. }
  1534. }
  1535. }
  1536. if (__copy_to_user(sksgio->argp, sgp, sizeof(sg_io_hdr_t))) {
  1537. pr_debug("%s:%s:%d copy_to_user sg failed %p\n",
  1538. skdev->name, __func__, __LINE__, sksgio->argp);
  1539. return -EFAULT;
  1540. }
  1541. return 0;
  1542. }
  1543. static int skd_sg_io_release_skspcl(struct skd_device *skdev,
  1544. struct skd_sg_io *sksgio)
  1545. {
  1546. struct skd_special_context *skspcl = sksgio->skspcl;
  1547. if (skspcl != NULL) {
  1548. ulong flags;
  1549. sksgio->skspcl = NULL;
  1550. spin_lock_irqsave(&skdev->lock, flags);
  1551. skd_release_special(skdev, skspcl);
  1552. spin_unlock_irqrestore(&skdev->lock, flags);
  1553. }
  1554. return 0;
  1555. }
  1556. /*
  1557. *****************************************************************************
  1558. * INTERNAL REQUESTS -- generated by driver itself
  1559. *****************************************************************************
  1560. */
  1561. static int skd_format_internal_skspcl(struct skd_device *skdev)
  1562. {
  1563. struct skd_special_context *skspcl = &skdev->internal_skspcl;
  1564. struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
  1565. struct fit_msg_hdr *fmh;
  1566. uint64_t dma_address;
  1567. struct skd_scsi_request *scsi;
  1568. fmh = (struct fit_msg_hdr *)&skspcl->msg_buf[0];
  1569. fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
  1570. fmh->num_protocol_cmds_coalesced = 1;
  1571. scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
  1572. memset(scsi, 0, sizeof(*scsi));
  1573. dma_address = skspcl->req.sksg_dma_address;
  1574. scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
  1575. sgd->control = FIT_SGD_CONTROL_LAST;
  1576. sgd->byte_count = 0;
  1577. sgd->host_side_addr = skspcl->db_dma_address;
  1578. sgd->dev_side_addr = 0;
  1579. sgd->next_desc_ptr = 0LL;
  1580. return 1;
  1581. }
  1582. #define WR_BUF_SIZE SKD_N_INTERNAL_BYTES
  1583. static void skd_send_internal_skspcl(struct skd_device *skdev,
  1584. struct skd_special_context *skspcl,
  1585. u8 opcode)
  1586. {
  1587. struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
  1588. struct skd_scsi_request *scsi;
  1589. unsigned char *buf = skspcl->data_buf;
  1590. int i;
  1591. if (skspcl->req.state != SKD_REQ_STATE_IDLE)
  1592. /*
  1593. * A refresh is already in progress.
  1594. * Just wait for it to finish.
  1595. */
  1596. return;
  1597. SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0);
  1598. skspcl->req.state = SKD_REQ_STATE_BUSY;
  1599. skspcl->req.id += SKD_ID_INCR;
  1600. scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
  1601. scsi->hdr.tag = skspcl->req.id;
  1602. memset(scsi->cdb, 0, sizeof(scsi->cdb));
  1603. switch (opcode) {
  1604. case TEST_UNIT_READY:
  1605. scsi->cdb[0] = TEST_UNIT_READY;
  1606. sgd->byte_count = 0;
  1607. scsi->hdr.sg_list_len_bytes = 0;
  1608. break;
  1609. case READ_CAPACITY:
  1610. scsi->cdb[0] = READ_CAPACITY;
  1611. sgd->byte_count = SKD_N_READ_CAP_BYTES;
  1612. scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
  1613. break;
  1614. case INQUIRY:
  1615. scsi->cdb[0] = INQUIRY;
  1616. scsi->cdb[1] = 0x01; /* evpd */
  1617. scsi->cdb[2] = 0x80; /* serial number page */
  1618. scsi->cdb[4] = 0x10;
  1619. sgd->byte_count = 16;
  1620. scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
  1621. break;
  1622. case SYNCHRONIZE_CACHE:
  1623. scsi->cdb[0] = SYNCHRONIZE_CACHE;
  1624. sgd->byte_count = 0;
  1625. scsi->hdr.sg_list_len_bytes = 0;
  1626. break;
  1627. case WRITE_BUFFER:
  1628. scsi->cdb[0] = WRITE_BUFFER;
  1629. scsi->cdb[1] = 0x02;
  1630. scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
  1631. scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
  1632. sgd->byte_count = WR_BUF_SIZE;
  1633. scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
  1634. /* fill incrementing byte pattern */
  1635. for (i = 0; i < sgd->byte_count; i++)
  1636. buf[i] = i & 0xFF;
  1637. break;
  1638. case READ_BUFFER:
  1639. scsi->cdb[0] = READ_BUFFER;
  1640. scsi->cdb[1] = 0x02;
  1641. scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
  1642. scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
  1643. sgd->byte_count = WR_BUF_SIZE;
  1644. scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
  1645. memset(skspcl->data_buf, 0, sgd->byte_count);
  1646. break;
  1647. default:
  1648. SKD_ASSERT("Don't know what to send");
  1649. return;
  1650. }
  1651. skd_send_special_fitmsg(skdev, skspcl);
  1652. }
  1653. static void skd_refresh_device_data(struct skd_device *skdev)
  1654. {
  1655. struct skd_special_context *skspcl = &skdev->internal_skspcl;
  1656. skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
  1657. }
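/*
* The TEST_UNIT_READY sent here starts the driver's internal bring-up
* chain; each step is advanced from skd_complete_internal() below:
* TUR -> WRITE_BUFFER -> READ_BUFFER (pattern checked by
* skd_chk_read_buf()) -> READ_CAPACITY -> INQUIRY, after which
* skd_unquiesce_dev() brings the device online.
*/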
  1658. static int skd_chk_read_buf(struct skd_device *skdev,
  1659. struct skd_special_context *skspcl)
  1660. {
  1661. unsigned char *buf = skspcl->data_buf;
  1662. int i;
  1663. /* check for incrementing byte pattern */
  1664. for (i = 0; i < WR_BUF_SIZE; i++)
  1665. if (buf[i] != (i & 0xFF))
  1666. return 1;
  1667. return 0;
  1668. }
  1669. static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
  1670. u8 code, u8 qual, u8 fruc)
  1671. {
  1672. /* If the check condition is of special interest, log a message */
  1673. if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02)
  1674. && (code == 0x04) && (qual == 0x06)) {
  1675. pr_err("(%s): *** LOST_WRITE_DATA ERROR *** key/asc/"
  1676. "ascq/fruc %02x/%02x/%02x/%02x\n",
  1677. skd_name(skdev), key, code, qual, fruc);
  1678. }
  1679. }
  1680. static void skd_complete_internal(struct skd_device *skdev,
  1681. volatile struct fit_completion_entry_v1
  1682. *skcomp,
  1683. volatile struct fit_comp_error_info *skerr,
  1684. struct skd_special_context *skspcl)
  1685. {
  1686. u8 *buf = skspcl->data_buf;
  1687. u8 status;
  1688. int i;
  1689. struct skd_scsi_request *scsi =
  1690. (struct skd_scsi_request *)&skspcl->msg_buf[64];
  1691. SKD_ASSERT(skspcl == &skdev->internal_skspcl);
  1692. pr_debug("%s:%s:%d complete internal %x\n",
  1693. skdev->name, __func__, __LINE__, scsi->cdb[0]);
  1694. skspcl->req.completion = *skcomp;
  1695. skspcl->req.state = SKD_REQ_STATE_IDLE;
  1696. skspcl->req.id += SKD_ID_INCR;
  1697. status = skspcl->req.completion.status;
  1698. skd_log_check_status(skdev, status, skerr->key, skerr->code,
  1699. skerr->qual, skerr->fruc);
  1700. switch (scsi->cdb[0]) {
  1701. case TEST_UNIT_READY:
  1702. if (status == SAM_STAT_GOOD)
  1703. skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
  1704. else if ((status == SAM_STAT_CHECK_CONDITION) &&
  1705. (skerr->key == MEDIUM_ERROR))
  1706. skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
  1707. else {
  1708. if (skdev->state == SKD_DRVR_STATE_STOPPING) {
  1709. pr_debug("%s:%s:%d TUR failed, don't send anymore state 0x%x\n",
  1710. skdev->name, __func__, __LINE__,
  1711. skdev->state);
  1712. return;
  1713. }
  1714. pr_debug("%s:%s:%d **** TUR failed, retry skerr\n",
  1715. skdev->name, __func__, __LINE__);
  1716. skd_send_internal_skspcl(skdev, skspcl, 0x00);
  1717. }
  1718. break;
  1719. case WRITE_BUFFER:
  1720. if (status == SAM_STAT_GOOD)
  1721. skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
  1722. else {
  1723. if (skdev->state == SKD_DRVR_STATE_STOPPING) {
  1724. pr_debug("%s:%s:%d write buffer failed, don't send anymore state 0x%x\n",
  1725. skdev->name, __func__, __LINE__,
  1726. skdev->state);
  1727. return;
  1728. }
  1729. pr_debug("%s:%s:%d **** write buffer failed, retry skerr\n",
  1730. skdev->name, __func__, __LINE__);
  1731. skd_send_internal_skspcl(skdev, skspcl, 0x00);
  1732. }
  1733. break;
  1734. case READ_BUFFER:
  1735. if (status == SAM_STAT_GOOD) {
  1736. if (skd_chk_read_buf(skdev, skspcl) == 0)
  1737. skd_send_internal_skspcl(skdev, skspcl,
  1738. READ_CAPACITY);
  1739. else {
  1740. pr_err(
  1741. "(%s):*** W/R Buffer mismatch %d ***\n",
  1742. skd_name(skdev), skdev->connect_retries);
  1743. if (skdev->connect_retries <
  1744. SKD_MAX_CONNECT_RETRIES) {
  1745. skdev->connect_retries++;
  1746. skd_soft_reset(skdev);
  1747. } else {
  1748. pr_err(
  1749. "(%s): W/R Buffer Connect Error\n",
  1750. skd_name(skdev));
  1751. return;
  1752. }
  1753. }
  1754. } else {
  1755. if (skdev->state == SKD_DRVR_STATE_STOPPING) {
  1756. pr_debug("%s:%s:%d "
  1757. "read buffer failed, don't send anymore state 0x%x\n",
  1758. skdev->name, __func__, __LINE__,
  1759. skdev->state);
  1760. return;
  1761. }
  1762. pr_debug("%s:%s:%d "
  1763. "**** read buffer failed, retry skerr\n",
  1764. skdev->name, __func__, __LINE__);
  1765. skd_send_internal_skspcl(skdev, skspcl, 0x00);
  1766. }
  1767. break;
  1768. case READ_CAPACITY:
  1769. skdev->read_cap_is_valid = 0;
  1770. if (status == SAM_STAT_GOOD) {
  1771. skdev->read_cap_last_lba =
  1772. (buf[0] << 24) | (buf[1] << 16) |
  1773. (buf[2] << 8) | buf[3];
  1774. skdev->read_cap_blocksize =
  1775. (buf[4] << 24) | (buf[5] << 16) |
  1776. (buf[6] << 8) | buf[7];
  1777. pr_debug("%s:%s:%d last lba %d, bs %d\n",
  1778. skdev->name, __func__, __LINE__,
  1779. skdev->read_cap_last_lba,
  1780. skdev->read_cap_blocksize);
  1781. set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
  1782. skdev->read_cap_is_valid = 1;
  1783. skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
  1784. } else if ((status == SAM_STAT_CHECK_CONDITION) &&
  1785. (skerr->key == MEDIUM_ERROR)) {
  1786. skdev->read_cap_last_lba = ~0;
  1787. set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
  1788. pr_debug("%s:%s:%d "
  1789. "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n",
  1790. skdev->name, __func__, __LINE__);
  1791. skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
  1792. } else {
  1793. pr_debug("%s:%s:%d **** READCAP failed, retry TUR\n",
  1794. skdev->name, __func__, __LINE__);
  1795. skd_send_internal_skspcl(skdev, skspcl,
  1796. TEST_UNIT_READY);
  1797. }
  1798. break;
  1799. case INQUIRY:
  1800. skdev->inquiry_is_valid = 0;
  1801. if (status == SAM_STAT_GOOD) {
  1802. skdev->inquiry_is_valid = 1;
  1803. for (i = 0; i < 12; i++)
  1804. skdev->inq_serial_num[i] = buf[i + 4];
  1805. skdev->inq_serial_num[12] = 0;
  1806. }
  1807. if (skd_unquiesce_dev(skdev) < 0)
  1808. pr_debug("%s:%s:%d **** failed, to ONLINE device\n",
  1809. skdev->name, __func__, __LINE__);
  1810. /* connection is complete */
  1811. skdev->connect_retries = 0;
  1812. break;
  1813. case SYNCHRONIZE_CACHE:
  1814. if (status == SAM_STAT_GOOD)
  1815. skdev->sync_done = 1;
  1816. else
  1817. skdev->sync_done = -1;
  1818. wake_up_interruptible(&skdev->waitq);
  1819. break;
  1820. default:
  1821. SKD_ASSERT("we didn't send this");
  1822. }
  1823. }
  1824. /*
  1825. *****************************************************************************
  1826. * FIT MESSAGES
  1827. *****************************************************************************
  1828. */
  1829. static void skd_send_fitmsg(struct skd_device *skdev,
  1830. struct skd_fitmsg_context *skmsg)
  1831. {
  1832. u64 qcmd;
  1833. struct fit_msg_hdr *fmh;
  1834. pr_debug("%s:%s:%d dma address 0x%llx, busy=%d\n",
  1835. skdev->name, __func__, __LINE__,
  1836. skmsg->mb_dma_address, skdev->in_flight);
  1837. pr_debug("%s:%s:%d msg_buf 0x%p, offset %x\n",
  1838. skdev->name, __func__, __LINE__,
  1839. skmsg->msg_buf, skmsg->offset);
  1840. qcmd = skmsg->mb_dma_address;
  1841. qcmd |= FIT_QCMD_QID_NORMAL;
  1842. fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
  1843. skmsg->outstanding = fmh->num_protocol_cmds_coalesced;
  1844. if (unlikely(skdev->dbg_level > 1)) {
  1845. u8 *bp = (u8 *)skmsg->msg_buf;
  1846. int i;
  1847. for (i = 0; i < skmsg->length; i += 8) {
  1848. pr_debug("%s:%s:%d msg[%2d] %02x %02x %02x %02x "
  1849. "%02x %02x %02x %02x\n",
  1850. skdev->name, __func__, __LINE__,
  1851. i, bp[i + 0], bp[i + 1], bp[i + 2],
  1852. bp[i + 3], bp[i + 4], bp[i + 5],
  1853. bp[i + 6], bp[i + 7]);
  1854. if (i == 0)
  1855. i = 64 - 8;
  1856. }
  1857. }
  1858. if (skmsg->length > 256)
  1859. qcmd |= FIT_QCMD_MSGSIZE_512;
  1860. else if (skmsg->length > 128)
  1861. qcmd |= FIT_QCMD_MSGSIZE_256;
  1862. else if (skmsg->length > 64)
  1863. qcmd |= FIT_QCMD_MSGSIZE_128;
  1864. else
  1865. /*
  1866. * This makes no sense because the FIT msg header is
  1867. * 64 bytes. If the msg is only 64 bytes long it has
  1868. * no payload.
  1869. */
  1870. qcmd |= FIT_QCMD_MSGSIZE_64;
  1871. SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
  1872. }
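/*
* Size-class example for the qcmd encoding above, assuming each
* coalesced command occupies 64 bytes like the FIT header itself:
* a message carrying three commands is 64 + 3 * 64 = 256 bytes, which
* is > 128 but not > 256, so FIT_QCMD_MSGSIZE_256 is OR'd into qcmd.
*/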
  1873. static void skd_send_special_fitmsg(struct skd_device *skdev,
  1874. struct skd_special_context *skspcl)
  1875. {
  1876. u64 qcmd;
  1877. if (unlikely(skdev->dbg_level > 1)) {
  1878. u8 *bp = (u8 *)skspcl->msg_buf;
  1879. int i;
  1880. for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
  1881. pr_debug("%s:%s:%d spcl[%2d] %02x %02x %02x %02x "
  1882. "%02x %02x %02x %02x\n",
  1883. skdev->name, __func__, __LINE__, i,
  1884. bp[i + 0], bp[i + 1], bp[i + 2], bp[i + 3],
  1885. bp[i + 4], bp[i + 5], bp[i + 6], bp[i + 7]);
  1886. if (i == 0)
  1887. i = 64 - 8;
  1888. }
  1889. pr_debug("%s:%s:%d skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
  1890. skdev->name, __func__, __LINE__,
  1891. skspcl, skspcl->req.id, skspcl->req.sksg_list,
  1892. skspcl->req.sksg_dma_address);
  1893. for (i = 0; i < skspcl->req.n_sg; i++) {
  1894. struct fit_sg_descriptor *sgd =
  1895. &skspcl->req.sksg_list[i];
  1896. pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
  1897. "addr=0x%llx next=0x%llx\n",
  1898. skdev->name, __func__, __LINE__,
  1899. i, sgd->byte_count, sgd->control,
  1900. sgd->host_side_addr, sgd->next_desc_ptr);
  1901. }
  1902. }
  1903. /*
  1904. * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
  1905. * and one 64-byte SSDI command.
  1906. */
  1907. qcmd = skspcl->mb_dma_address;
  1908. qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;
  1909. SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
  1910. }
  1911. /*
  1912. *****************************************************************************
  1913. * COMPLETION QUEUE
  1914. *****************************************************************************
  1915. */
  1916. static void skd_complete_other(struct skd_device *skdev,
  1917. volatile struct fit_completion_entry_v1 *skcomp,
  1918. volatile struct fit_comp_error_info *skerr);
  1919. static void skd_requeue_request(struct skd_device *skdev,
  1920. struct skd_request_context *skreq);
  1921. struct sns_info {
  1922. u8 type;
  1923. u8 stat;
  1924. u8 key;
  1925. u8 asc;
  1926. u8 ascq;
  1927. u8 mask;
  1928. enum skd_check_status_action action;
  1929. };
  1930. static struct sns_info skd_chkstat_table[] = {
  1931. /* Good */
  1932. { 0x70, 0x02, RECOVERED_ERROR, 0, 0, 0x1c,
  1933. SKD_CHECK_STATUS_REPORT_GOOD },
  1934. /* Smart alerts */
  1935. { 0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E, /* warnings */
  1936. SKD_CHECK_STATUS_REPORT_SMART_ALERT },
  1937. { 0x70, 0x02, NO_SENSE, 0x5D, 0x00, 0x1E, /* thresholds */
  1938. SKD_CHECK_STATUS_REPORT_SMART_ALERT },
  1939. { 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F, /* temperature over trigger */
  1940. SKD_CHECK_STATUS_REPORT_SMART_ALERT },
  1941. /* Retry (with limits) */
  1942. { 0x70, 0x02, 0x0B, 0, 0, 0x1C, /* This one is for DMA ERROR */
  1943. SKD_CHECK_STATUS_REQUEUE_REQUEST },
  1944. { 0x70, 0x02, 0x06, 0x0B, 0x00, 0x1E, /* warnings */
  1945. SKD_CHECK_STATUS_REQUEUE_REQUEST },
  1946. { 0x70, 0x02, 0x06, 0x5D, 0x00, 0x1E, /* thresholds */
  1947. SKD_CHECK_STATUS_REQUEUE_REQUEST },
  1948. { 0x70, 0x02, 0x06, 0x80, 0x30, 0x1F, /* backup power */
  1949. SKD_CHECK_STATUS_REQUEUE_REQUEST },
  1950. /* Busy (or about to be) */
  1951. { 0x70, 0x02, 0x06, 0x3f, 0x01, 0x1F, /* fw changed */
  1952. SKD_CHECK_STATUS_BUSY_IMMINENT },
  1953. };
  1954. /*
  1955. * Look up status and sense data to decide how to handle the error
  1956. * from the device.
1957. * The mask says which fields must match, e.g. mask=0x18 means check
1958. * type and stat and ignore key, asc, ascq.
  1959. */
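/*
* Example using the table above: the DMA-error entry has mask 0x1C, so
* type (0x10), stat (0x08) and key (0x04) must equal 0x70, 0x02 and
* 0x0B respectively while asc (0x02) and ascq (0x01) are ignored; a
* match returns SKD_CHECK_STATUS_REQUEUE_REQUEST.
*/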
  1960. static enum skd_check_status_action skd_check_status(struct skd_device *skdev,
  1961. u8 cmp_status,
  1962. volatile struct fit_comp_error_info *skerr)
  1963. {
  1964. int i, n;
  1965. pr_err("(%s): key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
  1966. skd_name(skdev), skerr->key, skerr->code, skerr->qual,
  1967. skerr->fruc);
  1968. pr_debug("%s:%s:%d stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
  1969. skdev->name, __func__, __LINE__, skerr->type, cmp_status,
  1970. skerr->key, skerr->code, skerr->qual, skerr->fruc);
  1971. /* Does the info match an entry in the good category? */
  1972. n = sizeof(skd_chkstat_table) / sizeof(skd_chkstat_table[0]);
  1973. for (i = 0; i < n; i++) {
  1974. struct sns_info *sns = &skd_chkstat_table[i];
  1975. if (sns->mask & 0x10)
  1976. if (skerr->type != sns->type)
  1977. continue;
  1978. if (sns->mask & 0x08)
  1979. if (cmp_status != sns->stat)
  1980. continue;
  1981. if (sns->mask & 0x04)
  1982. if (skerr->key != sns->key)
  1983. continue;
  1984. if (sns->mask & 0x02)
  1985. if (skerr->code != sns->asc)
  1986. continue;
  1987. if (sns->mask & 0x01)
  1988. if (skerr->qual != sns->ascq)
  1989. continue;
  1990. if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
  1991. pr_err("(%s): SMART Alert: sense key/asc/ascq "
  1992. "%02x/%02x/%02x\n",
  1993. skd_name(skdev), skerr->key,
  1994. skerr->code, skerr->qual);
  1995. }
  1996. return sns->action;
  1997. }
  1998. /* No other match, so nonzero status means error,
  1999. * zero status means good
  2000. */
  2001. if (cmp_status) {
  2002. pr_debug("%s:%s:%d status check: error\n",
  2003. skdev->name, __func__, __LINE__);
  2004. return SKD_CHECK_STATUS_REPORT_ERROR;
  2005. }
  2006. pr_debug("%s:%s:%d status check good default\n",
  2007. skdev->name, __func__, __LINE__);
  2008. return SKD_CHECK_STATUS_REPORT_GOOD;
  2009. }
  2010. static void skd_resolve_req_exception(struct skd_device *skdev,
  2011. struct skd_request_context *skreq)
  2012. {
  2013. u8 cmp_status = skreq->completion.status;
  2014. switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
  2015. case SKD_CHECK_STATUS_REPORT_GOOD:
  2016. case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
  2017. skd_end_request(skdev, skreq, 0);
  2018. break;
  2019. case SKD_CHECK_STATUS_BUSY_IMMINENT:
  2020. skd_log_skreq(skdev, skreq, "retry(busy)");
  2021. skd_requeue_request(skdev, skreq);
  2022. pr_info("(%s) drive BUSY imminent\n", skd_name(skdev));
  2023. skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
  2024. skdev->timer_countdown = SKD_TIMER_MINUTES(20);
  2025. skd_quiesce_dev(skdev);
  2026. break;
  2027. case SKD_CHECK_STATUS_REQUEUE_REQUEST:
  2028. if ((unsigned long) ++skreq->req->special < SKD_MAX_RETRIES) {
  2029. skd_log_skreq(skdev, skreq, "retry");
  2030. skd_requeue_request(skdev, skreq);
  2031. break;
  2032. }
  2033. /* fall through to report error */
  2034. case SKD_CHECK_STATUS_REPORT_ERROR:
  2035. default:
  2036. skd_end_request(skdev, skreq, -EIO);
  2037. break;
  2038. }
  2039. }
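/*
* Retry accounting note: the REQUEUE case above reuses the block
* request's ->special pointer as a counter (cast to unsigned long), so
* a request is put back on the queue only while its incremented count
* stays below SKD_MAX_RETRIES; after that the error is reported.
*/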
  2040. static void skd_requeue_request(struct skd_device *skdev,
  2041. struct skd_request_context *skreq)
  2042. {
  2043. blk_requeue_request(skdev->queue, skreq->req);
  2044. }
  2045. /* assume spinlock is already held */
  2046. static void skd_release_skreq(struct skd_device *skdev,
  2047. struct skd_request_context *skreq)
  2048. {
  2049. u32 msg_slot;
  2050. struct skd_fitmsg_context *skmsg;
  2051. u32 timo_slot;
  2052. /*
  2053. * Reclaim the FIT msg buffer if this is
  2054. * the first of the requests it carried to
  2055. * be completed. The FIT msg buffer used to
  2056. * send this request cannot be reused until
  2057. * we are sure the s1120 card has copied
  2058. * it to its memory. The FIT msg might have
  2059. * contained several requests. As soon as
  2060. * any of them are completed we know that
  2061. * the entire FIT msg was transferred.
  2062. * Only the first completed request will
  2063. * match the FIT msg buffer id. The FIT
  2064. * msg buffer id is immediately updated.
  2065. * When subsequent requests complete the FIT
  2066. * msg buffer id won't match, so we know
  2067. * quite cheaply that it is already done.
  2068. */
  2069. msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK;
  2070. SKD_ASSERT(msg_slot < skdev->num_fitmsg_context);
  2071. skmsg = &skdev->skmsg_table[msg_slot];
  2072. if (skmsg->id == skreq->fitmsg_id) {
  2073. SKD_ASSERT(skmsg->state == SKD_MSG_STATE_BUSY);
  2074. SKD_ASSERT(skmsg->outstanding > 0);
  2075. skmsg->outstanding--;
  2076. if (skmsg->outstanding == 0) {
  2077. skmsg->state = SKD_MSG_STATE_IDLE;
  2078. skmsg->id += SKD_ID_INCR;
  2079. skmsg->next = skdev->skmsg_free_list;
  2080. skdev->skmsg_free_list = skmsg;
  2081. }
  2082. }
  2083. /*
  2084. * Decrease the number of active requests.
  2085. * Also decrements the count in the timeout slot.
  2086. */
  2087. SKD_ASSERT(skdev->in_flight > 0);
  2088. skdev->in_flight -= 1;
  2089. timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
  2090. SKD_ASSERT(skdev->timeout_slot[timo_slot] > 0);
  2091. skdev->timeout_slot[timo_slot] -= 1;
  2092. /*
  2093. * Reset backpointer
  2094. */
  2095. skreq->req = NULL;
  2096. /*
  2097. * Reclaim the skd_request_context
  2098. */
  2099. skreq->state = SKD_REQ_STATE_IDLE;
  2100. skreq->id += SKD_ID_INCR;
  2101. skreq->next = skdev->skreq_free_list;
  2102. skdev->skreq_free_list = skreq;
  2103. }
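/*
* Note on the id arithmetic above: the low bits of an id select the
* table slot (SKD_ID_SLOT_MASK) while SKD_ID_INCR presumably advances a
* generation count in the higher bits. Bumping the id each time a
* context is recycled means a stale completion for the old generation
* fails the "skreq->id != req_id" check in skd_isr_completion_posted()
* and is dropped instead of corrupting a reused slot.
*/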
  2104. #define DRIVER_INQ_EVPD_PAGE_CODE 0xDA
  2105. static void skd_do_inq_page_00(struct skd_device *skdev,
  2106. volatile struct fit_completion_entry_v1 *skcomp,
  2107. volatile struct fit_comp_error_info *skerr,
  2108. uint8_t *cdb, uint8_t *buf)
  2109. {
  2110. uint16_t insert_pt, max_bytes, drive_pages, drive_bytes, new_size;
  2111. /* Caller requested "supported pages". The driver needs to insert
  2112. * its page.
  2113. */
  2114. pr_debug("%s:%s:%d skd_do_driver_inquiry: modify supported pages.\n",
  2115. skdev->name, __func__, __LINE__);
  2116. /* If the device rejected the request because the CDB was
  2117. * improperly formed, then just leave.
  2118. */
  2119. if (skcomp->status == SAM_STAT_CHECK_CONDITION &&
  2120. skerr->key == ILLEGAL_REQUEST && skerr->code == 0x24)
  2121. return;
  2122. /* Get the amount of space the caller allocated */
  2123. max_bytes = (cdb[3] << 8) | cdb[4];
  2124. /* Get the number of pages actually returned by the device */
  2125. drive_pages = (buf[2] << 8) | buf[3];
  2126. drive_bytes = drive_pages + 4;
  2127. new_size = drive_pages + 1;
  2128. /* Supported pages must be in numerical order, so find where
  2129. * the driver page needs to be inserted into the list of
  2130. * pages returned by the device.
  2131. */
  2132. for (insert_pt = 4; insert_pt < drive_bytes; insert_pt++) {
  2133. if (buf[insert_pt] == DRIVER_INQ_EVPD_PAGE_CODE)
2134. return; /* Device already uses this page code; abort */
  2135. else if (buf[insert_pt] > DRIVER_INQ_EVPD_PAGE_CODE)
  2136. break;
  2137. }
  2138. if (insert_pt < max_bytes) {
  2139. uint16_t u;
  2140. /* Shift everything up one byte to make room. */
  2141. for (u = new_size + 3; u > insert_pt; u--)
  2142. buf[u] = buf[u - 1];
  2143. buf[insert_pt] = DRIVER_INQ_EVPD_PAGE_CODE;
2144. /* num_returned_bytes is big-endian on the wire: convert it, */
2145. /* count the page the driver just added, and convert it back. */
2146. skcomp->num_returned_bytes =
2147. cpu_to_be32(be32_to_cpu(skcomp->num_returned_bytes) + 1);
  2149. }
  2150. /* update page length field to reflect the driver's page too */
  2151. buf[2] = (uint8_t)((new_size >> 8) & 0xFF);
  2152. buf[3] = (uint8_t)((new_size >> 0) & 0xFF);
  2153. }
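/*
* Worked example of the insertion above: if the drive reports supported
* pages 0x00 0x80 0x83, drive_pages == 3 and the scan runs off the end
* (every code is below 0xDA), so 0xDA is appended at insert_pt == 7,
* num_returned_bytes grows by one, and buf[2..3] is rewritten to the
* new page count of 4.
*/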
  2154. static void skd_get_link_info(struct pci_dev *pdev, u8 *speed, u8 *width)
  2155. {
  2156. int pcie_reg;
  2157. u16 pci_bus_speed;
  2158. u8 pci_lanes;
  2159. pcie_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
  2160. if (pcie_reg) {
  2161. u16 linksta;
  2162. pci_read_config_word(pdev, pcie_reg + PCI_EXP_LNKSTA, &linksta);
  2163. pci_bus_speed = linksta & 0xF;
  2164. pci_lanes = (linksta & 0x3F0) >> 4;
  2165. } else {
  2166. *speed = STEC_LINK_UNKNOWN;
  2167. *width = 0xFF;
  2168. return;
  2169. }
  2170. switch (pci_bus_speed) {
  2171. case 1:
  2172. *speed = STEC_LINK_2_5GTS;
  2173. break;
  2174. case 2:
  2175. *speed = STEC_LINK_5GTS;
  2176. break;
  2177. case 3:
  2178. *speed = STEC_LINK_8GTS;
  2179. break;
  2180. default:
  2181. *speed = STEC_LINK_UNKNOWN;
  2182. break;
  2183. }
  2184. if (pci_lanes <= 0x20)
  2185. *width = pci_lanes;
  2186. else
  2187. *width = 0xFF;
  2188. }
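/*
* Decode example for the Link Status read above: linksta == 0x0042 has
* speed field 0x2 (reported as STEC_LINK_5GTS) and lane field
* (0x042 & 0x3F0) >> 4 == 4, i.e. a 5 GT/s x4 link.
*/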
  2189. static void skd_do_inq_page_da(struct skd_device *skdev,
  2190. volatile struct fit_completion_entry_v1 *skcomp,
  2191. volatile struct fit_comp_error_info *skerr,
  2192. uint8_t *cdb, uint8_t *buf)
  2193. {
  2194. unsigned max_bytes;
  2195. struct driver_inquiry_data inq;
  2196. u16 val;
  2197. pr_debug("%s:%s:%d skd_do_driver_inquiry: return driver page\n",
  2198. skdev->name, __func__, __LINE__);
  2199. memset(&inq, 0, sizeof(inq));
  2200. inq.page_code = DRIVER_INQ_EVPD_PAGE_CODE;
  2201. if (skdev->pdev && skdev->pdev->bus) {
  2202. skd_get_link_info(skdev->pdev,
  2203. &inq.pcie_link_speed, &inq.pcie_link_lanes);
  2204. inq.pcie_bus_number = cpu_to_be16(skdev->pdev->bus->number);
  2205. inq.pcie_device_number = PCI_SLOT(skdev->pdev->devfn);
  2206. inq.pcie_function_number = PCI_FUNC(skdev->pdev->devfn);
  2207. pci_read_config_word(skdev->pdev, PCI_VENDOR_ID, &val);
  2208. inq.pcie_vendor_id = cpu_to_be16(val);
  2209. pci_read_config_word(skdev->pdev, PCI_DEVICE_ID, &val);
  2210. inq.pcie_device_id = cpu_to_be16(val);
  2211. pci_read_config_word(skdev->pdev, PCI_SUBSYSTEM_VENDOR_ID,
  2212. &val);
  2213. inq.pcie_subsystem_vendor_id = cpu_to_be16(val);
  2214. pci_read_config_word(skdev->pdev, PCI_SUBSYSTEM_ID, &val);
  2215. inq.pcie_subsystem_device_id = cpu_to_be16(val);
  2216. } else {
  2217. inq.pcie_bus_number = 0xFFFF;
  2218. inq.pcie_device_number = 0xFF;
  2219. inq.pcie_function_number = 0xFF;
  2220. inq.pcie_link_speed = 0xFF;
  2221. inq.pcie_link_lanes = 0xFF;
  2222. inq.pcie_vendor_id = 0xFFFF;
  2223. inq.pcie_device_id = 0xFFFF;
  2224. inq.pcie_subsystem_vendor_id = 0xFFFF;
  2225. inq.pcie_subsystem_device_id = 0xFFFF;
  2226. }
2227. /* Driver version, fixed length, padded with spaces on the right */
  2228. inq.driver_version_length = sizeof(inq.driver_version);
  2229. memset(&inq.driver_version, ' ', sizeof(inq.driver_version));
  2230. memcpy(inq.driver_version, DRV_VER_COMPL,
  2231. min(sizeof(inq.driver_version), strlen(DRV_VER_COMPL)));
  2232. inq.page_length = cpu_to_be16((sizeof(inq) - 4));
  2233. /* Clear the error set by the device */
  2234. skcomp->status = SAM_STAT_GOOD;
  2235. memset((void *)skerr, 0, sizeof(*skerr));
  2236. /* copy response into output buffer */
  2237. max_bytes = (cdb[3] << 8) | cdb[4];
  2238. memcpy(buf, &inq, min_t(unsigned, max_bytes, sizeof(inq)));
2239. skcomp->num_returned_bytes =
2240. cpu_to_be32(min_t(uint16_t, max_bytes, sizeof(inq)));
  2241. }
  2242. static void skd_do_driver_inq(struct skd_device *skdev,
  2243. volatile struct fit_completion_entry_v1 *skcomp,
  2244. volatile struct fit_comp_error_info *skerr,
  2245. uint8_t *cdb, uint8_t *buf)
  2246. {
  2247. if (!buf)
  2248. return;
  2249. else if (cdb[0] != INQUIRY)
  2250. return; /* Not an INQUIRY */
  2251. else if ((cdb[1] & 1) == 0)
  2252. return; /* EVPD not set */
  2253. else if (cdb[2] == 0)
  2254. /* Need to add driver's page to supported pages list */
  2255. skd_do_inq_page_00(skdev, skcomp, skerr, cdb, buf);
  2256. else if (cdb[2] == DRIVER_INQ_EVPD_PAGE_CODE)
  2257. /* Caller requested driver's page */
  2258. skd_do_inq_page_da(skdev, skcomp, skerr, cdb, buf);
  2259. }
  2260. static unsigned char *skd_sg_1st_page_ptr(struct scatterlist *sg)
  2261. {
  2262. if (!sg)
  2263. return NULL;
  2264. if (!sg_page(sg))
  2265. return NULL;
  2266. return sg_virt(sg);
  2267. }
  2268. static void skd_process_scsi_inq(struct skd_device *skdev,
  2269. volatile struct fit_completion_entry_v1
  2270. *skcomp,
  2271. volatile struct fit_comp_error_info *skerr,
  2272. struct skd_special_context *skspcl)
  2273. {
  2274. uint8_t *buf;
  2275. struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
  2276. struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
  2277. dma_sync_sg_for_cpu(skdev->class_dev, skspcl->req.sg, skspcl->req.n_sg,
  2278. skspcl->req.sg_data_dir);
  2279. buf = skd_sg_1st_page_ptr(skspcl->req.sg);
  2280. if (buf)
  2281. skd_do_driver_inq(skdev, skcomp, skerr, scsi_req->cdb, buf);
  2282. }
  2283. static int skd_isr_completion_posted(struct skd_device *skdev,
  2284. int limit, int *enqueued)
  2285. {
  2286. volatile struct fit_completion_entry_v1 *skcmp = NULL;
  2287. volatile struct fit_comp_error_info *skerr;
  2288. u16 req_id;
  2289. u32 req_slot;
  2290. struct skd_request_context *skreq;
  2291. u16 cmp_cntxt = 0;
  2292. u8 cmp_status = 0;
  2293. u8 cmp_cycle = 0;
  2294. u32 cmp_bytes = 0;
  2295. int rc = 0;
  2296. int processed = 0;
  2297. for (;; ) {
  2298. SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);
  2299. skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
  2300. cmp_cycle = skcmp->cycle;
  2301. cmp_cntxt = skcmp->tag;
  2302. cmp_status = skcmp->status;
  2303. cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);
  2304. skerr = &skdev->skerr_table[skdev->skcomp_ix];
  2305. pr_debug("%s:%s:%d "
  2306. "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d "
  2307. "busy=%d rbytes=0x%x proto=%d\n",
  2308. skdev->name, __func__, __LINE__, skdev->skcomp_cycle,
  2309. skdev->skcomp_ix, cmp_cycle, cmp_cntxt, cmp_status,
  2310. skdev->in_flight, cmp_bytes, skdev->proto_ver);
  2311. if (cmp_cycle != skdev->skcomp_cycle) {
  2312. pr_debug("%s:%s:%d end of completions\n",
  2313. skdev->name, __func__, __LINE__);
  2314. break;
  2315. }
  2316. /*
  2317. * Update the completion queue head index and possibly
  2318. * the completion cycle count. 8-bit wrap-around.
  2319. */
  2320. skdev->skcomp_ix++;
  2321. if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
  2322. skdev->skcomp_ix = 0;
  2323. skdev->skcomp_cycle++;
  2324. }
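/*
* The cycle byte is what keeps this ring consistent without an explicit
* count: entries are presumably stamped by the device with its current
* pass number, and when the consumer wraps back to index 0 it bumps
* skcomp_cycle, so an entry still carrying the previous pass's value is
* not yet valid and triggers the "end of completions" break above.
*/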
  2325. /*
  2326. * The command context is a unique 32-bit ID. The low order
  2327. * bits help locate the request. The request is usually a
  2328. * r/w request (see skd_start() above) or a special request.
  2329. */
  2330. req_id = cmp_cntxt;
  2331. req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK;
  2332. /* Is this other than a r/w request? */
  2333. if (req_slot >= skdev->num_req_context) {
  2334. /*
  2335. * This is not a completion for a r/w request.
  2336. */
  2337. skd_complete_other(skdev, skcmp, skerr);
  2338. continue;
  2339. }
  2340. skreq = &skdev->skreq_table[req_slot];
  2341. /*
  2342. * Make sure the request ID for the slot matches.
  2343. */
  2344. if (skreq->id != req_id) {
  2345. pr_debug("%s:%s:%d mismatch comp_id=0x%x req_id=0x%x\n",
  2346. skdev->name, __func__, __LINE__,
  2347. req_id, skreq->id);
  2348. {
  2349. u16 new_id = cmp_cntxt;
  2350. pr_err("(%s): Completion mismatch "
  2351. "comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
  2352. skd_name(skdev), req_id,
  2353. skreq->id, new_id);
  2354. continue;
  2355. }
  2356. }
  2357. SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY);
  2358. if (skreq->state == SKD_REQ_STATE_ABORTED) {
  2359. pr_debug("%s:%s:%d reclaim req %p id=%04x\n",
  2360. skdev->name, __func__, __LINE__,
  2361. skreq, skreq->id);
  2362. /* a previously timed out command can
  2363. * now be cleaned up */
  2364. skd_release_skreq(skdev, skreq);
  2365. continue;
  2366. }
  2367. skreq->completion = *skcmp;
  2368. if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) {
  2369. skreq->err_info = *skerr;
  2370. skd_log_check_status(skdev, cmp_status, skerr->key,
  2371. skerr->code, skerr->qual,
  2372. skerr->fruc);
  2373. }
  2374. /* Release DMA resources for the request. */
  2375. if (skreq->n_sg > 0)
  2376. skd_postop_sg_list(skdev, skreq);
  2377. if (!skreq->req) {
  2378. pr_debug("%s:%s:%d NULL backptr skdreq %p, "
  2379. "req=0x%x req_id=0x%x\n",
  2380. skdev->name, __func__, __LINE__,
  2381. skreq, skreq->id, req_id);
  2382. } else {
  2383. /*
  2384. * Capture the outcome and post it back to the
  2385. * native request.
  2386. */
  2387. if (likely(cmp_status == SAM_STAT_GOOD))
  2388. skd_end_request(skdev, skreq, 0);
  2389. else
  2390. skd_resolve_req_exception(skdev, skreq);
  2391. }
  2392. /*
  2393. * Release the skreq, its FIT msg (if one), timeout slot,
  2394. * and queue depth.
  2395. */
  2396. skd_release_skreq(skdev, skreq);
2397. /* skd_isr_comp_limit equal to zero means no limit */
  2398. if (limit) {
  2399. if (++processed >= limit) {
  2400. rc = 1;
  2401. break;
  2402. }
  2403. }
  2404. }
  2405. if ((skdev->state == SKD_DRVR_STATE_PAUSING)
  2406. && (skdev->in_flight) == 0) {
  2407. skdev->state = SKD_DRVR_STATE_PAUSED;
  2408. wake_up_interruptible(&skdev->waitq);
  2409. }
  2410. return rc;
  2411. }
  2412. static void skd_complete_other(struct skd_device *skdev,
  2413. volatile struct fit_completion_entry_v1 *skcomp,
  2414. volatile struct fit_comp_error_info *skerr)
  2415. {
  2416. u32 req_id = 0;
  2417. u32 req_table;
  2418. u32 req_slot;
  2419. struct skd_special_context *skspcl;
  2420. req_id = skcomp->tag;
  2421. req_table = req_id & SKD_ID_TABLE_MASK;
  2422. req_slot = req_id & SKD_ID_SLOT_MASK;
  2423. pr_debug("%s:%s:%d table=0x%x id=0x%x slot=%d\n",
  2424. skdev->name, __func__, __LINE__,
  2425. req_table, req_id, req_slot);
  2426. /*
  2427. * Based on the request id, determine how to dispatch this completion.
2428. * This switch/case finds the good cases and forwards the
2429. * completion entry. Errors are reported below the switch.
  2430. */
  2431. switch (req_table) {
  2432. case SKD_ID_RW_REQUEST:
  2433. /*
  2434. * The caller, skd_completion_posted_isr() above,
  2435. * handles r/w requests. The only way we get here
  2436. * is if the req_slot is out of bounds.
  2437. */
  2438. break;
  2439. case SKD_ID_SPECIAL_REQUEST:
  2440. /*
  2441. * Make sure the req_slot is in bounds and that the id
  2442. * matches.
  2443. */
  2444. if (req_slot < skdev->n_special) {
  2445. skspcl = &skdev->skspcl_table[req_slot];
  2446. if (skspcl->req.id == req_id &&
  2447. skspcl->req.state == SKD_REQ_STATE_BUSY) {
  2448. skd_complete_special(skdev,
  2449. skcomp, skerr, skspcl);
  2450. return;
  2451. }
  2452. }
  2453. break;
  2454. case SKD_ID_INTERNAL:
  2455. if (req_slot == 0) {
  2456. skspcl = &skdev->internal_skspcl;
  2457. if (skspcl->req.id == req_id &&
  2458. skspcl->req.state == SKD_REQ_STATE_BUSY) {
  2459. skd_complete_internal(skdev,
  2460. skcomp, skerr, skspcl);
  2461. return;
  2462. }
  2463. }
  2464. break;
  2465. case SKD_ID_FIT_MSG:
  2466. /*
2467. * These ids should never appear in a completion record.
  2468. */
  2469. break;
  2470. default:
  2471. /*
2472. * These ids should never appear anywhere.
  2473. */
  2474. break;
  2475. }
  2476. /*
  2477. * If we get here it is a bad or stale id.
  2478. */
  2479. }
  2480. static void skd_complete_special(struct skd_device *skdev,
  2481. volatile struct fit_completion_entry_v1
  2482. *skcomp,
  2483. volatile struct fit_comp_error_info *skerr,
  2484. struct skd_special_context *skspcl)
  2485. {
  2486. pr_debug("%s:%s:%d completing special request %p\n",
  2487. skdev->name, __func__, __LINE__, skspcl);
  2488. if (skspcl->orphaned) {
  2489. /* Discard orphaned request */
  2490. /* ?: Can this release directly or does it need
  2491. * to use a worker? */
  2492. pr_debug("%s:%s:%d release orphaned %p\n",
  2493. skdev->name, __func__, __LINE__, skspcl);
  2494. skd_release_special(skdev, skspcl);
  2495. return;
  2496. }
  2497. skd_process_scsi_inq(skdev, skcomp, skerr, skspcl);
  2498. skspcl->req.state = SKD_REQ_STATE_COMPLETED;
  2499. skspcl->req.completion = *skcomp;
  2500. skspcl->req.err_info = *skerr;
  2501. skd_log_check_status(skdev, skspcl->req.completion.status, skerr->key,
  2502. skerr->code, skerr->qual, skerr->fruc);
  2503. wake_up_interruptible(&skdev->waitq);
  2504. }
  2505. /* assume spinlock is already held */
  2506. static void skd_release_special(struct skd_device *skdev,
  2507. struct skd_special_context *skspcl)
  2508. {
  2509. int i, was_depleted;
  2510. for (i = 0; i < skspcl->req.n_sg; i++) {
  2511. struct page *page = sg_page(&skspcl->req.sg[i]);
  2512. __free_page(page);
  2513. }
  2514. was_depleted = (skdev->skspcl_free_list == NULL);
  2515. skspcl->req.state = SKD_REQ_STATE_IDLE;
  2516. skspcl->req.id += SKD_ID_INCR;
  2517. skspcl->req.next =
  2518. (struct skd_request_context *)skdev->skspcl_free_list;
  2519. skdev->skspcl_free_list = (struct skd_special_context *)skspcl;
  2520. if (was_depleted) {
  2521. pr_debug("%s:%s:%d skspcl was depleted\n",
  2522. skdev->name, __func__, __LINE__);
2523. /* Free list was depleted. There might be waiters. */
  2524. wake_up_interruptible(&skdev->waitq);
  2525. }
  2526. }
  2527. static void skd_reset_skcomp(struct skd_device *skdev)
  2528. {
  2529. u32 nbytes;
  2530. struct fit_completion_entry_v1 *skcomp;
  2531. nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
  2532. nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
  2533. memset(skdev->skcomp_table, 0, nbytes);
  2534. skdev->skcomp_ix = 0;
  2535. skdev->skcomp_cycle = 1;
  2536. }
  2537. /*
  2538. *****************************************************************************
  2539. * INTERRUPTS
  2540. *****************************************************************************
  2541. */
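/*
 * Deferred completion processing, scheduled from the interrupt
 * handlers when completion work is pushed off to process context.
 * Runs under the device lock with no entry limit (limit = 0), then
 * kicks the request queue to make use of any freed resources.
 */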
  2542. static void skd_completion_worker(struct work_struct *work)
  2543. {
  2544. struct skd_device *skdev =
  2545. container_of(work, struct skd_device, completion_worker);
  2546. unsigned long flags;
  2547. int flush_enqueued = 0;
  2548. spin_lock_irqsave(&skdev->lock, flags);
  2549. /*
  2550. * pass in limit=0, which means no limit..
  2551. * process everything in compq
  2552. */
  2553. skd_isr_completion_posted(skdev, 0, &flush_enqueued);
  2554. skd_request_fn(skdev->queue);
  2555. spin_unlock_irqrestore(&skdev->lock, flags);
  2556. }
  2557. static void skd_isr_msg_from_dev(struct skd_device *skdev);
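/*
 * Legacy/MSI interrupt handler. Reads and acknowledges
 * FIT_INT_STATUS_HOST in a loop, dispatching completion, firmware
 * state change and message-from-device events until no interrupt
 * bits remain set. Completion processing beyond skd_isr_comp_limit
 * is deferred to the completion worker.
 */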
2558. static irqreturn_t
2559. skd_isr(int irq, void *ptr)
  2560. {
  2561. struct skd_device *skdev;
  2562. u32 intstat;
  2563. u32 ack;
  2564. int rc = 0;
  2565. int deferred = 0;
  2566. int flush_enqueued = 0;
  2567. skdev = (struct skd_device *)ptr;
  2568. spin_lock(&skdev->lock);
2569. for (;;) {
  2570. intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);
  2571. ack = FIT_INT_DEF_MASK;
  2572. ack &= intstat;
  2573. pr_debug("%s:%s:%d intstat=0x%x ack=0x%x\n",
  2574. skdev->name, __func__, __LINE__, intstat, ack);
  2575. /* As long as there is an int pending on device, keep
  2576. * running loop. When none, get out, but if we've never
  2577. * done any processing, call completion handler?
  2578. */
  2579. if (ack == 0) {
  2580. /* No interrupts on device, but run the completion
  2581. * processor anyway?
  2582. */
  2583. if (rc == 0)
2584. if (likely(skdev->state
  2585. == SKD_DRVR_STATE_ONLINE))
  2586. deferred = 1;
  2587. break;
  2588. }
  2589. rc = IRQ_HANDLED;
  2590. SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);
  2591. if (likely((skdev->state != SKD_DRVR_STATE_LOAD) &&
  2592. (skdev->state != SKD_DRVR_STATE_STOPPING))) {
  2593. if (intstat & FIT_ISH_COMPLETION_POSTED) {
  2594. /*
  2595. * If we have already deferred completion
  2596. * processing, don't bother running it again
  2597. */
  2598. if (deferred == 0)
  2599. deferred =
  2600. skd_isr_completion_posted(skdev,
  2601. skd_isr_comp_limit, &flush_enqueued);
  2602. }
  2603. if (intstat & FIT_ISH_FW_STATE_CHANGE) {
  2604. skd_isr_fwstate(skdev);
  2605. if (skdev->state == SKD_DRVR_STATE_FAULT ||
  2606. skdev->state ==
  2607. SKD_DRVR_STATE_DISAPPEARED) {
  2608. spin_unlock(&skdev->lock);
  2609. return rc;
  2610. }
  2611. }
  2612. if (intstat & FIT_ISH_MSG_FROM_DEV)
  2613. skd_isr_msg_from_dev(skdev);
  2614. }
  2615. }
  2616. if (unlikely(flush_enqueued))
  2617. skd_request_fn(skdev->queue);
  2618. if (deferred)
  2619. schedule_work(&skdev->completion_worker);
  2620. else if (!flush_enqueued)
  2621. skd_request_fn(skdev->queue);
  2622. spin_unlock(&skdev->lock);
  2623. return rc;
  2624. }
  2625. static void skd_drive_fault(struct skd_device *skdev)
  2626. {
  2627. skdev->state = SKD_DRVR_STATE_FAULT;
  2628. pr_err("(%s): Drive FAULT\n", skd_name(skdev));
  2629. }
  2630. static void skd_drive_disappeared(struct skd_device *skdev)
  2631. {
  2632. skdev->state = SKD_DRVR_STATE_DISAPPEARED;
  2633. pr_err("(%s): Drive DISAPPEARED\n", skd_name(skdev));
  2634. }
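/*
 * Handle a firmware/drive state change interrupt: read the new
 * FIT_STATUS drive state and translate it into the corresponding
 * driver state, triggering soft resets, timers or request recovery
 * as needed.
 */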
  2635. static void skd_isr_fwstate(struct skd_device *skdev)
  2636. {
  2637. u32 sense;
  2638. u32 state;
  2639. u32 mtd;
  2640. int prev_driver_state = skdev->state;
  2641. sense = SKD_READL(skdev, FIT_STATUS);
  2642. state = sense & FIT_SR_DRIVE_STATE_MASK;
  2643. pr_err("(%s): s1120 state %s(%d)=>%s(%d)\n",
  2644. skd_name(skdev),
  2645. skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
  2646. skd_drive_state_to_str(state), state);
  2647. skdev->drive_state = state;
  2648. switch (skdev->drive_state) {
  2649. case FIT_SR_DRIVE_INIT:
  2650. if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
  2651. skd_disable_interrupts(skdev);
  2652. break;
  2653. }
  2654. if (skdev->state == SKD_DRVR_STATE_RESTARTING)
  2655. skd_recover_requests(skdev, 0);
  2656. if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
  2657. skdev->timer_countdown = SKD_STARTING_TIMO;
  2658. skdev->state = SKD_DRVR_STATE_STARTING;
  2659. skd_soft_reset(skdev);
  2660. break;
  2661. }
  2662. mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0);
  2663. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2664. skdev->last_mtd = mtd;
  2665. break;
  2666. case FIT_SR_DRIVE_ONLINE:
  2667. skdev->cur_max_queue_depth = skd_max_queue_depth;
  2668. if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth)
  2669. skdev->cur_max_queue_depth = skdev->dev_max_queue_depth;
  2670. skdev->queue_low_water_mark =
  2671. skdev->cur_max_queue_depth * 2 / 3 + 1;
  2672. if (skdev->queue_low_water_mark < 1)
  2673. skdev->queue_low_water_mark = 1;
  2674. pr_info(
  2675. "(%s): Queue depth limit=%d dev=%d lowat=%d\n",
  2676. skd_name(skdev),
  2677. skdev->cur_max_queue_depth,
  2678. skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
  2679. skd_refresh_device_data(skdev);
  2680. break;
  2681. case FIT_SR_DRIVE_BUSY:
  2682. skdev->state = SKD_DRVR_STATE_BUSY;
  2683. skdev->timer_countdown = SKD_BUSY_TIMO;
  2684. skd_quiesce_dev(skdev);
  2685. break;
  2686. case FIT_SR_DRIVE_BUSY_SANITIZE:
  2687. /* set timer for 3 seconds, we'll abort any unfinished
  2688. * commands after that expires
  2689. */
  2690. skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
  2691. skdev->timer_countdown = SKD_TIMER_SECONDS(3);
  2692. blk_start_queue(skdev->queue);
  2693. break;
  2694. case FIT_SR_DRIVE_BUSY_ERASE:
  2695. skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
  2696. skdev->timer_countdown = SKD_BUSY_TIMO;
  2697. break;
  2698. case FIT_SR_DRIVE_OFFLINE:
  2699. skdev->state = SKD_DRVR_STATE_IDLE;
  2700. break;
  2701. case FIT_SR_DRIVE_SOFT_RESET:
  2702. switch (skdev->state) {
  2703. case SKD_DRVR_STATE_STARTING:
  2704. case SKD_DRVR_STATE_RESTARTING:
  2705. /* Expected by a caller of skd_soft_reset() */
  2706. break;
  2707. default:
  2708. skdev->state = SKD_DRVR_STATE_RESTARTING;
  2709. break;
  2710. }
  2711. break;
  2712. case FIT_SR_DRIVE_FW_BOOTING:
  2713. pr_debug("%s:%s:%d ISR FIT_SR_DRIVE_FW_BOOTING %s\n",
  2714. skdev->name, __func__, __LINE__, skdev->name);
  2715. skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
  2716. skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
  2717. break;
  2718. case FIT_SR_DRIVE_DEGRADED:
  2719. case FIT_SR_PCIE_LINK_DOWN:
  2720. case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
  2721. break;
  2722. case FIT_SR_DRIVE_FAULT:
  2723. skd_drive_fault(skdev);
  2724. skd_recover_requests(skdev, 0);
  2725. blk_start_queue(skdev->queue);
  2726. break;
  2727. /* PCIe bus returned all Fs? */
  2728. case 0xFF:
  2729. pr_info("(%s): state=0x%x sense=0x%x\n",
  2730. skd_name(skdev), state, sense);
  2731. skd_drive_disappeared(skdev);
  2732. skd_recover_requests(skdev, 0);
  2733. blk_start_queue(skdev->queue);
  2734. break;
  2735. default:
  2736. /*
2737. * Unknown FW State. Wait for a state we recognize.
  2738. */
  2739. break;
  2740. }
  2741. pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
  2742. skd_name(skdev),
  2743. skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
  2744. skd_skdev_state_to_str(skdev->state), skdev->state);
  2745. }
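/*
 * Abort or requeue every in-flight request and rebuild the skreq,
 * skmsg and skspcl free lists. Called when the drive faults,
 * disappears or is restarted, so all timeout slots and the in-flight
 * count are cleared as well.
 */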
  2746. static void skd_recover_requests(struct skd_device *skdev, int requeue)
  2747. {
  2748. int i;
  2749. for (i = 0; i < skdev->num_req_context; i++) {
  2750. struct skd_request_context *skreq = &skdev->skreq_table[i];
  2751. if (skreq->state == SKD_REQ_STATE_BUSY) {
  2752. skd_log_skreq(skdev, skreq, "recover");
  2753. SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0);
  2754. SKD_ASSERT(skreq->req != NULL);
  2755. /* Release DMA resources for the request. */
  2756. if (skreq->n_sg > 0)
  2757. skd_postop_sg_list(skdev, skreq);
  2758. if (requeue &&
  2759. (unsigned long) ++skreq->req->special <
  2760. SKD_MAX_RETRIES)
  2761. skd_requeue_request(skdev, skreq);
  2762. else
  2763. skd_end_request(skdev, skreq, -EIO);
  2764. skreq->req = NULL;
  2765. skreq->state = SKD_REQ_STATE_IDLE;
  2766. skreq->id += SKD_ID_INCR;
  2767. }
  2768. if (i > 0)
  2769. skreq[-1].next = skreq;
  2770. skreq->next = NULL;
  2771. }
  2772. skdev->skreq_free_list = skdev->skreq_table;
  2773. for (i = 0; i < skdev->num_fitmsg_context; i++) {
  2774. struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i];
  2775. if (skmsg->state == SKD_MSG_STATE_BUSY) {
  2776. skd_log_skmsg(skdev, skmsg, "salvaged");
  2777. SKD_ASSERT((skmsg->id & SKD_ID_INCR) != 0);
  2778. skmsg->state = SKD_MSG_STATE_IDLE;
  2779. skmsg->id += SKD_ID_INCR;
  2780. }
  2781. if (i > 0)
  2782. skmsg[-1].next = skmsg;
  2783. skmsg->next = NULL;
  2784. }
  2785. skdev->skmsg_free_list = skdev->skmsg_table;
  2786. for (i = 0; i < skdev->n_special; i++) {
  2787. struct skd_special_context *skspcl = &skdev->skspcl_table[i];
  2788. /* If orphaned, reclaim it because it has already been reported
  2789. * to the process as an error (it was just waiting for
  2790. * a completion that didn't come, and now it will never come)
  2791. * If busy, change to a state that will cause it to error
  2792. * out in the wait routine and let it do the normal
  2793. * reporting and reclaiming
  2794. */
  2795. if (skspcl->req.state == SKD_REQ_STATE_BUSY) {
  2796. if (skspcl->orphaned) {
  2797. pr_debug("%s:%s:%d orphaned %p\n",
  2798. skdev->name, __func__, __LINE__,
  2799. skspcl);
  2800. skd_release_special(skdev, skspcl);
  2801. } else {
  2802. pr_debug("%s:%s:%d not orphaned %p\n",
  2803. skdev->name, __func__, __LINE__,
  2804. skspcl);
  2805. skspcl->req.state = SKD_REQ_STATE_ABORTED;
  2806. }
  2807. }
  2808. }
  2809. skdev->skspcl_free_list = skdev->skspcl_table;
  2810. for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++)
  2811. skdev->timeout_slot[i] = 0;
  2812. skdev->in_flight = 0;
  2813. }
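/*
 * Drive-to-host message handler. Implements the FIT bring-up
 * handshake, one step per message:
 *
 *   FITFW_INIT -> GET_CMDQ_DEPTH -> SET_COMPQ_DEPTH ->
 *   SET_COMPQ_ADDR -> CMD_LOG_HOST_ID -> CMD_LOG_TIME_STAMP_LO/HI ->
 *   ARM_QUEUE
 *
 * Each case sends the next request with FIT_MXD_CONS() and records
 * it in last_mtd so acks for messages we did not send are ignored.
 */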
  2814. static void skd_isr_msg_from_dev(struct skd_device *skdev)
  2815. {
  2816. u32 mfd;
  2817. u32 mtd;
  2818. u32 data;
  2819. mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
  2820. pr_debug("%s:%s:%d mfd=0x%x last_mtd=0x%x\n",
  2821. skdev->name, __func__, __LINE__, mfd, skdev->last_mtd);
  2822. /* ignore any mtd that is an ack for something we didn't send */
  2823. if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
  2824. return;
  2825. switch (FIT_MXD_TYPE(mfd)) {
  2826. case FIT_MTD_FITFW_INIT:
  2827. skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);
  2828. if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
  2829. pr_err("(%s): protocol mismatch\n",
  2830. skdev->name);
  2831. pr_err("(%s): got=%d support=%d\n",
  2832. skdev->name, skdev->proto_ver,
  2833. FIT_PROTOCOL_VERSION_1);
  2834. pr_err("(%s): please upgrade driver\n",
  2835. skdev->name);
  2836. skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
  2837. skd_soft_reset(skdev);
  2838. break;
  2839. }
  2840. mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
  2841. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2842. skdev->last_mtd = mtd;
  2843. break;
  2844. case FIT_MTD_GET_CMDQ_DEPTH:
  2845. skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd);
  2846. mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0,
  2847. SKD_N_COMPLETION_ENTRY);
  2848. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2849. skdev->last_mtd = mtd;
  2850. break;
  2851. case FIT_MTD_SET_COMPQ_DEPTH:
  2852. SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG);
  2853. mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0);
  2854. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2855. skdev->last_mtd = mtd;
  2856. break;
  2857. case FIT_MTD_SET_COMPQ_ADDR:
  2858. skd_reset_skcomp(skdev);
  2859. mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno);
  2860. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2861. skdev->last_mtd = mtd;
  2862. break;
  2863. case FIT_MTD_CMD_LOG_HOST_ID:
  2864. skdev->connect_time_stamp = get_seconds();
  2865. data = skdev->connect_time_stamp & 0xFFFF;
  2866. mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data);
  2867. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2868. skdev->last_mtd = mtd;
  2869. break;
  2870. case FIT_MTD_CMD_LOG_TIME_STAMP_LO:
  2871. skdev->drive_jiffies = FIT_MXD_DATA(mfd);
  2872. data = (skdev->connect_time_stamp >> 16) & 0xFFFF;
  2873. mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data);
  2874. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2875. skdev->last_mtd = mtd;
  2876. break;
  2877. case FIT_MTD_CMD_LOG_TIME_STAMP_HI:
  2878. skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16);
  2879. mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
  2880. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2881. skdev->last_mtd = mtd;
  2882. pr_err("(%s): Time sync driver=0x%x device=0x%x\n",
  2883. skd_name(skdev),
  2884. skdev->connect_time_stamp, skdev->drive_jiffies);
  2885. break;
  2886. case FIT_MTD_ARM_QUEUE:
  2887. skdev->last_mtd = 0;
  2888. /*
  2889. * State should be, or soon will be, FIT_SR_DRIVE_ONLINE.
  2890. */
  2891. break;
  2892. default:
  2893. break;
  2894. }
  2895. }
  2896. static void skd_disable_interrupts(struct skd_device *skdev)
  2897. {
  2898. u32 sense;
  2899. sense = SKD_READL(skdev, FIT_CONTROL);
  2900. sense &= ~FIT_CR_ENABLE_INTERRUPTS;
  2901. SKD_WRITEL(skdev, sense, FIT_CONTROL);
  2902. pr_debug("%s:%s:%d sense 0x%x\n",
  2903. skdev->name, __func__, __LINE__, sense);
2904. /* Note that all 1s are written. A 1-bit means
  2905. * disable, a 0 means enable.
  2906. */
  2907. SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
  2908. }
  2909. static void skd_enable_interrupts(struct skd_device *skdev)
  2910. {
  2911. u32 val;
  2912. /* unmask interrupts first */
  2913. val = FIT_ISH_FW_STATE_CHANGE +
  2914. FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV;
2915. /* Note that the complement of the mask is written. A 1-bit means
  2916. * disable, a 0 means enable. */
  2917. SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
  2918. pr_debug("%s:%s:%d interrupt mask=0x%x\n",
  2919. skdev->name, __func__, __LINE__, ~val);
  2920. val = SKD_READL(skdev, FIT_CONTROL);
  2921. val |= FIT_CR_ENABLE_INTERRUPTS;
  2922. pr_debug("%s:%s:%d control=0x%x\n",
  2923. skdev->name, __func__, __LINE__, val);
  2924. SKD_WRITEL(skdev, val, FIT_CONTROL);
  2925. }
  2926. /*
  2927. *****************************************************************************
  2928. * START, STOP, RESTART, QUIESCE, UNQUIESCE
  2929. *****************************************************************************
  2930. */
  2931. static void skd_soft_reset(struct skd_device *skdev)
  2932. {
  2933. u32 val;
  2934. val = SKD_READL(skdev, FIT_CONTROL);
  2935. val |= (FIT_CR_SOFT_RESET);
  2936. pr_debug("%s:%s:%d control=0x%x\n",
  2937. skdev->name, __func__, __LINE__, val);
  2938. SKD_WRITEL(skdev, val, FIT_CONTROL);
  2939. }
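/*
 * Bring the device up at probe time: acknowledge stale interrupts,
 * sample the initial drive state, enable interrupts, and either soft
 * reset the drive or wait out busy/boot states under the timer.
 */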
  2940. static void skd_start_device(struct skd_device *skdev)
  2941. {
  2942. unsigned long flags;
  2943. u32 sense;
  2944. u32 state;
  2945. spin_lock_irqsave(&skdev->lock, flags);
  2946. /* ack all ghost interrupts */
  2947. SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
  2948. sense = SKD_READL(skdev, FIT_STATUS);
  2949. pr_debug("%s:%s:%d initial status=0x%x\n",
  2950. skdev->name, __func__, __LINE__, sense);
  2951. state = sense & FIT_SR_DRIVE_STATE_MASK;
  2952. skdev->drive_state = state;
  2953. skdev->last_mtd = 0;
  2954. skdev->state = SKD_DRVR_STATE_STARTING;
  2955. skdev->timer_countdown = SKD_STARTING_TIMO;
  2956. skd_enable_interrupts(skdev);
  2957. switch (skdev->drive_state) {
  2958. case FIT_SR_DRIVE_OFFLINE:
  2959. pr_err("(%s): Drive offline...\n", skd_name(skdev));
  2960. break;
  2961. case FIT_SR_DRIVE_FW_BOOTING:
  2962. pr_debug("%s:%s:%d FIT_SR_DRIVE_FW_BOOTING %s\n",
  2963. skdev->name, __func__, __LINE__, skdev->name);
  2964. skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
  2965. skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
  2966. break;
  2967. case FIT_SR_DRIVE_BUSY_SANITIZE:
  2968. pr_info("(%s): Start: BUSY_SANITIZE\n",
  2969. skd_name(skdev));
  2970. skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
  2971. skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
  2972. break;
  2973. case FIT_SR_DRIVE_BUSY_ERASE:
  2974. pr_info("(%s): Start: BUSY_ERASE\n", skd_name(skdev));
  2975. skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
  2976. skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
  2977. break;
  2978. case FIT_SR_DRIVE_INIT:
  2979. case FIT_SR_DRIVE_ONLINE:
  2980. skd_soft_reset(skdev);
  2981. break;
  2982. case FIT_SR_DRIVE_BUSY:
  2983. pr_err("(%s): Drive Busy...\n", skd_name(skdev));
  2984. skdev->state = SKD_DRVR_STATE_BUSY;
  2985. skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
  2986. break;
  2987. case FIT_SR_DRIVE_SOFT_RESET:
  2988. pr_err("(%s) drive soft reset in prog\n",
  2989. skd_name(skdev));
  2990. break;
  2991. case FIT_SR_DRIVE_FAULT:
  2992. /* Fault state is bad...soft reset won't do it...
  2993. * Hard reset, maybe, but does it work on device?
  2994. * For now, just fault so the system doesn't hang.
  2995. */
  2996. skd_drive_fault(skdev);
2997. /* start the queue so we can respond with error to requests */
  2998. pr_debug("%s:%s:%d starting %s queue\n",
  2999. skdev->name, __func__, __LINE__, skdev->name);
  3000. blk_start_queue(skdev->queue);
  3001. skdev->gendisk_on = -1;
  3002. wake_up_interruptible(&skdev->waitq);
  3003. break;
  3004. case 0xFF:
  3005. /* Most likely the device isn't there or isn't responding
  3006. * to the BAR1 addresses. */
  3007. skd_drive_disappeared(skdev);
3008. /* start the queue so we can respond with error to requests */
  3009. pr_debug("%s:%s:%d starting %s queue to error-out reqs\n",
  3010. skdev->name, __func__, __LINE__, skdev->name);
  3011. blk_start_queue(skdev->queue);
  3012. skdev->gendisk_on = -1;
  3013. wake_up_interruptible(&skdev->waitq);
  3014. break;
  3015. default:
  3016. pr_err("(%s) Start: unknown state %x\n",
  3017. skd_name(skdev), skdev->drive_state);
  3018. break;
  3019. }
  3020. state = SKD_READL(skdev, FIT_CONTROL);
  3021. pr_debug("%s:%s:%d FIT Control Status=0x%x\n",
  3022. skdev->name, __func__, __LINE__, state);
  3023. state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
  3024. pr_debug("%s:%s:%d Intr Status=0x%x\n",
  3025. skdev->name, __func__, __LINE__, state);
  3026. state = SKD_READL(skdev, FIT_INT_MASK_HOST);
  3027. pr_debug("%s:%s:%d Intr Mask=0x%x\n",
  3028. skdev->name, __func__, __LINE__, state);
  3029. state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
  3030. pr_debug("%s:%s:%d Msg from Dev=0x%x\n",
  3031. skdev->name, __func__, __LINE__, state);
  3032. state = SKD_READL(skdev, FIT_HW_VERSION);
  3033. pr_debug("%s:%s:%d HW version=0x%x\n",
  3034. skdev->name, __func__, __LINE__, state);
  3035. spin_unlock_irqrestore(&skdev->lock, flags);
  3036. }
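/*
 * Orderly shutdown: issue a SYNCHRONIZE_CACHE through the internal
 * special request, wait up to 10 seconds for it, then disable
 * interrupts, soft reset the drive and poll (100ms steps, ~1 second
 * total) for it to return to FIT_SR_DRIVE_INIT.
 */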
  3037. static void skd_stop_device(struct skd_device *skdev)
  3038. {
  3039. unsigned long flags;
  3040. struct skd_special_context *skspcl = &skdev->internal_skspcl;
  3041. u32 dev_state;
  3042. int i;
  3043. spin_lock_irqsave(&skdev->lock, flags);
  3044. if (skdev->state != SKD_DRVR_STATE_ONLINE) {
  3045. pr_err("(%s): skd_stop_device not online no sync\n",
  3046. skd_name(skdev));
  3047. goto stop_out;
  3048. }
  3049. if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
  3050. pr_err("(%s): skd_stop_device no special\n",
  3051. skd_name(skdev));
  3052. goto stop_out;
  3053. }
  3054. skdev->state = SKD_DRVR_STATE_SYNCING;
  3055. skdev->sync_done = 0;
  3056. skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
  3057. spin_unlock_irqrestore(&skdev->lock, flags);
  3058. wait_event_interruptible_timeout(skdev->waitq,
  3059. (skdev->sync_done), (10 * HZ));
  3060. spin_lock_irqsave(&skdev->lock, flags);
  3061. switch (skdev->sync_done) {
  3062. case 0:
  3063. pr_err("(%s): skd_stop_device no sync\n",
  3064. skd_name(skdev));
  3065. break;
  3066. case 1:
  3067. pr_err("(%s): skd_stop_device sync done\n",
  3068. skd_name(skdev));
  3069. break;
  3070. default:
  3071. pr_err("(%s): skd_stop_device sync error\n",
  3072. skd_name(skdev));
  3073. }
  3074. stop_out:
  3075. skdev->state = SKD_DRVR_STATE_STOPPING;
  3076. spin_unlock_irqrestore(&skdev->lock, flags);
  3077. skd_kill_timer(skdev);
  3078. spin_lock_irqsave(&skdev->lock, flags);
  3079. skd_disable_interrupts(skdev);
  3080. /* ensure all ints on device are cleared */
  3081. /* soft reset the device to unload with a clean slate */
  3082. SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
  3083. SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);
  3084. spin_unlock_irqrestore(&skdev->lock, flags);
  3085. /* poll every 100ms, 1 second timeout */
  3086. for (i = 0; i < 10; i++) {
  3087. dev_state =
  3088. SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
  3089. if (dev_state == FIT_SR_DRIVE_INIT)
  3090. break;
  3091. set_current_state(TASK_INTERRUPTIBLE);
  3092. schedule_timeout(msecs_to_jiffies(100));
  3093. }
  3094. if (dev_state != FIT_SR_DRIVE_INIT)
  3095. pr_err("(%s): skd_stop_device state error 0x%02x\n",
  3096. skd_name(skdev), dev_state);
  3097. }
  3098. /* assume spinlock is held */
  3099. static void skd_restart_device(struct skd_device *skdev)
  3100. {
  3101. u32 state;
  3102. /* ack all ghost interrupts */
  3103. SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
  3104. state = SKD_READL(skdev, FIT_STATUS);
  3105. pr_debug("%s:%s:%d drive status=0x%x\n",
  3106. skdev->name, __func__, __LINE__, state);
  3107. state &= FIT_SR_DRIVE_STATE_MASK;
  3108. skdev->drive_state = state;
  3109. skdev->last_mtd = 0;
  3110. skdev->state = SKD_DRVR_STATE_RESTARTING;
  3111. skdev->timer_countdown = SKD_RESTARTING_TIMO;
  3112. skd_soft_reset(skdev);
  3113. }
  3114. /* assume spinlock is held */
  3115. static int skd_quiesce_dev(struct skd_device *skdev)
  3116. {
  3117. int rc = 0;
  3118. switch (skdev->state) {
  3119. case SKD_DRVR_STATE_BUSY:
  3120. case SKD_DRVR_STATE_BUSY_IMMINENT:
  3121. pr_debug("%s:%s:%d stopping %s queue\n",
  3122. skdev->name, __func__, __LINE__, skdev->name);
  3123. blk_stop_queue(skdev->queue);
  3124. break;
  3125. case SKD_DRVR_STATE_ONLINE:
  3126. case SKD_DRVR_STATE_STOPPING:
  3127. case SKD_DRVR_STATE_SYNCING:
  3128. case SKD_DRVR_STATE_PAUSING:
  3129. case SKD_DRVR_STATE_PAUSED:
  3130. case SKD_DRVR_STATE_STARTING:
  3131. case SKD_DRVR_STATE_RESTARTING:
  3132. case SKD_DRVR_STATE_RESUMING:
  3133. default:
  3134. rc = -EINVAL;
  3135. pr_debug("%s:%s:%d state [%d] not implemented\n",
  3136. skdev->name, __func__, __LINE__, skdev->state);
  3137. }
  3138. return rc;
  3139. }
  3140. /* assume spinlock is held */
  3141. static int skd_unquiesce_dev(struct skd_device *skdev)
  3142. {
  3143. int prev_driver_state = skdev->state;
  3144. skd_log_skdev(skdev, "unquiesce");
  3145. if (skdev->state == SKD_DRVR_STATE_ONLINE) {
  3146. pr_debug("%s:%s:%d **** device already ONLINE\n",
  3147. skdev->name, __func__, __LINE__);
  3148. return 0;
  3149. }
  3150. if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
  3151. /*
3152. * If there has been a state change to other than
  3153. * ONLINE, we will rely on controller state change
  3154. * to come back online and restart the queue.
  3155. * The BUSY state means that driver is ready to
  3156. * continue normal processing but waiting for controller
  3157. * to become available.
  3158. */
  3159. skdev->state = SKD_DRVR_STATE_BUSY;
  3160. pr_debug("%s:%s:%d drive BUSY state\n",
  3161. skdev->name, __func__, __LINE__);
  3162. return 0;
  3163. }
  3164. /*
  3165. * Drive has just come online, driver is either in startup,
3166. * paused performing a task, or busy waiting for hardware.
  3167. */
  3168. switch (skdev->state) {
  3169. case SKD_DRVR_STATE_PAUSED:
  3170. case SKD_DRVR_STATE_BUSY:
  3171. case SKD_DRVR_STATE_BUSY_IMMINENT:
  3172. case SKD_DRVR_STATE_BUSY_ERASE:
  3173. case SKD_DRVR_STATE_STARTING:
  3174. case SKD_DRVR_STATE_RESTARTING:
  3175. case SKD_DRVR_STATE_FAULT:
  3176. case SKD_DRVR_STATE_IDLE:
  3177. case SKD_DRVR_STATE_LOAD:
  3178. skdev->state = SKD_DRVR_STATE_ONLINE;
  3179. pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
  3180. skd_name(skdev),
  3181. skd_skdev_state_to_str(prev_driver_state),
  3182. prev_driver_state, skd_skdev_state_to_str(skdev->state),
  3183. skdev->state);
  3184. pr_debug("%s:%s:%d **** device ONLINE...starting block queue\n",
  3185. skdev->name, __func__, __LINE__);
  3186. pr_debug("%s:%s:%d starting %s queue\n",
  3187. skdev->name, __func__, __LINE__, skdev->name);
  3188. pr_info("(%s): STEC s1120 ONLINE\n", skd_name(skdev));
  3189. blk_start_queue(skdev->queue);
  3190. skdev->gendisk_on = 1;
  3191. wake_up_interruptible(&skdev->waitq);
  3192. break;
  3193. case SKD_DRVR_STATE_DISAPPEARED:
  3194. default:
  3195. pr_debug("%s:%s:%d **** driver state %d, not implemented \n",
  3196. skdev->name, __func__, __LINE__,
  3197. skdev->state);
  3198. return -EBUSY;
  3199. }
  3200. return 0;
  3201. }
  3202. /*
  3203. *****************************************************************************
  3204. * PCIe MSI/MSI-X INTERRUPT HANDLERS
  3205. *****************************************************************************
  3206. */
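/*
 * MSI-X handlers. Each interrupt source has its own vector (see
 * msix_entries[] below), so every handler acknowledges only the
 * status bits it services in FIT_INT_STATUS_HOST before doing its
 * work.
 */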
  3207. static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data)
  3208. {
  3209. struct skd_device *skdev = skd_host_data;
  3210. unsigned long flags;
  3211. spin_lock_irqsave(&skdev->lock, flags);
  3212. pr_debug("%s:%s:%d MSIX = 0x%x\n",
  3213. skdev->name, __func__, __LINE__,
  3214. SKD_READL(skdev, FIT_INT_STATUS_HOST));
  3215. pr_err("(%s): MSIX reserved irq %d = 0x%x\n", skd_name(skdev),
  3216. irq, SKD_READL(skdev, FIT_INT_STATUS_HOST));
  3217. SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
  3218. spin_unlock_irqrestore(&skdev->lock, flags);
  3219. return IRQ_HANDLED;
  3220. }
  3221. static irqreturn_t skd_statec_isr(int irq, void *skd_host_data)
  3222. {
  3223. struct skd_device *skdev = skd_host_data;
  3224. unsigned long flags;
  3225. spin_lock_irqsave(&skdev->lock, flags);
  3226. pr_debug("%s:%s:%d MSIX = 0x%x\n",
  3227. skdev->name, __func__, __LINE__,
  3228. SKD_READL(skdev, FIT_INT_STATUS_HOST));
  3229. SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
  3230. skd_isr_fwstate(skdev);
  3231. spin_unlock_irqrestore(&skdev->lock, flags);
  3232. return IRQ_HANDLED;
  3233. }
  3234. static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
  3235. {
  3236. struct skd_device *skdev = skd_host_data;
  3237. unsigned long flags;
  3238. int flush_enqueued = 0;
  3239. int deferred;
  3240. spin_lock_irqsave(&skdev->lock, flags);
  3241. pr_debug("%s:%s:%d MSIX = 0x%x\n",
  3242. skdev->name, __func__, __LINE__,
  3243. SKD_READL(skdev, FIT_INT_STATUS_HOST));
  3244. SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
  3245. deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
  3246. &flush_enqueued);
  3247. if (flush_enqueued)
  3248. skd_request_fn(skdev->queue);
  3249. if (deferred)
  3250. schedule_work(&skdev->completion_worker);
  3251. else if (!flush_enqueued)
  3252. skd_request_fn(skdev->queue);
  3253. spin_unlock_irqrestore(&skdev->lock, flags);
  3254. return IRQ_HANDLED;
  3255. }
  3256. static irqreturn_t skd_msg_isr(int irq, void *skd_host_data)
  3257. {
  3258. struct skd_device *skdev = skd_host_data;
  3259. unsigned long flags;
  3260. spin_lock_irqsave(&skdev->lock, flags);
  3261. pr_debug("%s:%s:%d MSIX = 0x%x\n",
  3262. skdev->name, __func__, __LINE__,
  3263. SKD_READL(skdev, FIT_INT_STATUS_HOST));
  3264. SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
  3265. skd_isr_msg_from_dev(skdev);
  3266. spin_unlock_irqrestore(&skdev->lock, flags);
  3267. return IRQ_HANDLED;
  3268. }
  3269. static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
  3270. {
  3271. struct skd_device *skdev = skd_host_data;
  3272. unsigned long flags;
  3273. spin_lock_irqsave(&skdev->lock, flags);
  3274. pr_debug("%s:%s:%d MSIX = 0x%x\n",
  3275. skdev->name, __func__, __LINE__,
  3276. SKD_READL(skdev, FIT_INT_STATUS_HOST));
  3277. SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
  3278. spin_unlock_irqrestore(&skdev->lock, flags);
  3279. return IRQ_HANDLED;
  3280. }
  3281. /*
  3282. *****************************************************************************
  3283. * PCIe MSI/MSI-X SETUP
  3284. *****************************************************************************
  3285. */
  3286. struct skd_msix_entry {
  3287. int have_irq;
  3288. u32 vector;
  3289. u32 entry;
  3290. struct skd_device *rsp;
  3291. char isr_name[30];
  3292. };
  3293. struct skd_init_msix_entry {
  3294. const char *name;
  3295. irq_handler_t handler;
  3296. };
  3297. #define SKD_MAX_MSIX_COUNT 13
  3298. #define SKD_MIN_MSIX_COUNT 7
  3299. #define SKD_BASE_MSIX_IRQ 4
  3300. static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
  3301. { "(DMA 0)", skd_reserved_isr },
  3302. { "(DMA 1)", skd_reserved_isr },
  3303. { "(DMA 2)", skd_reserved_isr },
  3304. { "(DMA 3)", skd_reserved_isr },
  3305. { "(State Change)", skd_statec_isr },
  3306. { "(COMPL_Q)", skd_comp_q },
  3307. { "(MSG)", skd_msg_isr },
  3308. { "(Reserved)", skd_reserved_isr },
  3309. { "(Reserved)", skd_reserved_isr },
  3310. { "(Queue Full 0)", skd_qfull_isr },
  3311. { "(Queue Full 1)", skd_qfull_isr },
  3312. { "(Queue Full 2)", skd_qfull_isr },
  3313. { "(Queue Full 3)", skd_qfull_isr },
  3314. };
  3315. static void skd_release_msix(struct skd_device *skdev)
  3316. {
  3317. struct skd_msix_entry *qentry;
  3318. int i;
  3319. if (skdev->msix_entries == NULL)
  3320. return;
  3321. for (i = 0; i < skdev->msix_count; i++) {
  3322. qentry = &skdev->msix_entries[i];
  3323. skdev = qentry->rsp;
  3324. if (qentry->have_irq)
  3325. devm_free_irq(&skdev->pdev->dev,
  3326. qentry->vector, qentry->rsp);
  3327. }
  3328. pci_disable_msix(skdev->pdev);
  3329. kfree(skdev->msix_entries);
  3330. skdev->msix_count = 0;
  3331. skdev->msix_entries = NULL;
  3332. }
  3333. static int skd_acquire_msix(struct skd_device *skdev)
  3334. {
  3335. int i, rc;
  3336. struct pci_dev *pdev;
  3337. struct msix_entry *entries = NULL;
  3338. struct skd_msix_entry *qentry;
  3339. pdev = skdev->pdev;
  3340. skdev->msix_count = SKD_MAX_MSIX_COUNT;
  3341. entries = kzalloc(sizeof(struct msix_entry) * SKD_MAX_MSIX_COUNT,
  3342. GFP_KERNEL);
  3343. if (!entries)
  3344. return -ENOMEM;
  3345. for (i = 0; i < SKD_MAX_MSIX_COUNT; i++)
  3346. entries[i].entry = i;
  3347. rc = pci_enable_msix(pdev, entries, SKD_MAX_MSIX_COUNT);
  3348. if (rc < 0)
  3349. goto msix_out;
  3350. if (rc) {
  3351. if (rc < SKD_MIN_MSIX_COUNT) {
  3352. pr_err("(%s): failed to enable MSI-X %d\n",
  3353. skd_name(skdev), rc);
  3354. goto msix_out;
  3355. }
  3356. pr_debug("%s:%s:%d %s: <%s> allocated %d MSI-X vectors\n",
  3357. skdev->name, __func__, __LINE__,
  3358. pci_name(pdev), skdev->name, rc);
  3359. skdev->msix_count = rc;
  3360. rc = pci_enable_msix(pdev, entries, skdev->msix_count);
  3361. if (rc) {
  3362. pr_err("(%s): failed to enable MSI-X "
  3363. "support (%d) %d\n",
  3364. skd_name(skdev), skdev->msix_count, rc);
  3365. goto msix_out;
  3366. }
  3367. }
  3368. skdev->msix_entries = kzalloc(sizeof(struct skd_msix_entry) *
  3369. skdev->msix_count, GFP_KERNEL);
  3370. if (!skdev->msix_entries) {
  3371. rc = -ENOMEM;
  3372. skdev->msix_count = 0;
  3373. pr_err("(%s): msix table allocation error\n",
  3374. skd_name(skdev));
  3375. goto msix_out;
  3376. }
  3377. qentry = skdev->msix_entries;
  3378. for (i = 0; i < skdev->msix_count; i++) {
  3379. qentry->vector = entries[i].vector;
  3380. qentry->entry = entries[i].entry;
  3381. qentry->rsp = NULL;
  3382. qentry->have_irq = 0;
  3383. pr_debug("%s:%s:%d %s: <%s> msix (%d) vec %d, entry %x\n",
  3384. skdev->name, __func__, __LINE__,
  3385. pci_name(pdev), skdev->name,
  3386. i, qentry->vector, qentry->entry);
  3387. qentry++;
  3388. }
  3389. /* Enable MSI-X vectors for the base queue */
  3390. for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
  3391. qentry = &skdev->msix_entries[i];
  3392. snprintf(qentry->isr_name, sizeof(qentry->isr_name),
  3393. "%s%d-msix %s", DRV_NAME, skdev->devno,
  3394. msix_entries[i].name);
  3395. rc = devm_request_irq(&skdev->pdev->dev, qentry->vector,
  3396. msix_entries[i].handler, 0,
  3397. qentry->isr_name, skdev);
  3398. if (rc) {
  3399. pr_err("(%s): Unable to register(%d) MSI-X "
  3400. "handler %d: %s\n",
  3401. skd_name(skdev), rc, i, qentry->isr_name);
  3402. goto msix_out;
  3403. } else {
  3404. qentry->have_irq = 1;
  3405. qentry->rsp = skdev;
  3406. }
  3407. }
  3408. pr_debug("%s:%s:%d %s: <%s> msix %d irq(s) enabled\n",
  3409. skdev->name, __func__, __LINE__,
  3410. pci_name(pdev), skdev->name, skdev->msix_count);
  3411. return 0;
  3412. msix_out:
3413. kfree(entries);
  3415. skd_release_msix(skdev);
  3416. return rc;
  3417. }
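/*
 * Set up interrupts according to the requested irq_type, falling
 * back from MSI-X to MSI to a shared legacy INTx line when a mode
 * cannot be enabled.
 */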
  3418. static int skd_acquire_irq(struct skd_device *skdev)
  3419. {
  3420. int rc;
  3421. struct pci_dev *pdev;
  3422. pdev = skdev->pdev;
  3423. skdev->msix_count = 0;
  3424. RETRY_IRQ_TYPE:
  3425. switch (skdev->irq_type) {
  3426. case SKD_IRQ_MSIX:
  3427. rc = skd_acquire_msix(skdev);
  3428. if (!rc)
  3429. pr_info("(%s): MSI-X %d irqs enabled\n",
  3430. skd_name(skdev), skdev->msix_count);
  3431. else {
  3432. pr_err(
  3433. "(%s): failed to enable MSI-X, re-trying with MSI %d\n",
  3434. skd_name(skdev), rc);
  3435. skdev->irq_type = SKD_IRQ_MSI;
  3436. goto RETRY_IRQ_TYPE;
  3437. }
  3438. break;
  3439. case SKD_IRQ_MSI:
  3440. snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d-msi",
  3441. DRV_NAME, skdev->devno);
  3442. rc = pci_enable_msi(pdev);
  3443. if (!rc) {
  3444. rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr, 0,
  3445. skdev->isr_name, skdev);
  3446. if (rc) {
  3447. pci_disable_msi(pdev);
  3448. pr_err(
  3449. "(%s): failed to allocate the MSI interrupt %d\n",
  3450. skd_name(skdev), rc);
  3451. goto RETRY_IRQ_LEGACY;
  3452. }
  3453. pr_info("(%s): MSI irq %d enabled\n",
  3454. skd_name(skdev), pdev->irq);
  3455. } else {
  3456. RETRY_IRQ_LEGACY:
  3457. pr_err(
  3458. "(%s): failed to enable MSI, re-trying with LEGACY %d\n",
  3459. skd_name(skdev), rc);
  3460. skdev->irq_type = SKD_IRQ_LEGACY;
  3461. goto RETRY_IRQ_TYPE;
  3462. }
  3463. break;
  3464. case SKD_IRQ_LEGACY:
  3465. snprintf(skdev->isr_name, sizeof(skdev->isr_name),
  3466. "%s%d-legacy", DRV_NAME, skdev->devno);
  3467. rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
  3468. IRQF_SHARED, skdev->isr_name, skdev);
  3469. if (!rc)
  3470. pr_info("(%s): LEGACY irq %d enabled\n",
  3471. skd_name(skdev), pdev->irq);
  3472. else
  3473. pr_err("(%s): request LEGACY irq error %d\n",
  3474. skd_name(skdev), rc);
  3475. break;
  3476. default:
  3477. pr_info("(%s): irq_type %d invalid, re-set to %d\n",
  3478. skd_name(skdev), skdev->irq_type, SKD_IRQ_DEFAULT);
  3479. skdev->irq_type = SKD_IRQ_LEGACY;
  3480. goto RETRY_IRQ_TYPE;
  3481. }
  3482. return rc;
  3483. }
  3484. static void skd_release_irq(struct skd_device *skdev)
  3485. {
  3486. switch (skdev->irq_type) {
  3487. case SKD_IRQ_MSIX:
  3488. skd_release_msix(skdev);
  3489. break;
  3490. case SKD_IRQ_MSI:
  3491. devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev);
  3492. pci_disable_msi(skdev->pdev);
  3493. break;
  3494. case SKD_IRQ_LEGACY:
  3495. devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev);
  3496. break;
  3497. default:
  3498. pr_err("(%s): wrong irq type %d!",
  3499. skd_name(skdev), skdev->irq_type);
  3500. break;
  3501. }
  3502. }
  3503. /*
  3504. *****************************************************************************
  3505. * CONSTRUCT
  3506. *****************************************************************************
  3507. */
  3508. static int skd_cons_skcomp(struct skd_device *skdev);
  3509. static int skd_cons_skmsg(struct skd_device *skdev);
  3510. static int skd_cons_skreq(struct skd_device *skdev);
  3511. static int skd_cons_skspcl(struct skd_device *skdev);
  3512. static int skd_cons_sksb(struct skd_device *skdev);
  3513. static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
  3514. u32 n_sg,
  3515. dma_addr_t *ret_dma_addr);
  3516. static int skd_cons_disk(struct skd_device *skdev);
  3517. #define SKD_N_DEV_TABLE 16u
  3518. static u32 skd_next_devno;
  3519. static struct skd_device *skd_construct(struct pci_dev *pdev)
  3520. {
  3521. struct skd_device *skdev;
  3522. int blk_major = skd_major;
  3523. int rc;
  3524. skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);
  3525. if (!skdev) {
  3526. pr_err(PFX "(%s): memory alloc failure\n",
  3527. pci_name(pdev));
  3528. return NULL;
  3529. }
  3530. skdev->state = SKD_DRVR_STATE_LOAD;
  3531. skdev->pdev = pdev;
  3532. skdev->devno = skd_next_devno++;
  3533. skdev->major = blk_major;
  3534. skdev->irq_type = skd_isr_type;
  3535. sprintf(skdev->name, DRV_NAME "%d", skdev->devno);
  3536. skdev->dev_max_queue_depth = 0;
  3537. skdev->num_req_context = skd_max_queue_depth;
  3538. skdev->num_fitmsg_context = skd_max_queue_depth;
  3539. skdev->n_special = skd_max_pass_thru;
  3540. skdev->cur_max_queue_depth = 1;
  3541. skdev->queue_low_water_mark = 1;
  3542. skdev->proto_ver = 99;
  3543. skdev->sgs_per_request = skd_sgs_per_request;
  3544. skdev->dbg_level = skd_dbg_level;
  3545. atomic_set(&skdev->device_count, 0);
  3546. spin_lock_init(&skdev->lock);
  3547. INIT_WORK(&skdev->completion_worker, skd_completion_worker);
  3548. pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
  3549. rc = skd_cons_skcomp(skdev);
  3550. if (rc < 0)
  3551. goto err_out;
  3552. pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
  3553. rc = skd_cons_skmsg(skdev);
  3554. if (rc < 0)
  3555. goto err_out;
  3556. pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
  3557. rc = skd_cons_skreq(skdev);
  3558. if (rc < 0)
  3559. goto err_out;
  3560. pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
  3561. rc = skd_cons_skspcl(skdev);
  3562. if (rc < 0)
  3563. goto err_out;
  3564. pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
  3565. rc = skd_cons_sksb(skdev);
  3566. if (rc < 0)
  3567. goto err_out;
  3568. pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
  3569. rc = skd_cons_disk(skdev);
  3570. if (rc < 0)
  3571. goto err_out;
  3572. pr_debug("%s:%s:%d VICTORY\n", skdev->name, __func__, __LINE__);
  3573. return skdev;
  3574. err_out:
  3575. pr_debug("%s:%s:%d construct failed\n",
  3576. skdev->name, __func__, __LINE__);
  3577. skd_destruct(skdev);
  3578. return NULL;
  3579. }
  3580. static int skd_cons_skcomp(struct skd_device *skdev)
  3581. {
  3582. int rc = 0;
  3583. struct fit_completion_entry_v1 *skcomp;
  3584. u32 nbytes;
  3585. nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
  3586. nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
  3587. pr_debug("%s:%s:%d comp pci_alloc, total bytes %d entries %d\n",
  3588. skdev->name, __func__, __LINE__,
  3589. nbytes, SKD_N_COMPLETION_ENTRY);
  3590. skcomp = pci_alloc_consistent(skdev->pdev, nbytes,
  3591. &skdev->cq_dma_address);
  3592. if (skcomp == NULL) {
  3593. rc = -ENOMEM;
  3594. goto err_out;
  3595. }
  3596. memset(skcomp, 0, nbytes);
  3597. skdev->skcomp_table = skcomp;
  3598. skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp +
  3599. sizeof(*skcomp) *
  3600. SKD_N_COMPLETION_ENTRY);
  3601. err_out:
  3602. return rc;
  3603. }
  3604. static int skd_cons_skmsg(struct skd_device *skdev)
  3605. {
  3606. int rc = 0;
  3607. u32 i;
  3608. pr_debug("%s:%s:%d skmsg_table kzalloc, struct %lu, count %u total %lu\n",
  3609. skdev->name, __func__, __LINE__,
  3610. sizeof(struct skd_fitmsg_context),
  3611. skdev->num_fitmsg_context,
  3612. sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);
  3613. skdev->skmsg_table = kzalloc(sizeof(struct skd_fitmsg_context)
  3614. *skdev->num_fitmsg_context, GFP_KERNEL);
  3615. if (skdev->skmsg_table == NULL) {
  3616. rc = -ENOMEM;
  3617. goto err_out;
  3618. }
  3619. for (i = 0; i < skdev->num_fitmsg_context; i++) {
  3620. struct skd_fitmsg_context *skmsg;
  3621. skmsg = &skdev->skmsg_table[i];
  3622. skmsg->id = i + SKD_ID_FIT_MSG;
  3623. skmsg->state = SKD_MSG_STATE_IDLE;
  3624. skmsg->msg_buf = pci_alloc_consistent(skdev->pdev,
  3625. SKD_N_FITMSG_BYTES + 64,
  3626. &skmsg->mb_dma_address);
  3627. if (skmsg->msg_buf == NULL) {
  3628. rc = -ENOMEM;
  3629. goto err_out;
  3630. }
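/*
 * Align the message buffer and its DMA address on the FIT_QCMD
 * base-address boundary (presumably 64 bytes, matching the extra
 * 64 bytes allocated above) and remember the offset so the original
 * pointer can be reconstructed when the buffer is freed.
 */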
  3631. skmsg->offset = (u32)((u64)skmsg->msg_buf &
  3632. (~FIT_QCMD_BASE_ADDRESS_MASK));
  3633. skmsg->msg_buf += ~FIT_QCMD_BASE_ADDRESS_MASK;
  3634. skmsg->msg_buf = (u8 *)((u64)skmsg->msg_buf &
  3635. FIT_QCMD_BASE_ADDRESS_MASK);
  3636. skmsg->mb_dma_address += ~FIT_QCMD_BASE_ADDRESS_MASK;
  3637. skmsg->mb_dma_address &= FIT_QCMD_BASE_ADDRESS_MASK;
  3638. memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);
  3639. skmsg->next = &skmsg[1];
  3640. }
  3641. /* Free list is in order starting with the 0th entry. */
  3642. skdev->skmsg_table[i - 1].next = NULL;
  3643. skdev->skmsg_free_list = skdev->skmsg_table;
  3644. err_out:
  3645. return rc;
  3646. }
  3647. static int skd_cons_skreq(struct skd_device *skdev)
  3648. {
  3649. int rc = 0;
  3650. u32 i;
  3651. pr_debug("%s:%s:%d skreq_table kzalloc, struct %lu, count %u total %lu\n",
  3652. skdev->name, __func__, __LINE__,
  3653. sizeof(struct skd_request_context),
  3654. skdev->num_req_context,
  3655. sizeof(struct skd_request_context) * skdev->num_req_context);
  3656. skdev->skreq_table = kzalloc(sizeof(struct skd_request_context)
  3657. * skdev->num_req_context, GFP_KERNEL);
  3658. if (skdev->skreq_table == NULL) {
  3659. rc = -ENOMEM;
  3660. goto err_out;
  3661. }
  3662. pr_debug("%s:%s:%d alloc sg_table sg_per_req %u scatlist %lu total %lu\n",
  3663. skdev->name, __func__, __LINE__,
  3664. skdev->sgs_per_request, sizeof(struct scatterlist),
  3665. skdev->sgs_per_request * sizeof(struct scatterlist));
  3666. for (i = 0; i < skdev->num_req_context; i++) {
  3667. struct skd_request_context *skreq;
  3668. skreq = &skdev->skreq_table[i];
  3669. skreq->id = i + SKD_ID_RW_REQUEST;
  3670. skreq->state = SKD_REQ_STATE_IDLE;
  3671. skreq->sg = kzalloc(sizeof(struct scatterlist) *
  3672. skdev->sgs_per_request, GFP_KERNEL);
  3673. if (skreq->sg == NULL) {
  3674. rc = -ENOMEM;
  3675. goto err_out;
  3676. }
  3677. sg_init_table(skreq->sg, skdev->sgs_per_request);
  3678. skreq->sksg_list = skd_cons_sg_list(skdev,
  3679. skdev->sgs_per_request,
  3680. &skreq->sksg_dma_address);
  3681. if (skreq->sksg_list == NULL) {
  3682. rc = -ENOMEM;
  3683. goto err_out;
  3684. }
  3685. skreq->next = &skreq[1];
  3686. }
  3687. /* Free list is in order starting with the 0th entry. */
  3688. skdev->skreq_table[i - 1].next = NULL;
  3689. skdev->skreq_free_list = skdev->skreq_table;
  3690. err_out:
  3691. return rc;
  3692. }
  3693. static int skd_cons_skspcl(struct skd_device *skdev)
  3694. {
  3695. int rc = 0;
  3696. u32 i, nbytes;
  3697. pr_debug("%s:%s:%d skspcl_table kzalloc, struct %lu, count %u total %lu\n",
  3698. skdev->name, __func__, __LINE__,
  3699. sizeof(struct skd_special_context),
  3700. skdev->n_special,
  3701. sizeof(struct skd_special_context) * skdev->n_special);
  3702. skdev->skspcl_table = kzalloc(sizeof(struct skd_special_context)
  3703. * skdev->n_special, GFP_KERNEL);
  3704. if (skdev->skspcl_table == NULL) {
  3705. rc = -ENOMEM;
  3706. goto err_out;
  3707. }
  3708. for (i = 0; i < skdev->n_special; i++) {
  3709. struct skd_special_context *skspcl;
  3710. skspcl = &skdev->skspcl_table[i];
  3711. skspcl->req.id = i + SKD_ID_SPECIAL_REQUEST;
  3712. skspcl->req.state = SKD_REQ_STATE_IDLE;
  3713. skspcl->req.next = &skspcl[1].req;
  3714. nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
  3715. skspcl->msg_buf = pci_alloc_consistent(skdev->pdev, nbytes,
  3716. &skspcl->mb_dma_address);
  3717. if (skspcl->msg_buf == NULL) {
  3718. rc = -ENOMEM;
  3719. goto err_out;
  3720. }
  3721. memset(skspcl->msg_buf, 0, nbytes);
  3722. skspcl->req.sg = kzalloc(sizeof(struct scatterlist) *
  3723. SKD_N_SG_PER_SPECIAL, GFP_KERNEL);
  3724. if (skspcl->req.sg == NULL) {
  3725. rc = -ENOMEM;
  3726. goto err_out;
  3727. }
  3728. skspcl->req.sksg_list = skd_cons_sg_list(skdev,
  3729. SKD_N_SG_PER_SPECIAL,
  3730. &skspcl->req.
  3731. sksg_dma_address);
  3732. if (skspcl->req.sksg_list == NULL) {
  3733. rc = -ENOMEM;
  3734. goto err_out;
  3735. }
  3736. }
  3737. /* Free list is in order starting with the 0th entry. */
  3738. skdev->skspcl_table[i - 1].req.next = NULL;
  3739. skdev->skspcl_free_list = skdev->skspcl_table;
  3740. return rc;
  3741. err_out:
  3742. return rc;
  3743. }
  3744. static int skd_cons_sksb(struct skd_device *skdev)
  3745. {
  3746. int rc = 0;
  3747. struct skd_special_context *skspcl;
  3748. u32 nbytes;
  3749. skspcl = &skdev->internal_skspcl;
  3750. skspcl->req.id = 0 + SKD_ID_INTERNAL;
  3751. skspcl->req.state = SKD_REQ_STATE_IDLE;
  3752. nbytes = SKD_N_INTERNAL_BYTES;
  3753. skspcl->data_buf = pci_alloc_consistent(skdev->pdev, nbytes,
  3754. &skspcl->db_dma_address);
  3755. if (skspcl->data_buf == NULL) {
  3756. rc = -ENOMEM;
  3757. goto err_out;
  3758. }
  3759. memset(skspcl->data_buf, 0, nbytes);
  3760. nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
  3761. skspcl->msg_buf = pci_alloc_consistent(skdev->pdev, nbytes,
  3762. &skspcl->mb_dma_address);
  3763. if (skspcl->msg_buf == NULL) {
  3764. rc = -ENOMEM;
  3765. goto err_out;
  3766. }
  3767. memset(skspcl->msg_buf, 0, nbytes);
  3768. skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
  3769. &skspcl->req.sksg_dma_address);
  3770. if (skspcl->req.sksg_list == NULL) {
  3771. rc = -ENOMEM;
  3772. goto err_out;
  3773. }
  3774. if (!skd_format_internal_skspcl(skdev)) {
  3775. rc = -EINVAL;
  3776. goto err_out;
  3777. }
  3778. err_out:
  3779. return rc;
  3780. }
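/*
 * Allocate a DMA-coherent array of n_sg FIT SG descriptors and chain
 * them: each descriptor's next_desc_ptr holds the bus address of the
 * one that follows, and the last is terminated with 0.
 */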
  3781. static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
  3782. u32 n_sg,
  3783. dma_addr_t *ret_dma_addr)
  3784. {
  3785. struct fit_sg_descriptor *sg_list;
  3786. u32 nbytes;
  3787. nbytes = sizeof(*sg_list) * n_sg;
  3788. sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr);
  3789. if (sg_list != NULL) {
  3790. uint64_t dma_address = *ret_dma_addr;
  3791. u32 i;
  3792. memset(sg_list, 0, nbytes);
  3793. for (i = 0; i < n_sg - 1; i++) {
  3794. uint64_t ndp_off;
  3795. ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);
  3796. sg_list[i].next_desc_ptr = dma_address + ndp_off;
  3797. }
  3798. sg_list[i].next_desc_ptr = 0LL;
  3799. }
  3800. return sg_list;
  3801. }
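/*
 * Create the gendisk and request queue, limit segments to
 * sgs_per_request and transfers to SKD_N_MAX_SECTORS, advertise
 * discard support, and leave the queue stopped until the drive
 * reports online.
 */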
  3802. static int skd_cons_disk(struct skd_device *skdev)
  3803. {
  3804. int rc = 0;
  3805. struct gendisk *disk;
  3806. struct request_queue *q;
  3807. unsigned long flags;
  3808. disk = alloc_disk(SKD_MINORS_PER_DEVICE);
  3809. if (!disk) {
  3810. rc = -ENOMEM;
  3811. goto err_out;
  3812. }
  3813. skdev->disk = disk;
  3814. sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno);
  3815. disk->major = skdev->major;
  3816. disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE;
  3817. disk->fops = &skd_blockdev_ops;
  3818. disk->private_data = skdev;
  3819. q = blk_init_queue(skd_request_fn, &skdev->lock);
  3820. if (!q) {
  3821. rc = -ENOMEM;
  3822. goto err_out;
  3823. }
  3824. skdev->queue = q;
  3825. disk->queue = q;
  3826. q->queuedata = skdev;
  3827. blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
  3828. blk_queue_max_segments(q, skdev->sgs_per_request);
  3829. blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
3830. /* set sysfs optimal_io_size to 8K */
  3831. blk_queue_io_opt(q, 8192);
  3832. /* DISCARD Flag initialization. */
  3833. q->limits.discard_granularity = 8192;
  3834. q->limits.discard_alignment = 0;
  3835. q->limits.max_discard_sectors = UINT_MAX >> 9;
  3836. q->limits.discard_zeroes_data = 1;
  3837. queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
  3838. queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
  3839. spin_lock_irqsave(&skdev->lock, flags);
  3840. pr_debug("%s:%s:%d stopping %s queue\n",
  3841. skdev->name, __func__, __LINE__, skdev->name);
  3842. blk_stop_queue(skdev->queue);
  3843. spin_unlock_irqrestore(&skdev->lock, flags);
  3844. err_out:
  3845. return rc;
  3846. }
  3847. /*
  3848. *****************************************************************************
  3849. * DESTRUCT (FREE)
  3850. *****************************************************************************
  3851. */
  3852. static void skd_free_skcomp(struct skd_device *skdev);
  3853. static void skd_free_skmsg(struct skd_device *skdev);
  3854. static void skd_free_skreq(struct skd_device *skdev);
  3855. static void skd_free_skspcl(struct skd_device *skdev);
  3856. static void skd_free_sksb(struct skd_device *skdev);
  3857. static void skd_free_sg_list(struct skd_device *skdev,
  3858. struct fit_sg_descriptor *sg_list,
  3859. u32 n_sg, dma_addr_t dma_addr);
  3860. static void skd_free_disk(struct skd_device *skdev);
  3861. static void skd_destruct(struct skd_device *skdev)
  3862. {
  3863. if (skdev == NULL)
  3864. return;
  3865. pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
  3866. skd_free_disk(skdev);
  3867. pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
  3868. skd_free_sksb(skdev);
  3869. pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
  3870. skd_free_skspcl(skdev);
  3871. pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
  3872. skd_free_skreq(skdev);
  3873. pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
  3874. skd_free_skmsg(skdev);
  3875. pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
  3876. skd_free_skcomp(skdev);
  3877. pr_debug("%s:%s:%d skdev\n", skdev->name, __func__, __LINE__);
  3878. kfree(skdev);
  3879. }
  3880. static void skd_free_skcomp(struct skd_device *skdev)
  3881. {
  3882. if (skdev->skcomp_table != NULL) {
  3883. u32 nbytes;
  3884. nbytes = sizeof(skdev->skcomp_table[0]) *
  3885. SKD_N_COMPLETION_ENTRY;
  3886. pci_free_consistent(skdev->pdev, nbytes,
  3887. skdev->skcomp_table, skdev->cq_dma_address);
  3888. }
  3889. skdev->skcomp_table = NULL;
  3890. skdev->cq_dma_address = 0;
  3891. }
  3892. static void skd_free_skmsg(struct skd_device *skdev)
  3893. {
  3894. u32 i;
  3895. if (skdev->skmsg_table == NULL)
  3896. return;
  3897. for (i = 0; i < skdev->num_fitmsg_context; i++) {
  3898. struct skd_fitmsg_context *skmsg;
  3899. skmsg = &skdev->skmsg_table[i];
  3900. if (skmsg->msg_buf != NULL) {
  3901. skmsg->msg_buf += skmsg->offset;
  3902. skmsg->mb_dma_address += skmsg->offset;
  3903. pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES,
  3904. skmsg->msg_buf,
  3905. skmsg->mb_dma_address);
  3906. }
  3907. skmsg->msg_buf = NULL;
  3908. skmsg->mb_dma_address = 0;
  3909. }
  3910. kfree(skdev->skmsg_table);
  3911. skdev->skmsg_table = NULL;
  3912. }
  3913. static void skd_free_skreq(struct skd_device *skdev)
  3914. {
  3915. u32 i;
  3916. if (skdev->skreq_table == NULL)
  3917. return;
  3918. for (i = 0; i < skdev->num_req_context; i++) {
  3919. struct skd_request_context *skreq;
  3920. skreq = &skdev->skreq_table[i];
  3921. skd_free_sg_list(skdev, skreq->sksg_list,
  3922. skdev->sgs_per_request,
  3923. skreq->sksg_dma_address);
  3924. skreq->sksg_list = NULL;
  3925. skreq->sksg_dma_address = 0;
  3926. kfree(skreq->sg);
  3927. }
  3928. kfree(skdev->skreq_table);
  3929. skdev->skreq_table = NULL;
  3930. }
  3931. static void skd_free_skspcl(struct skd_device *skdev)
  3932. {
  3933. u32 i;
  3934. u32 nbytes;
  3935. if (skdev->skspcl_table == NULL)
  3936. return;
  3937. for (i = 0; i < skdev->n_special; i++) {
  3938. struct skd_special_context *skspcl;
  3939. skspcl = &skdev->skspcl_table[i];
  3940. if (skspcl->msg_buf != NULL) {
  3941. nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
  3942. pci_free_consistent(skdev->pdev, nbytes,
  3943. skspcl->msg_buf,
  3944. skspcl->mb_dma_address);
  3945. }
  3946. skspcl->msg_buf = NULL;
  3947. skspcl->mb_dma_address = 0;
  3948. skd_free_sg_list(skdev, skspcl->req.sksg_list,
  3949. SKD_N_SG_PER_SPECIAL,
  3950. skspcl->req.sksg_dma_address);
  3951. skspcl->req.sksg_list = NULL;
  3952. skspcl->req.sksg_dma_address = 0;
  3953. kfree(skspcl->req.sg);
  3954. }
  3955. kfree(skdev->skspcl_table);
  3956. skdev->skspcl_table = NULL;
  3957. }
  3958. static void skd_free_sksb(struct skd_device *skdev)
  3959. {
  3960. struct skd_special_context *skspcl;
  3961. u32 nbytes;
  3962. skspcl = &skdev->internal_skspcl;
  3963. if (skspcl->data_buf != NULL) {
  3964. nbytes = SKD_N_INTERNAL_BYTES;
  3965. pci_free_consistent(skdev->pdev, nbytes,
  3966. skspcl->data_buf, skspcl->db_dma_address);
  3967. }
  3968. skspcl->data_buf = NULL;
  3969. skspcl->db_dma_address = 0;
  3970. if (skspcl->msg_buf != NULL) {
  3971. nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
  3972. pci_free_consistent(skdev->pdev, nbytes,
  3973. skspcl->msg_buf, skspcl->mb_dma_address);
  3974. }
  3975. skspcl->msg_buf = NULL;
  3976. skspcl->mb_dma_address = 0;
  3977. skd_free_sg_list(skdev, skspcl->req.sksg_list, 1,
  3978. skspcl->req.sksg_dma_address);
  3979. skspcl->req.sksg_list = NULL;
  3980. skspcl->req.sksg_dma_address = 0;
  3981. }
  3982. static void skd_free_sg_list(struct skd_device *skdev,
  3983. struct fit_sg_descriptor *sg_list,
  3984. u32 n_sg, dma_addr_t dma_addr)
  3985. {
  3986. if (sg_list != NULL) {
  3987. u32 nbytes;
  3988. nbytes = sizeof(*sg_list) * n_sg;
  3989. pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr);
  3990. }
  3991. }
  3992. static void skd_free_disk(struct skd_device *skdev)
  3993. {
  3994. struct gendisk *disk = skdev->disk;
  3995. if (disk != NULL) {
  3996. struct request_queue *q = disk->queue;
  3997. if (disk->flags & GENHD_FL_UP)
  3998. del_gendisk(disk);
  3999. if (q)
  4000. blk_cleanup_queue(q);
  4001. put_disk(disk);
  4002. }
  4003. skdev->disk = NULL;
  4004. }
  4005. /*
  4006. *****************************************************************************
  4007. * BLOCK DEVICE (BDEV) GLUE
  4008. *****************************************************************************
  4009. */
static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        struct skd_device *skdev;
        u64 capacity;

        skdev = bdev->bd_disk->private_data;

        pr_debug("%s:%s:%d %s: CMD[%s] getgeo device\n",
                 skdev->name, __func__, __LINE__,
                 bdev->bd_disk->disk_name, current->comm);

        if (skdev->read_cap_is_valid) {
                capacity = get_capacity(skdev->disk);
                geo->heads = 64;
                geo->sectors = 255;
                geo->cylinders = (capacity) / (255 * 64);
                return 0;
        }
        return -EIO;
}

static int skd_bdev_attach(struct skd_device *skdev)
{
        pr_debug("%s:%s:%d add_disk\n", skdev->name, __func__, __LINE__);
        add_disk(skdev->disk);
        return 0;
}

static const struct block_device_operations skd_blockdev_ops = {
        .owner          = THIS_MODULE,
        .ioctl          = skd_bdev_ioctl,
        .getgeo         = skd_bdev_getgeo,
};

/*
 *****************************************************************************
 * PCIe DRIVER GLUE
 *****************************************************************************
 */

static DEFINE_PCI_DEVICE_TABLE(skd_pci_tbl) = {
        { PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
        { 0 } /* terminate list */
};

MODULE_DEVICE_TABLE(pci, skd_pci_tbl);
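
/*
 * Build a human-readable link description by reading the PCIe Link
 * Status register (offset 0x12 from the PCI Express capability):
 * bits 3:0 give the negotiated link speed (1 = 2.5 GT/s, 2 = 5.0 GT/s)
 * and bits 9:4 give the negotiated link width.
 */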
static char *skd_pci_info(struct skd_device *skdev, char *str)
{
        int pcie_reg;

        strcpy(str, "PCIe (");
        pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP);

        if (pcie_reg) {
                char lwstr[6];
                uint16_t pcie_lstat, lspeed, lwidth;

                pcie_reg += 0x12;
                pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat);
                lspeed = pcie_lstat & (0xF);
                lwidth = (pcie_lstat & 0x3F0) >> 4;

                if (lspeed == 1)
                        strcat(str, "2.5GT/s ");
                else if (lspeed == 2)
                        strcat(str, "5.0GT/s ");
                else
                        strcat(str, "<unknown> ");
                snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth);
                strcat(str, lwstr);
        }
        return str;
}
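
/*
 * Probe sequence: enable the PCI device, claim its regions, set a
 * 64-bit DMA mask (falling back to 32-bit), allocate the skd_device,
 * map the BARs, hook up the interrupt and timer, start the device,
 * and then wait up to SKD_START_WAIT_SECONDS for the drive to come
 * online before attaching the gendisk. Each failure path unwinds only
 * the steps that have already succeeded.
 */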
static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        int i;
        int rc = 0;
        char pci_str[32];
        struct skd_device *skdev;

        pr_info("STEC s1120 Driver(%s) version %s-b%s\n",
                DRV_NAME, DRV_VERSION, DRV_BUILD_ID);
        pr_info("(skd?:??:[%s]): vendor=%04X device=%04x\n",
                pci_name(pdev), pdev->vendor, pdev->device);

        rc = pci_enable_device(pdev);
        if (rc)
                return rc;
        rc = pci_request_regions(pdev, DRV_NAME);
        if (rc)
                goto err_out;
        rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        if (!rc) {
                if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
                        pr_err("(%s): consistent DMA mask error %d\n",
                               pci_name(pdev), rc);
                }
        } else {
                rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (rc) {
                        pr_err("(%s): DMA mask error %d\n",
                               pci_name(pdev), rc);
                        goto err_out_regions;
                }
        }

        skdev = skd_construct(pdev);
        if (skdev == NULL) {
                rc = -ENOMEM;
                goto err_out_regions;
        }

        skd_pci_info(skdev, pci_str);
        pr_info("(%s): %s 64bit\n", skd_name(skdev), pci_str);

        pci_set_master(pdev);
        rc = pci_enable_pcie_error_reporting(pdev);
        if (rc) {
                pr_err("(%s): bad enable of PCIe error reporting rc=%d\n",
                       skd_name(skdev), rc);
                skdev->pcie_error_reporting_is_enabled = 0;
        } else
                skdev->pcie_error_reporting_is_enabled = 1;

        pci_set_drvdata(pdev, skdev);
        skdev->pdev = pdev;
        skdev->disk->driverfs_dev = &pdev->dev;

        for (i = 0; i < SKD_MAX_BARS; i++) {
                skdev->mem_phys[i] = pci_resource_start(pdev, i);
                skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
                skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
                                            skdev->mem_size[i]);
                if (!skdev->mem_map[i]) {
                        pr_err("(%s): Unable to map adapter memory!\n",
                               skd_name(skdev));
                        rc = -ENODEV;
                        goto err_out_iounmap;
                }
                pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n",
                         skdev->name, __func__, __LINE__,
                         skdev->mem_map[i],
                         (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
        }

        rc = skd_acquire_irq(skdev);
        if (rc) {
                pr_err("(%s): interrupt resource error %d\n",
                       skd_name(skdev), rc);
                goto err_out_iounmap;
        }

        rc = skd_start_timer(skdev);
        if (rc)
                goto err_out_timer;

        init_waitqueue_head(&skdev->waitq);

        skd_start_device(skdev);

        rc = wait_event_interruptible_timeout(skdev->waitq,
                                              (skdev->gendisk_on),
                                              (SKD_START_WAIT_SECONDS * HZ));
        if (skdev->gendisk_on > 0) {
                /* device came on-line after reset */
                skd_bdev_attach(skdev);
                rc = 0;
        } else {
                /* we timed out, something is wrong with the device,
                   don't add the disk structure */
                pr_err("(%s): error: waiting for s1120 timed out %d!\n",
                       skd_name(skdev), rc);
                /* in case of no error; we timeout with ENXIO */
                if (!rc)
                        rc = -ENXIO;
                goto err_out_timer;
        }

#ifdef SKD_VMK_POLL_HANDLER
        if (skdev->irq_type == SKD_IRQ_MSIX) {
                /* MSIX completion handler is being used for coredump */
                vmklnx_scsi_register_poll_handler(skdev->scsi_host,
                                                  skdev->msix_entries[5].vector,
                                                  skd_comp_q, skdev);
        } else {
                vmklnx_scsi_register_poll_handler(skdev->scsi_host,
                                                  skdev->pdev->irq, skd_isr,
                                                  skdev);
        }
#endif /* SKD_VMK_POLL_HANDLER */

        return rc;

err_out_timer:
        skd_stop_device(skdev);
        skd_release_irq(skdev);

err_out_iounmap:
        for (i = 0; i < SKD_MAX_BARS; i++)
                if (skdev->mem_map[i])
                        iounmap(skdev->mem_map[i]);

        if (skdev->pcie_error_reporting_is_enabled)
                pci_disable_pcie_error_reporting(pdev);

        skd_destruct(skdev);

err_out_regions:
        pci_release_regions(pdev);

err_out:
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        return rc;
}

static void skd_pci_remove(struct pci_dev *pdev)
{
        int i;
        struct skd_device *skdev;

        skdev = pci_get_drvdata(pdev);
        if (!skdev) {
                pr_err("%s: no device data for PCI\n", pci_name(pdev));
                return;
        }
        skd_stop_device(skdev);
        skd_release_irq(skdev);

        for (i = 0; i < SKD_MAX_BARS; i++)
                if (skdev->mem_map[i])
                        iounmap((u32 *)skdev->mem_map[i]);

        if (skdev->pcie_error_reporting_is_enabled)
                pci_disable_pcie_error_reporting(pdev);

        skd_destruct(skdev);

        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);

        return;
}
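
/*
 * Power management: suspend quiesces the device, frees the interrupt
 * and BAR mappings, and puts the function into the requested low-power
 * state; resume redoes the probe-time setup (DMA masks, BAR mapping,
 * IRQ, timer) and restarts the device, but does not touch the gendisk,
 * which stays registered across the suspend/resume cycle.
 */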
static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
        int i;
        struct skd_device *skdev;

        skdev = pci_get_drvdata(pdev);
        if (!skdev) {
                pr_err("%s: no device data for PCI\n", pci_name(pdev));
                return -EIO;
        }

        skd_stop_device(skdev);
        skd_release_irq(skdev);

        for (i = 0; i < SKD_MAX_BARS; i++)
                if (skdev->mem_map[i])
                        iounmap((u32 *)skdev->mem_map[i]);

        if (skdev->pcie_error_reporting_is_enabled)
                pci_disable_pcie_error_reporting(pdev);

        pci_release_regions(pdev);
        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
        return 0;
}

static int skd_pci_resume(struct pci_dev *pdev)
{
        int i;
        int rc = 0;
        struct skd_device *skdev;

        skdev = pci_get_drvdata(pdev);
        if (!skdev) {
                pr_err("%s: no device data for PCI\n", pci_name(pdev));
                return -1;
        }

        pci_set_power_state(pdev, PCI_D0);
        pci_enable_wake(pdev, PCI_D0, 0);
        pci_restore_state(pdev);

        rc = pci_enable_device(pdev);
        if (rc)
                return rc;
        rc = pci_request_regions(pdev, DRV_NAME);
        if (rc)
                goto err_out;
        rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        if (!rc) {
                if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
                        pr_err("(%s): consistent DMA mask error %d\n",
                               pci_name(pdev), rc);
                }
        } else {
                rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (rc) {
                        pr_err("(%s): DMA mask error %d\n",
                               pci_name(pdev), rc);
                        goto err_out_regions;
                }
        }

        pci_set_master(pdev);
        rc = pci_enable_pcie_error_reporting(pdev);
        if (rc) {
                pr_err("(%s): bad enable of PCIe error reporting rc=%d\n",
                       skdev->name, rc);
                skdev->pcie_error_reporting_is_enabled = 0;
        } else
                skdev->pcie_error_reporting_is_enabled = 1;

        for (i = 0; i < SKD_MAX_BARS; i++) {
                skdev->mem_phys[i] = pci_resource_start(pdev, i);
                skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
                skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
                                            skdev->mem_size[i]);
                if (!skdev->mem_map[i]) {
                        pr_err("(%s): Unable to map adapter memory!\n",
                               skd_name(skdev));
                        rc = -ENODEV;
                        goto err_out_iounmap;
                }
                pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n",
                         skdev->name, __func__, __LINE__,
                         skdev->mem_map[i],
                         (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
        }
        rc = skd_acquire_irq(skdev);
        if (rc) {
                pr_err("(%s): interrupt resource error %d\n",
                       pci_name(pdev), rc);
                goto err_out_iounmap;
        }

        rc = skd_start_timer(skdev);
        if (rc)
                goto err_out_timer;

        init_waitqueue_head(&skdev->waitq);

        skd_start_device(skdev);

        return rc;

err_out_timer:
        skd_stop_device(skdev);
        skd_release_irq(skdev);

err_out_iounmap:
        for (i = 0; i < SKD_MAX_BARS; i++)
                if (skdev->mem_map[i])
                        iounmap(skdev->mem_map[i]);

        if (skdev->pcie_error_reporting_is_enabled)
                pci_disable_pcie_error_reporting(pdev);

err_out_regions:
        pci_release_regions(pdev);

err_out:
        pci_disable_device(pdev);
        return rc;
}

static void skd_pci_shutdown(struct pci_dev *pdev)
{
        struct skd_device *skdev;

        pr_err("skd_pci_shutdown called\n");

        skdev = pci_get_drvdata(pdev);
        if (!skdev) {
                pr_err("%s: no device data for PCI\n", pci_name(pdev));
                return;
        }

        pr_err("%s: calling stop\n", skd_name(skdev));
        skd_stop_device(skdev);
}

static struct pci_driver skd_driver = {
        .name           = DRV_NAME,
        .id_table       = skd_pci_tbl,
        .probe          = skd_pci_probe,
        .remove         = skd_pci_remove,
        .suspend        = skd_pci_suspend,
        .resume         = skd_pci_resume,
        .shutdown       = skd_pci_shutdown,
};

/*
 *****************************************************************************
 * LOGGING SUPPORT
 *****************************************************************************
 */
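
/*
 * skd_name() rebuilds skdev->id_str on every call: when INQUIRY data is
 * valid the string is "<name>:<serial>:[<pci address>]", otherwise the
 * serial number field is replaced with "??".
 */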
static const char *skd_name(struct skd_device *skdev)
{
        memset(skdev->id_str, 0, sizeof(skdev->id_str));

        if (skdev->inquiry_is_valid)
                snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:%s:[%s]",
                         skdev->name, skdev->inq_serial_num,
                         pci_name(skdev->pdev));
        else
                snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:??:[%s]",
                         skdev->name, pci_name(skdev->pdev));

        return skdev->id_str;
}

const char *skd_drive_state_to_str(int state)
{
        switch (state) {
        case FIT_SR_DRIVE_OFFLINE:
                return "OFFLINE";
        case FIT_SR_DRIVE_INIT:
                return "INIT";
        case FIT_SR_DRIVE_ONLINE:
                return "ONLINE";
        case FIT_SR_DRIVE_BUSY:
                return "BUSY";
        case FIT_SR_DRIVE_FAULT:
                return "FAULT";
        case FIT_SR_DRIVE_DEGRADED:
                return "DEGRADED";
        case FIT_SR_PCIE_LINK_DOWN:
                return "LINK_DOWN";
        case FIT_SR_DRIVE_SOFT_RESET:
                return "SOFT_RESET";
        case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
                return "NEED_FW";
        case FIT_SR_DRIVE_INIT_FAULT:
                return "INIT_FAULT";
        case FIT_SR_DRIVE_BUSY_SANITIZE:
                return "BUSY_SANITIZE";
        case FIT_SR_DRIVE_BUSY_ERASE:
                return "BUSY_ERASE";
        case FIT_SR_DRIVE_FW_BOOTING:
                return "FW_BOOTING";
        default:
                return "???";
        }
}

const char *skd_skdev_state_to_str(enum skd_drvr_state state)
{
        switch (state) {
        case SKD_DRVR_STATE_LOAD:
                return "LOAD";
        case SKD_DRVR_STATE_IDLE:
                return "IDLE";
        case SKD_DRVR_STATE_BUSY:
                return "BUSY";
        case SKD_DRVR_STATE_STARTING:
                return "STARTING";
        case SKD_DRVR_STATE_ONLINE:
                return "ONLINE";
        case SKD_DRVR_STATE_PAUSING:
                return "PAUSING";
        case SKD_DRVR_STATE_PAUSED:
                return "PAUSED";
        case SKD_DRVR_STATE_DRAINING_TIMEOUT:
                return "DRAINING_TIMEOUT";
        case SKD_DRVR_STATE_RESTARTING:
                return "RESTARTING";
        case SKD_DRVR_STATE_RESUMING:
                return "RESUMING";
        case SKD_DRVR_STATE_STOPPING:
                return "STOPPING";
        case SKD_DRVR_STATE_SYNCING:
                return "SYNCING";
        case SKD_DRVR_STATE_FAULT:
                return "FAULT";
        case SKD_DRVR_STATE_DISAPPEARED:
                return "DISAPPEARED";
        case SKD_DRVR_STATE_BUSY_ERASE:
                return "BUSY_ERASE";
        case SKD_DRVR_STATE_BUSY_SANITIZE:
                return "BUSY_SANITIZE";
        case SKD_DRVR_STATE_BUSY_IMMINENT:
                return "BUSY_IMMINENT";
        case SKD_DRVR_STATE_WAIT_BOOT:
                return "WAIT_BOOT";
        default:
                return "???";
        }
}

const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
{
        switch (state) {
        case SKD_MSG_STATE_IDLE:
                return "IDLE";
        case SKD_MSG_STATE_BUSY:
                return "BUSY";
        default:
                return "???";
        }
}

const char *skd_skreq_state_to_str(enum skd_req_state state)
{
        switch (state) {
        case SKD_REQ_STATE_IDLE:
                return "IDLE";
        case SKD_REQ_STATE_SETUP:
                return "SETUP";
        case SKD_REQ_STATE_BUSY:
                return "BUSY";
        case SKD_REQ_STATE_COMPLETED:
                return "COMPLETED";
        case SKD_REQ_STATE_TIMEOUT:
                return "TIMEOUT";
        case SKD_REQ_STATE_ABORTED:
                return "ABORTED";
        default:
                return "???";
        }
}

static void skd_log_skdev(struct skd_device *skdev, const char *event)
{
        pr_debug("%s:%s:%d (%s) skdev=%p event='%s'\n",
                 skdev->name, __func__, __LINE__, skdev->name, skdev, event);
        pr_debug("%s:%s:%d drive_state=%s(%d) driver_state=%s(%d)\n",
                 skdev->name, __func__, __LINE__,
                 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
                 skd_skdev_state_to_str(skdev->state), skdev->state);
        pr_debug("%s:%s:%d busy=%d limit=%d dev=%d lowat=%d\n",
                 skdev->name, __func__, __LINE__,
                 skdev->in_flight, skdev->cur_max_queue_depth,
                 skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
        pr_debug("%s:%s:%d timestamp=0x%x cycle=%d cycle_ix=%d\n",
                 skdev->name, __func__, __LINE__,
                 skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix);
}

static void skd_log_skmsg(struct skd_device *skdev,
                          struct skd_fitmsg_context *skmsg, const char *event)
{
        pr_debug("%s:%s:%d (%s) skmsg=%p event='%s'\n",
                 skdev->name, __func__, __LINE__, skdev->name, skmsg, event);
        pr_debug("%s:%s:%d state=%s(%d) id=0x%04x length=%d\n",
                 skdev->name, __func__, __LINE__,
                 skd_skmsg_state_to_str(skmsg->state), skmsg->state,
                 skmsg->id, skmsg->length);
}

static void skd_log_skreq(struct skd_device *skdev,
                          struct skd_request_context *skreq, const char *event)
{
        pr_debug("%s:%s:%d (%s) skreq=%p event='%s'\n",
                 skdev->name, __func__, __LINE__, skdev->name, skreq, event);
        pr_debug("%s:%s:%d state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
                 skdev->name, __func__, __LINE__,
                 skd_skreq_state_to_str(skreq->state), skreq->state,
                 skreq->id, skreq->fitmsg_id);
        pr_debug("%s:%s:%d timo=0x%x sg_dir=%d n_sg=%d\n",
                 skdev->name, __func__, __LINE__,
                 skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg);

        if (skreq->req != NULL) {
                struct request *req = skreq->req;
                u32 lba = (u32)blk_rq_pos(req);
                u32 count = blk_rq_sectors(req);

                pr_debug("%s:%s:%d req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
                         skdev->name, __func__, __LINE__,
                         req, lba, lba, count, count,
                         (int)rq_data_dir(req));
        } else
                pr_debug("%s:%s:%d req=NULL\n",
                         skdev->name, __func__, __LINE__);
}

/*
 *****************************************************************************
 * MODULE GLUE
 *****************************************************************************
 */
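
/*
 * Module init: validate the module parameters (resetting any that are
 * out of range back to their defaults), create the flush-command slab,
 * register a dynamic block major with register_blkdev(0, DRV_NAME),
 * and finally register the PCI driver. skd_exit() undoes these steps.
 */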
static int __init skd_init(void)
{
        int rc = 0;

        pr_info(PFX " v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID);

        switch (skd_isr_type) {
        case SKD_IRQ_LEGACY:
        case SKD_IRQ_MSI:
        case SKD_IRQ_MSIX:
                break;
        default:
                pr_info("skd_isr_type %d invalid, re-set to %d\n",
                        skd_isr_type, SKD_IRQ_DEFAULT);
                skd_isr_type = SKD_IRQ_DEFAULT;
        }

        skd_flush_slab = kmem_cache_create(SKD_FLUSH_JOB,
                                           sizeof(struct skd_flush_cmd),
                                           0, 0, NULL);
        if (!skd_flush_slab) {
                pr_err("failed to allocate flush slab.\n");
                return -ENOMEM;
        }

        if (skd_max_queue_depth < 1
            || skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
                pr_info("skd_max_queue_depth %d invalid, re-set to %d\n",
                        skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
                skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
        }

        if (skd_max_req_per_msg < 1 || skd_max_req_per_msg > 14) {
                pr_info("skd_max_req_per_msg %d invalid, re-set to %d\n",
                        skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
                skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
        }

        if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
                pr_info("skd_sgs_per_request %d invalid, re-set to %d\n",
                        skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
                skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
        }

        if (skd_dbg_level < 0 || skd_dbg_level > 2) {
                pr_info("skd_dbg_level %d invalid, re-set to %d\n",
                        skd_dbg_level, 0);
                skd_dbg_level = 0;
        }

        if (skd_isr_comp_limit < 0) {
                pr_info("skd_isr_comp_limit %d invalid, set to %d\n",
                        skd_isr_comp_limit, 0);
                skd_isr_comp_limit = 0;
        }

        if (skd_max_pass_thru < 1 || skd_max_pass_thru > 50) {
                pr_info("skd_max_pass_thru %d invalid, re-set to %d\n",
                        skd_max_pass_thru, SKD_N_SPECIAL_CONTEXT);
                skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
        }

        /* Obtain major device number. */
        rc = register_blkdev(0, DRV_NAME);
        if (rc < 0)
                return rc;

        skd_major = rc;

        return pci_register_driver(&skd_driver);
}

static void __exit skd_exit(void)
{
        pr_info(PFX " v%s-b%s unloading\n", DRV_VERSION, DRV_BUILD_ID);

        unregister_blkdev(skd_major, DRV_NAME);
        pci_unregister_driver(&skd_driver);

        kmem_cache_destroy(skd_flush_slab);
}

module_init(skd_init);
module_exit(skd_exit);