skd_main.c

/* Copyright 2012 STEC, Inc.
 *
 * This file is licensed under the terms of the 3-clause
 * BSD License (http://opensource.org/licenses/BSD-3-Clause)
 * or the GNU GPL-2.0 (http://www.gnu.org/licenses/gpl-2.0.html),
 * at your option. Both licenses are also available in the LICENSE file
 * distributed with this project. This file may not be copied, modified,
 * or distributed except in accordance with those terms.
 * Gordoni Waidhofer <gwaidhofer@stec-inc.com>
 *	Initial Driver Design!
 * Thomas Swann <tswann@stec-inc.com>
 *	Interrupt handling.
 * Ramprasad Chinthekindi <rchinthekindi@stec-inc.com>
 *	biomode implementation.
 * Akhil Bhansali <abhansali@stec-inc.com>
 *	Added support for DISCARD / FLUSH and FUA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/hdreg.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <linux/version.h>
#include <linux/err.h>
#include <linux/aer.h>
#include <linux/ctype.h>
#include <linux/wait.h>
#include <linux/uio.h>
#include <scsi/scsi.h>
#include <scsi/sg.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include "skd_s1120.h"

static int skd_dbg_level;
static int skd_isr_comp_limit = 4;

enum {
	STEC_LINK_2_5GTS = 0,
	STEC_LINK_5GTS = 1,
	STEC_LINK_8GTS = 2,
	STEC_LINK_UNKNOWN = 0xFF
};

enum {
	SKD_FLUSH_INITIALIZER,
	SKD_FLUSH_ZERO_SIZE_FIRST,
	SKD_FLUSH_DATA_SECOND,
};

#define SKD_ASSERT(expr) \
	do { \
		if (unlikely(!(expr))) { \
			pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
			       # expr, __FILE__, __func__, __LINE__); \
		} \
	} while (0)

#define DRV_NAME "skd"
#define DRV_VERSION "2.2.1"
#define DRV_BUILD_ID "0260"
#define PFX DRV_NAME ": "
#define DRV_BIN_VERSION 0x100
#define DRV_VER_COMPL "2.2.1." DRV_BUILD_ID

MODULE_AUTHOR("bug-reports: support@stec-inc.com");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver (b" DRV_BUILD_ID ")");
MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);

#define PCI_VENDOR_ID_STEC 0x1B39
#define PCI_DEVICE_ID_S1120 0x0001

#define SKD_FUA_NV (1 << 1)
#define SKD_MINORS_PER_DEVICE 16

#define SKD_MAX_QUEUE_DEPTH 200u

#define SKD_PAUSE_TIMEOUT (5 * 1000)

#define SKD_N_FITMSG_BYTES (512u)

#define SKD_N_SPECIAL_CONTEXT 32u
#define SKD_N_SPECIAL_FITMSG_BYTES (128u)

/* SG elements are 32 bytes, so we can make this 4096 and still be under the
 * 128KB limit. That allows 4096*4K = 16M xfer size
 */
#define SKD_N_SG_PER_REQ_DEFAULT 256u
#define SKD_N_SG_PER_SPECIAL 256u

#define SKD_N_COMPLETION_ENTRY 256u
#define SKD_N_READ_CAP_BYTES (8u)

#define SKD_N_INTERNAL_BYTES (512u)

/* 5 bits of uniquifier, 0xF800 */
#define SKD_ID_INCR (0x400)
#define SKD_ID_TABLE_MASK (3u << 8u)
#define SKD_ID_RW_REQUEST (0u << 8u)
#define SKD_ID_INTERNAL (1u << 8u)
#define SKD_ID_SPECIAL_REQUEST (2u << 8u)
#define SKD_ID_FIT_MSG (3u << 8u)
#define SKD_ID_SLOT_MASK 0x00FFu
#define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu

#define SKD_N_TIMEOUT_SLOT 4u
#define SKD_TIMEOUT_SLOT_MASK 3u

#define SKD_N_MAX_SECTORS 2048u

#define SKD_MAX_RETRIES 2u

#define SKD_TIMER_SECONDS(seconds) (seconds)
#define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))

#define INQ_STD_NBYTES 36
#define SKD_DISCARD_CDB_LENGTH 24

enum skd_drvr_state {
	SKD_DRVR_STATE_LOAD,
	SKD_DRVR_STATE_IDLE,
	SKD_DRVR_STATE_BUSY,
	SKD_DRVR_STATE_STARTING,
	SKD_DRVR_STATE_ONLINE,
	SKD_DRVR_STATE_PAUSING,
	SKD_DRVR_STATE_PAUSED,
	SKD_DRVR_STATE_DRAINING_TIMEOUT,
	SKD_DRVR_STATE_RESTARTING,
	SKD_DRVR_STATE_RESUMING,
	SKD_DRVR_STATE_STOPPING,
	SKD_DRVR_STATE_FAULT,
	SKD_DRVR_STATE_DISAPPEARED,
	SKD_DRVR_STATE_PROTOCOL_MISMATCH,
	SKD_DRVR_STATE_BUSY_ERASE,
	SKD_DRVR_STATE_BUSY_SANITIZE,
	SKD_DRVR_STATE_BUSY_IMMINENT,
	SKD_DRVR_STATE_WAIT_BOOT,
	SKD_DRVR_STATE_SYNCING,
};

#define SKD_WAIT_BOOT_TIMO SKD_TIMER_SECONDS(90u)
#define SKD_STARTING_TIMO SKD_TIMER_SECONDS(8u)
#define SKD_RESTARTING_TIMO SKD_TIMER_MINUTES(4u)
#define SKD_DRAINING_TIMO SKD_TIMER_SECONDS(6u)
#define SKD_BUSY_TIMO SKD_TIMER_MINUTES(20u)
#define SKD_STARTED_BUSY_TIMO SKD_TIMER_SECONDS(60u)
#define SKD_START_WAIT_SECONDS 90u

enum skd_req_state {
	SKD_REQ_STATE_IDLE,
	SKD_REQ_STATE_SETUP,
	SKD_REQ_STATE_BUSY,
	SKD_REQ_STATE_COMPLETED,
	SKD_REQ_STATE_TIMEOUT,
	SKD_REQ_STATE_ABORTED,
};

enum skd_fit_msg_state {
	SKD_MSG_STATE_IDLE,
	SKD_MSG_STATE_BUSY,
};

enum skd_check_status_action {
	SKD_CHECK_STATUS_REPORT_GOOD,
	SKD_CHECK_STATUS_REPORT_SMART_ALERT,
	SKD_CHECK_STATUS_REQUEUE_REQUEST,
	SKD_CHECK_STATUS_REPORT_ERROR,
	SKD_CHECK_STATUS_BUSY_IMMINENT,
};

struct skd_fitmsg_context {
	enum skd_fit_msg_state state;
	struct skd_fitmsg_context *next;
	u32 id;
	u16 outstanding;
	u32 length;
	u32 offset;
	u8 *msg_buf;
	dma_addr_t mb_dma_address;
};

struct skd_request_context {
	enum skd_req_state state;
	struct skd_request_context *next;
	u16 id;
	u32 fitmsg_id;
	struct request *req;
	u8 flush_cmd;
	u8 discard_page;
	u32 timeout_stamp;
	u8 sg_data_dir;
	struct scatterlist *sg;
	u32 n_sg;
	u32 sg_byte_count;
	struct fit_sg_descriptor *sksg_list;
	dma_addr_t sksg_dma_address;
	struct fit_completion_entry_v1 completion;
	struct fit_comp_error_info err_info;
};

#define SKD_DATA_DIR_HOST_TO_CARD 1
#define SKD_DATA_DIR_CARD_TO_HOST 2
#define SKD_DATA_DIR_NONE 3	/* especially for DISCARD requests. */

struct skd_special_context {
	struct skd_request_context req;
	u8 orphaned;
	void *data_buf;
	dma_addr_t db_dma_address;
	u8 *msg_buf;
	dma_addr_t mb_dma_address;
};

struct skd_sg_io {
	fmode_t mode;
	void __user *argp;
	struct sg_io_hdr sg;
	u8 cdb[16];
	u32 dxfer_len;
	u32 iovcnt;
	struct sg_iovec *iov;
	struct sg_iovec no_iov_iov;
	struct skd_special_context *skspcl;
};

typedef enum skd_irq_type {
	SKD_IRQ_LEGACY,
	SKD_IRQ_MSI,
	SKD_IRQ_MSIX
} skd_irq_type_t;

#define SKD_MAX_BARS 2

struct skd_device {
	volatile void __iomem *mem_map[SKD_MAX_BARS];
	resource_size_t mem_phys[SKD_MAX_BARS];
	u32 mem_size[SKD_MAX_BARS];
	skd_irq_type_t irq_type;
	u32 msix_count;
	struct skd_msix_entry *msix_entries;
	struct pci_dev *pdev;
	int pcie_error_reporting_is_enabled;
	spinlock_t lock;
	struct gendisk *disk;
	struct request_queue *queue;
	struct device *class_dev;
	int gendisk_on;
	int sync_done;
	atomic_t device_count;
	u32 devno;
	u32 major;
	char name[32];
	char isr_name[30];
	enum skd_drvr_state state;
	u32 drive_state;
	u32 in_flight;
	u32 cur_max_queue_depth;
	u32 queue_low_water_mark;
	u32 dev_max_queue_depth;
	u32 num_fitmsg_context;
	u32 num_req_context;
	u32 timeout_slot[SKD_N_TIMEOUT_SLOT];
	u32 timeout_stamp;
	struct skd_fitmsg_context *skmsg_free_list;
	struct skd_fitmsg_context *skmsg_table;
	struct skd_request_context *skreq_free_list;
	struct skd_request_context *skreq_table;
	struct skd_special_context *skspcl_free_list;
	struct skd_special_context *skspcl_table;
	struct skd_special_context internal_skspcl;
	u32 read_cap_blocksize;
	u32 read_cap_last_lba;
	int read_cap_is_valid;
	int inquiry_is_valid;
	u8 inq_serial_num[13]; /* 12 chars plus null term */
	u8 id_str[80]; /* holds a composite name (pci + sernum) */
	u8 skcomp_cycle;
	u32 skcomp_ix;
	struct fit_completion_entry_v1 *skcomp_table;
	struct fit_comp_error_info *skerr_table;
	dma_addr_t cq_dma_address;
	wait_queue_head_t waitq;
	struct timer_list timer;
	u32 timer_countdown;
	u32 timer_substate;
	int n_special;
	int sgs_per_request;
	u32 last_mtd;
	u32 proto_ver;
	int dbg_level;
	u32 connect_time_stamp;
	int connect_retries;
#define SKD_MAX_CONNECT_RETRIES 16
	u32 drive_jiffies;
	u32 timo_slot;
	struct work_struct completion_worker;
};
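
/*
 * Register access helpers. All FIT register reads and writes go through
 * BAR mem_map[1]; at debug level 2 and above every access is also logged
 * via pr_debug() with barriers around the MMIO operation.
 */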
#define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF)
#define SKD_READL(DEV, OFF) skd_reg_read32(DEV, OFF)
#define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF)

static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
{
	u32 val;

	if (likely(skdev->dbg_level < 2))
		return readl(skdev->mem_map[1] + offset);
	else {
		barrier();
		val = readl(skdev->mem_map[1] + offset);
		barrier();
		pr_debug("%s:%s:%d offset %x = %x\n",
			 skdev->name, __func__, __LINE__, offset, val);
		return val;
	}
}

static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
				   u32 offset)
{
	if (likely(skdev->dbg_level < 2)) {
		writel(val, skdev->mem_map[1] + offset);
		barrier();
	} else {
		barrier();
		writel(val, skdev->mem_map[1] + offset);
		barrier();
		pr_debug("%s:%s:%d offset %x = %x\n",
			 skdev->name, __func__, __LINE__, offset, val);
	}
}

static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
				   u32 offset)
{
	if (likely(skdev->dbg_level < 2)) {
		writeq(val, skdev->mem_map[1] + offset);
		barrier();
	} else {
		barrier();
		writeq(val, skdev->mem_map[1] + offset);
		barrier();
		pr_debug("%s:%s:%d offset %x = %016llx\n",
			 skdev->name, __func__, __LINE__, offset, val);
	}
}

#define SKD_IRQ_DEFAULT SKD_IRQ_MSI
static int skd_isr_type = SKD_IRQ_DEFAULT;

module_param(skd_isr_type, int, 0444);
MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability."
		 " (0==legacy, 1==MSI, 2==MSI-X, default==1)");

#define SKD_MAX_REQ_PER_MSG_DEFAULT 1
static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;

module_param(skd_max_req_per_msg, int, 0444);
MODULE_PARM_DESC(skd_max_req_per_msg,
		 "Maximum SCSI requests packed in a single message."
		 " (1-14, default==1)");

#define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
#define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;

module_param(skd_max_queue_depth, int, 0444);
MODULE_PARM_DESC(skd_max_queue_depth,
		 "Maximum SCSI requests issued to s1120."
		 " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")");

static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
module_param(skd_sgs_per_request, int, 0444);
MODULE_PARM_DESC(skd_sgs_per_request,
		 "Maximum SG elements per block request."
		 " (1-4096, default==256)");

static int skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
module_param(skd_max_pass_thru, int, 0444);
MODULE_PARM_DESC(skd_max_pass_thru,
		 "Maximum SCSI pass-thru at a time." " (1-50, default==32)");

module_param(skd_dbg_level, int, 0444);
MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");

module_param(skd_isr_comp_limit, int, 0444);
MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");

/* Major device number dynamically assigned. */
static u32 skd_major;

static struct skd_device *skd_construct(struct pci_dev *pdev);
static void skd_destruct(struct skd_device *skdev);
static const struct block_device_operations skd_blockdev_ops;
static void skd_send_fitmsg(struct skd_device *skdev,
			    struct skd_fitmsg_context *skmsg);
static void skd_send_special_fitmsg(struct skd_device *skdev,
				    struct skd_special_context *skspcl);
static void skd_request_fn(struct request_queue *rq);
static void skd_end_request(struct skd_device *skdev,
			    struct skd_request_context *skreq, int error);
static int skd_preop_sg_list(struct skd_device *skdev,
			     struct skd_request_context *skreq);
static void skd_postop_sg_list(struct skd_device *skdev,
			       struct skd_request_context *skreq);
static void skd_restart_device(struct skd_device *skdev);
static int skd_quiesce_dev(struct skd_device *skdev);
static int skd_unquiesce_dev(struct skd_device *skdev);
static void skd_release_special(struct skd_device *skdev,
				struct skd_special_context *skspcl);
static void skd_disable_interrupts(struct skd_device *skdev);
static void skd_isr_fwstate(struct skd_device *skdev);
static void skd_recover_requests(struct skd_device *skdev, int requeue);
static void skd_soft_reset(struct skd_device *skdev);

static const char *skd_name(struct skd_device *skdev);
const char *skd_drive_state_to_str(int state);
const char *skd_skdev_state_to_str(enum skd_drvr_state state);
static void skd_log_skdev(struct skd_device *skdev, const char *event);
static void skd_log_skmsg(struct skd_device *skdev,
			  struct skd_fitmsg_context *skmsg, const char *event);
static void skd_log_skreq(struct skd_device *skdev,
			  struct skd_request_context *skreq, const char *event);

/*
 *****************************************************************************
 * READ/WRITE REQUESTS
 *****************************************************************************
 */
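
/*
 * Fail every request still sitting on the block queue with -EIO.
 * Called when the device is not online and cannot service I/O.
 */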
static void skd_fail_all_pending(struct skd_device *skdev)
{
	struct request_queue *q = skdev->queue;
	struct request *req;

	for (;; ) {
		req = blk_peek_request(q);
		if (req == NULL)
			break;
		blk_start_request(req);
		__blk_end_request_all(req, -EIO);
	}
}
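
/*
 * Build a SCSI READ(10) (0x28) or WRITE(10) (0x2a) CDB for the given
 * data direction, with the LBA and sector count encoded big-endian.
 */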
static void
skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
		int data_dir, unsigned lba,
		unsigned count)
{
	if (data_dir == READ)
		scsi_req->cdb[0] = 0x28;
	else
		scsi_req->cdb[0] = 0x2a;

	scsi_req->cdb[1] = 0;
	scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
	scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
	scsi_req->cdb[4] = (lba & 0xff00) >> 8;
	scsi_req->cdb[5] = (lba & 0xff);
	scsi_req->cdb[6] = 0;
	scsi_req->cdb[7] = (count & 0xff00) >> 8;
	scsi_req->cdb[8] = count & 0xff;
	scsi_req->cdb[9] = 0;
}
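
/*
 * Build a zero-length SYNCHRONIZE CACHE (0x35) CDB and mark the
 * request context as a flush command.
 */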
static void
skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
			    struct skd_request_context *skreq)
{
	skreq->flush_cmd = 1;

	scsi_req->cdb[0] = 0x35;
	scsi_req->cdb[1] = 0;
	scsi_req->cdb[2] = 0;
	scsi_req->cdb[3] = 0;
	scsi_req->cdb[4] = 0;
	scsi_req->cdb[5] = 0;
	scsi_req->cdb[6] = 0;
	scsi_req->cdb[7] = 0;
	scsi_req->cdb[8] = 0;
	scsi_req->cdb[9] = 0;
}
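
/*
 * Build an UNMAP CDB for a DISCARD request. The UNMAP parameter list
 * (a header plus one block descriptor holding the LBA and count) is
 * written into the supplied page, which is then attached to the block
 * request as its payload.
 */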
static void
skd_prep_discard_cdb(struct skd_scsi_request *scsi_req,
		     struct skd_request_context *skreq,
		     struct page *page,
		     u32 lba, u32 count)
{
	char *buf;
	unsigned long len;
	struct request *req;

	buf = page_address(page);
	len = SKD_DISCARD_CDB_LENGTH;

	scsi_req->cdb[0] = UNMAP;
	scsi_req->cdb[8] = len;

	put_unaligned_be16(6 + 16, &buf[0]);
	put_unaligned_be16(16, &buf[2]);
	put_unaligned_be64(lba, &buf[8]);
	put_unaligned_be32(count, &buf[16]);

	req = skreq->req;
	blk_add_request_payload(req, page, len);
	req->buffer = buf;
}

static void skd_request_fn_not_online(struct request_queue *q);
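
/*
 * Block layer request function. Pulls native requests off the queue,
 * transcodes each into a SoFIT SCSI request inside a FIT message, and
 * sends the message to the device. Stops the queue when a resource
 * (queue depth, request context, or FIT msg buffer) runs out.
 */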
static void skd_request_fn(struct request_queue *q)
{
	struct skd_device *skdev = q->queuedata;
	struct skd_fitmsg_context *skmsg = NULL;
	struct fit_msg_hdr *fmh = NULL;
	struct skd_request_context *skreq;
	struct request *req = NULL;
	struct skd_scsi_request *scsi_req;
	struct page *page;
	unsigned long io_flags;
	int error;
	u32 lba;
	u32 count;
	int data_dir;
	u32 be_lba;
	u32 be_count;
	u64 be_dmaa;
	u64 cmdctxt;
	u32 timo_slot;
	void *cmd_ptr;
	int flush, fua;

	if (skdev->state != SKD_DRVR_STATE_ONLINE) {
		skd_request_fn_not_online(q);
		return;
	}

	if (blk_queue_stopped(skdev->queue)) {
		if (skdev->skmsg_free_list == NULL ||
		    skdev->skreq_free_list == NULL ||
		    skdev->in_flight >= skdev->queue_low_water_mark)
			/* There is still some kind of shortage */
			return;

		queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
	}

	/*
	 * Stop conditions:
	 *  - There are no more native requests
	 *  - There are already the maximum number of requests in progress
	 *  - There are no more skd_request_context entries
	 *  - There are no more FIT msg buffers
	 */
	for (;; ) {

		flush = fua = 0;

		req = blk_peek_request(q);

		/* Are there any native requests to start? */
		if (req == NULL)
			break;

		lba = (u32)blk_rq_pos(req);
		count = blk_rq_sectors(req);
		data_dir = rq_data_dir(req);
		io_flags = req->cmd_flags;

		if (io_flags & REQ_FLUSH)
			flush++;

		if (io_flags & REQ_FUA)
			fua++;

		pr_debug("%s:%s:%d new req=%p lba=%u(0x%x) "
			 "count=%u(0x%x) dir=%d\n",
			 skdev->name, __func__, __LINE__,
			 req, lba, lba, count, count, data_dir);

		/* At this point we know there is a request */

		/* Are too many requests already in progress? */
		if (skdev->in_flight >= skdev->cur_max_queue_depth) {
			pr_debug("%s:%s:%d qdepth %d, limit %d\n",
				 skdev->name, __func__, __LINE__,
				 skdev->in_flight, skdev->cur_max_queue_depth);
			break;
		}

		/* Is a skd_request_context available? */
		skreq = skdev->skreq_free_list;
		if (skreq == NULL) {
			pr_debug("%s:%s:%d Out of req=%p\n",
				 skdev->name, __func__, __LINE__, q);
			break;
		}
		SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
		SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0);

		/* Now we check to see if we can get a fit msg */
		if (skmsg == NULL) {
			if (skdev->skmsg_free_list == NULL) {
				pr_debug("%s:%s:%d Out of msg\n",
					 skdev->name, __func__, __LINE__);
				break;
			}
		}

		skreq->flush_cmd = 0;
		skreq->n_sg = 0;
		skreq->sg_byte_count = 0;
		skreq->discard_page = 0;

		/*
		 * OK to now dequeue request from q.
		 *
		 * At this point we are committed to either start or reject
		 * the native request. Note that skd_request_context is
		 * available but is still at the head of the free list.
		 */
		blk_start_request(req);
		skreq->req = req;
		skreq->fitmsg_id = 0;

		/* Either a FIT msg is in progress or we have to start one. */
		if (skmsg == NULL) {
			/* Are there any FIT msg buffers available? */
			skmsg = skdev->skmsg_free_list;
			if (skmsg == NULL) {
				pr_debug("%s:%s:%d Out of msg skdev=%p\n",
					 skdev->name, __func__, __LINE__,
					 skdev);
				break;
			}
			SKD_ASSERT(skmsg->state == SKD_MSG_STATE_IDLE);
			SKD_ASSERT((skmsg->id & SKD_ID_INCR) == 0);

			skdev->skmsg_free_list = skmsg->next;

			skmsg->state = SKD_MSG_STATE_BUSY;
			skmsg->id += SKD_ID_INCR;

			/* Initialize the FIT msg header */
			fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
			memset(fmh, 0, sizeof(*fmh));
			fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
			skmsg->length = sizeof(*fmh);
		}

		skreq->fitmsg_id = skmsg->id;

		/*
		 * Note that a FIT msg may have just been started
		 * but contains no SoFIT requests yet.
		 */

		/*
		 * Transcode the request, checking as we go. The outcome of
		 * the transcoding is represented by the error variable.
		 */
		cmd_ptr = &skmsg->msg_buf[skmsg->length];
		memset(cmd_ptr, 0, 32);

		be_lba = cpu_to_be32(lba);
		be_count = cpu_to_be32(count);
		be_dmaa = cpu_to_be64((u64)skreq->sksg_dma_address);
		cmdctxt = skreq->id + SKD_ID_INCR;

		scsi_req = cmd_ptr;
		scsi_req->hdr.tag = cmdctxt;
		scsi_req->hdr.sg_list_dma_address = be_dmaa;

		if (data_dir == READ)
			skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST;
		else
			skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;

		if (io_flags & REQ_DISCARD) {
			page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
			if (!page) {
				pr_err("request_fn:Page allocation failed.\n");
				skd_end_request(skdev, skreq, -ENOMEM);
				break;
			}
			skreq->discard_page = 1;
			skd_prep_discard_cdb(scsi_req, skreq, page, lba, count);
		} else if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
			skd_prep_zerosize_flush_cdb(scsi_req, skreq);
			SKD_ASSERT(skreq->flush_cmd == 1);
		} else {
			skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
		}

		if (fua)
			scsi_req->cdb[1] |= SKD_FUA_NV;

		if (!req->bio)
			goto skip_sg;

		error = skd_preop_sg_list(skdev, skreq);

		if (error != 0) {
			/*
			 * Complete the native request with error.
			 * Note that the request context is still at the
			 * head of the free list, and that the SoFIT request
			 * was encoded into the FIT msg buffer but the FIT
			 * msg length has not been updated. In short, the
			 * only resource that has been allocated but might
			 * not be used is that the FIT msg could be empty.
			 */
			pr_debug("%s:%s:%d error Out\n",
				 skdev->name, __func__, __LINE__);
			skd_end_request(skdev, skreq, error);
			continue;
		}

skip_sg:
		scsi_req->hdr.sg_list_len_bytes =
			cpu_to_be32(skreq->sg_byte_count);

		/* Complete resource allocations. */
		skdev->skreq_free_list = skreq->next;
		skreq->state = SKD_REQ_STATE_BUSY;
		skreq->id += SKD_ID_INCR;

		skmsg->length += sizeof(struct skd_scsi_request);
		fmh->num_protocol_cmds_coalesced++;

		/*
		 * Update the active request counts.
		 * Capture the timeout timestamp.
		 */
		skreq->timeout_stamp = skdev->timeout_stamp;
		timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
		skdev->timeout_slot[timo_slot]++;
		skdev->in_flight++;
		pr_debug("%s:%s:%d req=0x%x busy=%d\n",
			 skdev->name, __func__, __LINE__,
			 skreq->id, skdev->in_flight);

		/*
		 * If the FIT msg buffer is full send it.
		 */
		if (skmsg->length >= SKD_N_FITMSG_BYTES ||
		    fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
			skd_send_fitmsg(skdev, skmsg);
			skmsg = NULL;
			fmh = NULL;
		}
	}

	/*
	 * Is a FIT msg in progress? If it is empty put the buffer back
	 * on the free list. If it is non-empty send what we got.
	 * This minimizes latency when there are fewer requests than
	 * what fits in a FIT msg.
	 */
	if (skmsg != NULL) {
		/* Bigger than just a FIT msg header? */
		if (skmsg->length > sizeof(struct fit_msg_hdr)) {
			pr_debug("%s:%s:%d sending msg=%p, len %d\n",
				 skdev->name, __func__, __LINE__,
				 skmsg, skmsg->length);
			skd_send_fitmsg(skdev, skmsg);
		} else {
			/*
			 * The FIT msg is empty. It means we got started
			 * on the msg, but the requests were rejected.
			 */
			skmsg->state = SKD_MSG_STATE_IDLE;
			skmsg->id += SKD_ID_INCR;
			skmsg->next = skdev->skmsg_free_list;
			skdev->skmsg_free_list = skmsg;
		}
		skmsg = NULL;
		fmh = NULL;
	}

	/*
	 * If req is non-NULL it means there is something to do but
	 * we are out of a resource.
	 */
	if (req)
		blk_stop_queue(skdev->queue);
}
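
/*
 * Complete a native block request: free the DISCARD payload page if one
 * was allocated, log any error, and end the request toward the block layer.
 */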
static void skd_end_request(struct skd_device *skdev,
			    struct skd_request_context *skreq, int error)
{
	struct request *req = skreq->req;
	unsigned int io_flags = req->cmd_flags;

	if ((io_flags & REQ_DISCARD) &&
	    (skreq->discard_page == 1)) {
		pr_debug("%s:%s:%d, free the page!",
			 skdev->name, __func__, __LINE__);
		free_page((unsigned long)req->buffer);
		req->buffer = NULL;
	}

	if (unlikely(error)) {
		struct request *req = skreq->req;
		char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
		u32 lba = (u32)blk_rq_pos(req);
		u32 count = blk_rq_sectors(req);

		pr_err("(%s): Error cmd=%s sect=%u count=%u id=0x%x\n",
		       skd_name(skdev), cmd, lba, count, skreq->id);
	} else
		pr_debug("%s:%s:%d id=0x%x error=%d\n",
			 skdev->name, __func__, __LINE__, skreq->id, error);

	__blk_end_request_all(skreq->req, error);
}
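
/*
 * Map the request's bio data into the per-request scatterlist, convert it
 * to PCI bus addresses, and fill in the FIT SG descriptor list that the
 * device will use for DMA.
 */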
static int skd_preop_sg_list(struct skd_device *skdev,
			     struct skd_request_context *skreq)
{
	struct request *req = skreq->req;
	int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
	int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
	struct scatterlist *sg = &skreq->sg[0];
	int n_sg;
	int i;

	skreq->sg_byte_count = 0;

	/* SKD_ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD ||
		     skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST); */

	n_sg = blk_rq_map_sg(skdev->queue, req, sg);
	if (n_sg <= 0)
		return -EINVAL;

	/*
	 * Map scatterlist to PCI bus addresses.
	 * Note PCI might change the number of entries.
	 */
	n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir);
	if (n_sg <= 0)
		return -EINVAL;

	SKD_ASSERT(n_sg <= skdev->sgs_per_request);

	skreq->n_sg = n_sg;

	for (i = 0; i < n_sg; i++) {
		struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
		u32 cnt = sg_dma_len(&sg[i]);
		uint64_t dma_addr = sg_dma_address(&sg[i]);

		sgd->control = FIT_SGD_CONTROL_NOT_LAST;
		sgd->byte_count = cnt;
		skreq->sg_byte_count += cnt;
		sgd->host_side_addr = dma_addr;
		sgd->dev_side_addr = 0;
	}

	skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
	skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;

	if (unlikely(skdev->dbg_level > 1)) {
		pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
			 skdev->name, __func__, __LINE__,
			 skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
		for (i = 0; i < n_sg; i++) {
			struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
			pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
				 "addr=0x%llx next=0x%llx\n",
				 skdev->name, __func__, __LINE__,
				 i, sgd->byte_count, sgd->control,
				 sgd->host_side_addr, sgd->next_desc_ptr);
		}
	}

	return 0;
}
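
/*
 * Undo skd_preop_sg_list(): restore the chained next_desc_ptr of the last
 * descriptor and unmap the scatterlist from the PCI device.
 */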
static void skd_postop_sg_list(struct skd_device *skdev,
			       struct skd_request_context *skreq)
{
	int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
	int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;

	/*
	 * restore the next ptr for next IO request so we
	 * don't have to set it every time.
	 */
	skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
		skreq->sksg_dma_address +
		((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
	pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, pci_dir);
}
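
/*
 * Handle queue activity while the device is not online. Depending on the
 * driver state this either returns and waits for recovery or fails all
 * pending requests.
 */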
static void skd_request_fn_not_online(struct request_queue *q)
{
	struct skd_device *skdev = q->queuedata;
	int error;

	SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);

	skd_log_skdev(skdev, "req_not_online");
	switch (skdev->state) {
	case SKD_DRVR_STATE_PAUSING:
	case SKD_DRVR_STATE_PAUSED:
	case SKD_DRVR_STATE_STARTING:
	case SKD_DRVR_STATE_RESTARTING:
	case SKD_DRVR_STATE_WAIT_BOOT:
	/* In case of starting, we haven't started the queue,
	 * so we can't get here... but requests are
	 * possibly hanging out waiting for us because we
	 * reported the dev/skd0 already. They'll wait
	 * forever if connect doesn't complete.
	 * What to do??? delay dev/skd0 ??
	 */
	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
	case SKD_DRVR_STATE_BUSY_ERASE:
	case SKD_DRVR_STATE_DRAINING_TIMEOUT:
		return;

	case SKD_DRVR_STATE_BUSY_SANITIZE:
	case SKD_DRVR_STATE_STOPPING:
	case SKD_DRVR_STATE_SYNCING:
	case SKD_DRVR_STATE_FAULT:
	case SKD_DRVR_STATE_DISAPPEARED:
	default:
		error = -EIO;
		break;
	}

	/* If we get here, terminate all pending block requests
	 * with EIO and any scsi pass thru with appropriate sense
	 */
	skd_fail_all_pending(skdev);
}

/*
 *****************************************************************************
 * TIMER
 *****************************************************************************
 */

static void skd_timer_tick_not_online(struct skd_device *skdev);
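
/*
 * One-second housekeeping timer. Re-reads the drive's hardware state,
 * advances the timeout bookkeeping, and if I/O in the current timeout
 * slot is overdue, stops the queue and enters DRAINING_TIMEOUT.
 */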
static void skd_timer_tick(ulong arg)
{
	struct skd_device *skdev = (struct skd_device *)arg;
	u32 timo_slot;
	u32 overdue_timestamp;
	unsigned long reqflags;
	u32 state;

	if (skdev->state == SKD_DRVR_STATE_FAULT)
		/* The driver has declared fault, and we want it to
		 * stay that way until driver is reloaded.
		 */
		return;

	spin_lock_irqsave(&skdev->lock, reqflags);

	state = SKD_READL(skdev, FIT_STATUS);
	state &= FIT_SR_DRIVE_STATE_MASK;
	if (state != skdev->drive_state)
		skd_isr_fwstate(skdev);

	if (skdev->state != SKD_DRVR_STATE_ONLINE) {
		skd_timer_tick_not_online(skdev);
		goto timer_func_out;
	}
	skdev->timeout_stamp++;
	timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;

	/*
	 * All requests that happened during the previous use of
	 * this slot should be done by now. The previous use was
	 * over 7 seconds ago.
	 */
	if (skdev->timeout_slot[timo_slot] == 0)
		goto timer_func_out;

	/* Something is overdue */
	overdue_timestamp = skdev->timeout_stamp - SKD_N_TIMEOUT_SLOT;

	pr_debug("%s:%s:%d found %d timeouts, draining busy=%d\n",
		 skdev->name, __func__, __LINE__,
		 skdev->timeout_slot[timo_slot], skdev->in_flight);
	pr_err("(%s): Overdue IOs (%d), busy %d\n",
	       skd_name(skdev), skdev->timeout_slot[timo_slot],
	       skdev->in_flight);

	skdev->timer_countdown = SKD_DRAINING_TIMO;
	skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT;
	skdev->timo_slot = timo_slot;
	blk_stop_queue(skdev->queue);

timer_func_out:
	mod_timer(&skdev->timer, (jiffies + HZ));

	spin_unlock_irqrestore(&skdev->lock, reqflags);
}
  880. static void skd_timer_tick_not_online(struct skd_device *skdev)
  881. {
  882. switch (skdev->state) {
  883. case SKD_DRVR_STATE_IDLE:
  884. case SKD_DRVR_STATE_LOAD:
  885. break;
  886. case SKD_DRVR_STATE_BUSY_SANITIZE:
  887. pr_debug("%s:%s:%d drive busy sanitize[%x], driver[%x]\n",
  888. skdev->name, __func__, __LINE__,
  889. skdev->drive_state, skdev->state);
  890. /* If we've been in sanitize for 3 seconds, we figure we're not
  891. * going to get anymore completions, so recover requests now
  892. */
  893. if (skdev->timer_countdown > 0) {
  894. skdev->timer_countdown--;
  895. return;
  896. }
  897. skd_recover_requests(skdev, 0);
  898. break;
  899. case SKD_DRVR_STATE_BUSY:
  900. case SKD_DRVR_STATE_BUSY_IMMINENT:
  901. case SKD_DRVR_STATE_BUSY_ERASE:
  902. pr_debug("%s:%s:%d busy[%x], countdown=%d\n",
  903. skdev->name, __func__, __LINE__,
  904. skdev->state, skdev->timer_countdown);
  905. if (skdev->timer_countdown > 0) {
  906. skdev->timer_countdown--;
  907. return;
  908. }
  909. pr_debug("%s:%s:%d busy[%x], timedout=%d, restarting device.",
  910. skdev->name, __func__, __LINE__,
  911. skdev->state, skdev->timer_countdown);
  912. skd_restart_device(skdev);
  913. break;
  914. case SKD_DRVR_STATE_WAIT_BOOT:
  915. case SKD_DRVR_STATE_STARTING:
  916. if (skdev->timer_countdown > 0) {
  917. skdev->timer_countdown--;
  918. return;
  919. }
  920. /* For now, we fault the drive. Could attempt resets to
  921. * revcover at some point. */
  922. skdev->state = SKD_DRVR_STATE_FAULT;
  923. pr_err("(%s): DriveFault Connect Timeout (%x)\n",
  924. skd_name(skdev), skdev->drive_state);
  925. /*start the queue so we can respond with error to requests */
  926. /* wakeup anyone waiting for startup complete */
  927. blk_start_queue(skdev->queue);
  928. skdev->gendisk_on = -1;
  929. wake_up_interruptible(&skdev->waitq);
  930. break;
  931. case SKD_DRVR_STATE_ONLINE:
  932. /* shouldn't get here. */
  933. break;
  934. case SKD_DRVR_STATE_PAUSING:
  935. case SKD_DRVR_STATE_PAUSED:
  936. break;
  937. case SKD_DRVR_STATE_DRAINING_TIMEOUT:
  938. pr_debug("%s:%s:%d "
  939. "draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n",
  940. skdev->name, __func__, __LINE__,
  941. skdev->timo_slot,
  942. skdev->timer_countdown,
  943. skdev->in_flight,
  944. skdev->timeout_slot[skdev->timo_slot]);
  945. /* if the slot has cleared we can let the I/O continue */
  946. if (skdev->timeout_slot[skdev->timo_slot] == 0) {
  947. pr_debug("%s:%s:%d Slot drained, starting queue.\n",
  948. skdev->name, __func__, __LINE__);
  949. skdev->state = SKD_DRVR_STATE_ONLINE;
  950. blk_start_queue(skdev->queue);
  951. return;
  952. }
  953. if (skdev->timer_countdown > 0) {
  954. skdev->timer_countdown--;
  955. return;
  956. }
  957. skd_restart_device(skdev);
  958. break;
  959. case SKD_DRVR_STATE_RESTARTING:
  960. if (skdev->timer_countdown > 0) {
  961. skdev->timer_countdown--;
  962. return;
  963. }
  964. /* For now, we fault the drive. Could attempt resets to
  965. * revcover at some point. */
  966. skdev->state = SKD_DRVR_STATE_FAULT;
  967. pr_err("(%s): DriveFault Reconnect Timeout (%x)\n",
  968. skd_name(skdev), skdev->drive_state);
  969. /*
  970. * Recovering does two things:
  971. * 1. completes IO with error
  972. * 2. reclaims dma resources
  973. * When is it safe to recover requests?
  974. * - if the drive state is faulted
  975. * - if the state is still soft reset after out timeout
  976. * - if the drive registers are dead (state = FF)
  977. * If it is "unsafe", we still need to recover, so we will
  978. * disable pci bus mastering and disable our interrupts.
  979. */
  980. if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
  981. (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
  982. (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK))
  983. /* It never came out of soft reset. Try to
  984. * recover the requests and then let them
  985. * fail. This is to mitigate hung processes. */
  986. skd_recover_requests(skdev, 0);
  987. else {
  988. pr_err("(%s): Disable BusMaster (%x)\n",
  989. skd_name(skdev), skdev->drive_state);
  990. pci_disable_device(skdev->pdev);
  991. skd_disable_interrupts(skdev);
  992. skd_recover_requests(skdev, 0);
  993. }
994. /* start the queue so we can respond with error to requests */
  995. /* wakeup anyone waiting for startup complete */
  996. blk_start_queue(skdev->queue);
  997. skdev->gendisk_on = -1;
  998. wake_up_interruptible(&skdev->waitq);
  999. break;
  1000. case SKD_DRVR_STATE_RESUMING:
  1001. case SKD_DRVR_STATE_STOPPING:
  1002. case SKD_DRVR_STATE_SYNCING:
  1003. case SKD_DRVR_STATE_FAULT:
  1004. case SKD_DRVR_STATE_DISAPPEARED:
  1005. default:
  1006. break;
  1007. }
  1008. }
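/*
 * A minimal sketch, not part of the original driver, of the recovery-safety
 * test applied in the SKD_DRVR_STATE_RESTARTING case above: requests are
 * recovered directly only when the drive can no longer DMA into host memory
 * (still in soft reset, faulted, or its state register reading back as all
 * ones); otherwise bus mastering and interrupts are shut off first. The
 * helper name is hypothetical.
 */
static int skd_recovery_is_safe_sketch(u32 drive_state)
{
	return drive_state == FIT_SR_DRIVE_SOFT_RESET ||
	       drive_state == FIT_SR_DRIVE_FAULT ||
	       drive_state == FIT_SR_DRIVE_STATE_MASK; /* registers read as all ones */
}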
  1009. static int skd_start_timer(struct skd_device *skdev)
  1010. {
  1011. int rc;
  1012. init_timer(&skdev->timer);
  1013. setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);
  1014. rc = mod_timer(&skdev->timer, (jiffies + HZ));
  1015. if (rc)
  1016. pr_err("%s: failed to start timer %d\n",
  1017. __func__, rc);
  1018. return rc;
  1019. }
  1020. static void skd_kill_timer(struct skd_device *skdev)
  1021. {
  1022. del_timer_sync(&skdev->timer);
  1023. }
  1024. /*
  1025. *****************************************************************************
  1026. * IOCTL
  1027. *****************************************************************************
  1028. */
  1029. static int skd_ioctl_sg_io(struct skd_device *skdev,
  1030. fmode_t mode, void __user *argp);
  1031. static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
  1032. struct skd_sg_io *sksgio);
  1033. static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
  1034. struct skd_sg_io *sksgio);
  1035. static int skd_sg_io_prep_buffering(struct skd_device *skdev,
  1036. struct skd_sg_io *sksgio);
  1037. static int skd_sg_io_copy_buffer(struct skd_device *skdev,
  1038. struct skd_sg_io *sksgio, int dxfer_dir);
  1039. static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
  1040. struct skd_sg_io *sksgio);
  1041. static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio);
  1042. static int skd_sg_io_release_skspcl(struct skd_device *skdev,
  1043. struct skd_sg_io *sksgio);
  1044. static int skd_sg_io_put_status(struct skd_device *skdev,
  1045. struct skd_sg_io *sksgio);
  1046. static void skd_complete_special(struct skd_device *skdev,
  1047. volatile struct fit_completion_entry_v1
  1048. *skcomp,
  1049. volatile struct fit_comp_error_info *skerr,
  1050. struct skd_special_context *skspcl);
  1051. static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode,
  1052. uint cmd_in, ulong arg)
  1053. {
  1054. int rc = 0;
  1055. struct gendisk *disk = bdev->bd_disk;
  1056. struct skd_device *skdev = disk->private_data;
  1057. void __user *p = (void *)arg;
  1058. pr_debug("%s:%s:%d %s: CMD[%s] ioctl mode 0x%x, cmd 0x%x arg %0lx\n",
  1059. skdev->name, __func__, __LINE__,
  1060. disk->disk_name, current->comm, mode, cmd_in, arg);
  1061. if (!capable(CAP_SYS_ADMIN))
  1062. return -EPERM;
  1063. switch (cmd_in) {
  1064. case SG_SET_TIMEOUT:
  1065. case SG_GET_TIMEOUT:
  1066. case SG_GET_VERSION_NUM:
  1067. rc = scsi_cmd_ioctl(disk->queue, disk, mode, cmd_in, p);
  1068. break;
  1069. case SG_IO:
  1070. rc = skd_ioctl_sg_io(skdev, mode, p);
  1071. break;
  1072. default:
  1073. rc = -ENOTTY;
  1074. break;
  1075. }
  1076. pr_debug("%s:%s:%d %s: completion rc %d\n",
  1077. skdev->name, __func__, __LINE__, disk->disk_name, rc);
  1078. return rc;
  1079. }
  1080. static int skd_ioctl_sg_io(struct skd_device *skdev, fmode_t mode,
  1081. void __user *argp)
  1082. {
  1083. int rc;
  1084. struct skd_sg_io sksgio;
  1085. memset(&sksgio, 0, sizeof(sksgio));
  1086. sksgio.mode = mode;
  1087. sksgio.argp = argp;
  1088. sksgio.iov = &sksgio.no_iov_iov;
  1089. switch (skdev->state) {
  1090. case SKD_DRVR_STATE_ONLINE:
  1091. case SKD_DRVR_STATE_BUSY_IMMINENT:
  1092. break;
  1093. default:
  1094. pr_debug("%s:%s:%d drive not online\n",
  1095. skdev->name, __func__, __LINE__);
  1096. rc = -ENXIO;
  1097. goto out;
  1098. }
  1099. rc = skd_sg_io_get_and_check_args(skdev, &sksgio);
  1100. if (rc)
  1101. goto out;
  1102. rc = skd_sg_io_obtain_skspcl(skdev, &sksgio);
  1103. if (rc)
  1104. goto out;
  1105. rc = skd_sg_io_prep_buffering(skdev, &sksgio);
  1106. if (rc)
  1107. goto out;
  1108. rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_TO_DEV);
  1109. if (rc)
  1110. goto out;
  1111. rc = skd_sg_io_send_fitmsg(skdev, &sksgio);
  1112. if (rc)
  1113. goto out;
  1114. rc = skd_sg_io_await(skdev, &sksgio);
  1115. if (rc)
  1116. goto out;
  1117. rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_FROM_DEV);
  1118. if (rc)
  1119. goto out;
  1120. rc = skd_sg_io_put_status(skdev, &sksgio);
  1121. if (rc)
  1122. goto out;
  1123. rc = 0;
  1124. out:
  1125. skd_sg_io_release_skspcl(skdev, &sksgio);
  1126. if (sksgio.iov != NULL && sksgio.iov != &sksgio.no_iov_iov)
  1127. kfree(sksgio.iov);
  1128. return rc;
  1129. }
  1130. static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
  1131. struct skd_sg_io *sksgio)
  1132. {
  1133. struct sg_io_hdr *sgp = &sksgio->sg;
  1134. int i, acc;
  1135. if (!access_ok(VERIFY_WRITE, sksgio->argp, sizeof(sg_io_hdr_t))) {
  1136. pr_debug("%s:%s:%d access sg failed %p\n",
  1137. skdev->name, __func__, __LINE__, sksgio->argp);
  1138. return -EFAULT;
  1139. }
  1140. if (__copy_from_user(sgp, sksgio->argp, sizeof(sg_io_hdr_t))) {
  1141. pr_debug("%s:%s:%d copy_from_user sg failed %p\n",
  1142. skdev->name, __func__, __LINE__, sksgio->argp);
  1143. return -EFAULT;
  1144. }
  1145. if (sgp->interface_id != SG_INTERFACE_ID_ORIG) {
  1146. pr_debug("%s:%s:%d interface_id invalid 0x%x\n",
  1147. skdev->name, __func__, __LINE__, sgp->interface_id);
  1148. return -EINVAL;
  1149. }
  1150. if (sgp->cmd_len > sizeof(sksgio->cdb)) {
  1151. pr_debug("%s:%s:%d cmd_len invalid %d\n",
  1152. skdev->name, __func__, __LINE__, sgp->cmd_len);
  1153. return -EINVAL;
  1154. }
  1155. if (sgp->iovec_count > 256) {
  1156. pr_debug("%s:%s:%d iovec_count invalid %d\n",
  1157. skdev->name, __func__, __LINE__, sgp->iovec_count);
  1158. return -EINVAL;
  1159. }
  1160. if (sgp->dxfer_len > (PAGE_SIZE * SKD_N_SG_PER_SPECIAL)) {
  1161. pr_debug("%s:%s:%d dxfer_len invalid %d\n",
  1162. skdev->name, __func__, __LINE__, sgp->dxfer_len);
  1163. return -EINVAL;
  1164. }
  1165. switch (sgp->dxfer_direction) {
  1166. case SG_DXFER_NONE:
  1167. acc = -1;
  1168. break;
  1169. case SG_DXFER_TO_DEV:
  1170. acc = VERIFY_READ;
  1171. break;
  1172. case SG_DXFER_FROM_DEV:
  1173. case SG_DXFER_TO_FROM_DEV:
  1174. acc = VERIFY_WRITE;
  1175. break;
  1176. default:
  1177. pr_debug("%s:%s:%d dxfer_dir invalid %d\n",
  1178. skdev->name, __func__, __LINE__, sgp->dxfer_direction);
  1179. return -EINVAL;
  1180. }
  1181. if (copy_from_user(sksgio->cdb, sgp->cmdp, sgp->cmd_len)) {
  1182. pr_debug("%s:%s:%d copy_from_user cmdp failed %p\n",
  1183. skdev->name, __func__, __LINE__, sgp->cmdp);
  1184. return -EFAULT;
  1185. }
  1186. if (sgp->mx_sb_len != 0) {
  1187. if (!access_ok(VERIFY_WRITE, sgp->sbp, sgp->mx_sb_len)) {
  1188. pr_debug("%s:%s:%d access sbp failed %p\n",
  1189. skdev->name, __func__, __LINE__, sgp->sbp);
  1190. return -EFAULT;
  1191. }
  1192. }
  1193. if (sgp->iovec_count == 0) {
  1194. sksgio->iov[0].iov_base = sgp->dxferp;
  1195. sksgio->iov[0].iov_len = sgp->dxfer_len;
  1196. sksgio->iovcnt = 1;
  1197. sksgio->dxfer_len = sgp->dxfer_len;
  1198. } else {
  1199. struct sg_iovec *iov;
  1200. uint nbytes = sizeof(*iov) * sgp->iovec_count;
  1201. size_t iov_data_len;
  1202. iov = kmalloc(nbytes, GFP_KERNEL);
  1203. if (iov == NULL) {
  1204. pr_debug("%s:%s:%d alloc iovec failed %d\n",
  1205. skdev->name, __func__, __LINE__,
  1206. sgp->iovec_count);
  1207. return -ENOMEM;
  1208. }
  1209. sksgio->iov = iov;
  1210. sksgio->iovcnt = sgp->iovec_count;
  1211. if (copy_from_user(iov, sgp->dxferp, nbytes)) {
  1212. pr_debug("%s:%s:%d copy_from_user iovec failed %p\n",
  1213. skdev->name, __func__, __LINE__, sgp->dxferp);
  1214. return -EFAULT;
  1215. }
  1216. /*
  1217. * Sum up the vecs, making sure they don't overflow
  1218. */
  1219. iov_data_len = 0;
  1220. for (i = 0; i < sgp->iovec_count; i++) {
  1221. if (iov_data_len + iov[i].iov_len < iov_data_len)
  1222. return -EINVAL;
  1223. iov_data_len += iov[i].iov_len;
  1224. }
  1225. /* SG_IO howto says that the shorter of the two wins */
  1226. if (sgp->dxfer_len < iov_data_len) {
  1227. sksgio->iovcnt = iov_shorten((struct iovec *)iov,
  1228. sgp->iovec_count,
  1229. sgp->dxfer_len);
  1230. sksgio->dxfer_len = sgp->dxfer_len;
  1231. } else
  1232. sksgio->dxfer_len = iov_data_len;
  1233. }
  1234. if (sgp->dxfer_direction != SG_DXFER_NONE) {
  1235. struct sg_iovec *iov = sksgio->iov;
  1236. for (i = 0; i < sksgio->iovcnt; i++, iov++) {
  1237. if (!access_ok(acc, iov->iov_base, iov->iov_len)) {
  1238. pr_debug("%s:%s:%d access data failed %p/%d\n",
  1239. skdev->name, __func__, __LINE__,
  1240. iov->iov_base, (int)iov->iov_len);
  1241. return -EFAULT;
  1242. }
  1243. }
  1244. }
  1245. return 0;
  1246. }
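/*
 * A minimal sketch, not driver code, of the unsigned wrap-around check used
 * above when summing the iovec lengths: if an unsigned addition overflows,
 * the wrapped result is smaller than the running total, so an oversized sum
 * can be rejected without resorting to a wider integer type. The helper
 * name is hypothetical.
 */
static int skd_sum_iov_lens_sketch(const struct sg_iovec *iov, int n,
				   size_t *total)
{
	size_t sum = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (sum + iov[i].iov_len < sum)	/* wrapped past the type's maximum */
			return -EINVAL;
		sum += iov[i].iov_len;
	}
	*total = sum;
	return 0;
}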
  1247. static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
  1248. struct skd_sg_io *sksgio)
  1249. {
  1250. struct skd_special_context *skspcl = NULL;
  1251. int rc;
  1252. for (;;) {
  1253. ulong flags;
  1254. spin_lock_irqsave(&skdev->lock, flags);
  1255. skspcl = skdev->skspcl_free_list;
  1256. if (skspcl != NULL) {
  1257. skdev->skspcl_free_list =
  1258. (struct skd_special_context *)skspcl->req.next;
  1259. skspcl->req.id += SKD_ID_INCR;
  1260. skspcl->req.state = SKD_REQ_STATE_SETUP;
  1261. skspcl->orphaned = 0;
  1262. skspcl->req.n_sg = 0;
  1263. }
  1264. spin_unlock_irqrestore(&skdev->lock, flags);
  1265. if (skspcl != NULL) {
  1266. rc = 0;
  1267. break;
  1268. }
  1269. pr_debug("%s:%s:%d blocking\n",
  1270. skdev->name, __func__, __LINE__);
  1271. rc = wait_event_interruptible_timeout(
  1272. skdev->waitq,
  1273. (skdev->skspcl_free_list != NULL),
  1274. msecs_to_jiffies(sksgio->sg.timeout));
  1275. pr_debug("%s:%s:%d unblocking, rc=%d\n",
  1276. skdev->name, __func__, __LINE__, rc);
  1277. if (rc <= 0) {
  1278. if (rc == 0)
  1279. rc = -ETIMEDOUT;
  1280. else
  1281. rc = -EINTR;
  1282. break;
  1283. }
  1284. /*
  1285. * If we get here rc > 0 meaning the timeout to
  1286. * wait_event_interruptible_timeout() had time left, hence the
  1287. * sought event -- non-empty free list -- happened.
  1288. * Retry the allocation.
  1289. */
  1290. }
  1291. sksgio->skspcl = skspcl;
  1292. return rc;
  1293. }
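/*
 * A small sketch, not driver code, of the wait_event_interruptible_timeout()
 * return-value convention relied on above and again in skd_sg_io_await():
 * a positive value means the condition became true with jiffies to spare,
 * zero means the timeout expired, and a negative value means a signal
 * interrupted the wait. The helper name is hypothetical.
 */
static int skd_wait_rc_to_errno_sketch(long rc)
{
	if (rc > 0)
		return 0;		/* condition satisfied */
	return rc == 0 ? -ETIMEDOUT : -EINTR;
}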
  1294. static int skd_skreq_prep_buffering(struct skd_device *skdev,
  1295. struct skd_request_context *skreq,
  1296. u32 dxfer_len)
  1297. {
  1298. u32 resid = dxfer_len;
  1299. /*
  1300. * The DMA engine must have aligned addresses and byte counts.
  1301. */
  1302. resid += (-resid) & 3;
  1303. skreq->sg_byte_count = resid;
  1304. skreq->n_sg = 0;
  1305. while (resid > 0) {
  1306. u32 nbytes = PAGE_SIZE;
  1307. u32 ix = skreq->n_sg;
  1308. struct scatterlist *sg = &skreq->sg[ix];
  1309. struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
  1310. struct page *page;
  1311. if (nbytes > resid)
  1312. nbytes = resid;
  1313. page = alloc_page(GFP_KERNEL);
  1314. if (page == NULL)
  1315. return -ENOMEM;
  1316. sg_set_page(sg, page, nbytes, 0);
  1317. /* TODO: This should be going through a pci_???()
  1318. * routine to do proper mapping. */
  1319. sksg->control = FIT_SGD_CONTROL_NOT_LAST;
  1320. sksg->byte_count = nbytes;
  1321. sksg->host_side_addr = sg_phys(sg);
  1322. sksg->dev_side_addr = 0;
  1323. sksg->next_desc_ptr = skreq->sksg_dma_address +
  1324. (ix + 1) * sizeof(*sksg);
  1325. skreq->n_sg++;
  1326. resid -= nbytes;
  1327. }
  1328. if (skreq->n_sg > 0) {
  1329. u32 ix = skreq->n_sg - 1;
  1330. struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
  1331. sksg->control = FIT_SGD_CONTROL_LAST;
  1332. sksg->next_desc_ptr = 0;
  1333. }
  1334. if (unlikely(skdev->dbg_level > 1)) {
  1335. u32 i;
  1336. pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
  1337. skdev->name, __func__, __LINE__,
  1338. skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
  1339. for (i = 0; i < skreq->n_sg; i++) {
  1340. struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
  1341. pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
  1342. "addr=0x%llx next=0x%llx\n",
  1343. skdev->name, __func__, __LINE__,
  1344. i, sgd->byte_count, sgd->control,
  1345. sgd->host_side_addr, sgd->next_desc_ptr);
  1346. }
  1347. }
  1348. return 0;
  1349. }
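/*
 * A worked example, not driver code, of the rounding done at the top of
 * skd_skreq_prep_buffering(): for unsigned x, (-x) & 3 is the distance to
 * the next multiple of 4, so adding it rounds the byte count up to the
 * alignment the DMA engine needs. E.g. x = 10: (-10) & 3 == 2, 10 + 2 == 12.
 * The helper name is hypothetical.
 */
static u32 skd_round_up_to_dword_sketch(u32 x)
{
	return x + ((-x) & 3);
}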
  1350. static int skd_sg_io_prep_buffering(struct skd_device *skdev,
  1351. struct skd_sg_io *sksgio)
  1352. {
  1353. struct skd_special_context *skspcl = sksgio->skspcl;
  1354. struct skd_request_context *skreq = &skspcl->req;
  1355. u32 dxfer_len = sksgio->dxfer_len;
  1356. int rc;
  1357. rc = skd_skreq_prep_buffering(skdev, skreq, dxfer_len);
  1358. /*
  1359. * Eventually, errors or not, skd_release_special() is called
  1360. * to recover allocations including partial allocations.
  1361. */
  1362. return rc;
  1363. }
  1364. static int skd_sg_io_copy_buffer(struct skd_device *skdev,
  1365. struct skd_sg_io *sksgio, int dxfer_dir)
  1366. {
  1367. struct skd_special_context *skspcl = sksgio->skspcl;
  1368. u32 iov_ix = 0;
  1369. struct sg_iovec curiov;
  1370. u32 sksg_ix = 0;
  1371. u8 *bufp = NULL;
  1372. u32 buf_len = 0;
  1373. u32 resid = sksgio->dxfer_len;
  1374. int rc;
  1375. curiov.iov_len = 0;
  1376. curiov.iov_base = NULL;
  1377. if (dxfer_dir != sksgio->sg.dxfer_direction) {
  1378. if (dxfer_dir != SG_DXFER_TO_DEV ||
  1379. sksgio->sg.dxfer_direction != SG_DXFER_TO_FROM_DEV)
  1380. return 0;
  1381. }
  1382. while (resid > 0) {
  1383. u32 nbytes = PAGE_SIZE;
  1384. if (curiov.iov_len == 0) {
  1385. curiov = sksgio->iov[iov_ix++];
  1386. continue;
  1387. }
  1388. if (buf_len == 0) {
  1389. struct page *page;
  1390. page = sg_page(&skspcl->req.sg[sksg_ix++]);
  1391. bufp = page_address(page);
  1392. buf_len = PAGE_SIZE;
  1393. }
  1394. nbytes = min_t(u32, nbytes, resid);
  1395. nbytes = min_t(u32, nbytes, curiov.iov_len);
  1396. nbytes = min_t(u32, nbytes, buf_len);
  1397. if (dxfer_dir == SG_DXFER_TO_DEV)
  1398. rc = __copy_from_user(bufp, curiov.iov_base, nbytes);
  1399. else
  1400. rc = __copy_to_user(curiov.iov_base, bufp, nbytes);
  1401. if (rc)
  1402. return -EFAULT;
  1403. resid -= nbytes;
  1404. curiov.iov_len -= nbytes;
  1405. curiov.iov_base += nbytes;
  1406. buf_len -= nbytes;
  1407. }
  1408. return 0;
  1409. }
  1410. static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
  1411. struct skd_sg_io *sksgio)
  1412. {
  1413. struct skd_special_context *skspcl = sksgio->skspcl;
  1414. struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
  1415. struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
  1416. memset(skspcl->msg_buf, 0, SKD_N_SPECIAL_FITMSG_BYTES);
  1417. /* Initialize the FIT msg header */
  1418. fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
  1419. fmh->num_protocol_cmds_coalesced = 1;
  1420. /* Initialize the SCSI request */
  1421. if (sksgio->sg.dxfer_direction != SG_DXFER_NONE)
  1422. scsi_req->hdr.sg_list_dma_address =
  1423. cpu_to_be64(skspcl->req.sksg_dma_address);
  1424. scsi_req->hdr.tag = skspcl->req.id;
  1425. scsi_req->hdr.sg_list_len_bytes =
  1426. cpu_to_be32(skspcl->req.sg_byte_count);
  1427. memcpy(scsi_req->cdb, sksgio->cdb, sizeof(scsi_req->cdb));
  1428. skspcl->req.state = SKD_REQ_STATE_BUSY;
  1429. skd_send_special_fitmsg(skdev, skspcl);
  1430. return 0;
  1431. }
  1432. static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio)
  1433. {
  1434. unsigned long flags;
  1435. int rc;
  1436. rc = wait_event_interruptible_timeout(skdev->waitq,
  1437. (sksgio->skspcl->req.state !=
  1438. SKD_REQ_STATE_BUSY),
  1439. msecs_to_jiffies(sksgio->sg.
  1440. timeout));
  1441. spin_lock_irqsave(&skdev->lock, flags);
  1442. if (sksgio->skspcl->req.state == SKD_REQ_STATE_ABORTED) {
  1443. pr_debug("%s:%s:%d skspcl %p aborted\n",
  1444. skdev->name, __func__, __LINE__, sksgio->skspcl);
  1445. /* Build check cond, sense and let command finish. */
  1446. /* For a timeout, we must fabricate completion and sense
  1447. * data to complete the command */
  1448. sksgio->skspcl->req.completion.status =
  1449. SAM_STAT_CHECK_CONDITION;
  1450. memset(&sksgio->skspcl->req.err_info, 0,
  1451. sizeof(sksgio->skspcl->req.err_info));
  1452. sksgio->skspcl->req.err_info.type = 0x70;
  1453. sksgio->skspcl->req.err_info.key = ABORTED_COMMAND;
  1454. sksgio->skspcl->req.err_info.code = 0x44;
  1455. sksgio->skspcl->req.err_info.qual = 0;
  1456. rc = 0;
  1457. } else if (sksgio->skspcl->req.state != SKD_REQ_STATE_BUSY)
  1458. /* No longer on the adapter. We finish. */
  1459. rc = 0;
  1460. else {
  1461. /* Something's gone wrong. Still busy. Timeout or
  1462. * user interrupted (control-C). Mark as an orphan
  1463. * so it will be disposed when completed. */
  1464. sksgio->skspcl->orphaned = 1;
  1465. sksgio->skspcl = NULL;
  1466. if (rc == 0) {
  1467. pr_debug("%s:%s:%d timed out %p (%u ms)\n",
  1468. skdev->name, __func__, __LINE__,
  1469. sksgio, sksgio->sg.timeout);
  1470. rc = -ETIMEDOUT;
  1471. } else {
  1472. pr_debug("%s:%s:%d cntlc %p\n",
  1473. skdev->name, __func__, __LINE__, sksgio);
  1474. rc = -EINTR;
  1475. }
  1476. }
  1477. spin_unlock_irqrestore(&skdev->lock, flags);
  1478. return rc;
  1479. }
  1480. static int skd_sg_io_put_status(struct skd_device *skdev,
  1481. struct skd_sg_io *sksgio)
  1482. {
  1483. struct sg_io_hdr *sgp = &sksgio->sg;
  1484. struct skd_special_context *skspcl = sksgio->skspcl;
  1485. int resid = 0;
  1486. u32 nb = be32_to_cpu(skspcl->req.completion.num_returned_bytes);
  1487. sgp->status = skspcl->req.completion.status;
  1488. resid = sksgio->dxfer_len - nb;
  1489. sgp->masked_status = sgp->status & STATUS_MASK;
  1490. sgp->msg_status = 0;
  1491. sgp->host_status = 0;
  1492. sgp->driver_status = 0;
  1493. sgp->resid = resid;
  1494. if (sgp->masked_status || sgp->host_status || sgp->driver_status)
  1495. sgp->info |= SG_INFO_CHECK;
  1496. pr_debug("%s:%s:%d status %x masked %x resid 0x%x\n",
  1497. skdev->name, __func__, __LINE__,
  1498. sgp->status, sgp->masked_status, sgp->resid);
  1499. if (sgp->masked_status == SAM_STAT_CHECK_CONDITION) {
  1500. if (sgp->mx_sb_len > 0) {
  1501. struct fit_comp_error_info *ei = &skspcl->req.err_info;
  1502. u32 nbytes = sizeof(*ei);
  1503. nbytes = min_t(u32, nbytes, sgp->mx_sb_len);
  1504. sgp->sb_len_wr = nbytes;
  1505. if (__copy_to_user(sgp->sbp, ei, nbytes)) {
  1506. pr_debug("%s:%s:%d copy_to_user sense failed %p\n",
  1507. skdev->name, __func__, __LINE__,
  1508. sgp->sbp);
  1509. return -EFAULT;
  1510. }
  1511. }
  1512. }
  1513. if (__copy_to_user(sksgio->argp, sgp, sizeof(sg_io_hdr_t))) {
  1514. pr_debug("%s:%s:%d copy_to_user sg failed %p\n",
  1515. skdev->name, __func__, __LINE__, sksgio->argp);
  1516. return -EFAULT;
  1517. }
  1518. return 0;
  1519. }
  1520. static int skd_sg_io_release_skspcl(struct skd_device *skdev,
  1521. struct skd_sg_io *sksgio)
  1522. {
  1523. struct skd_special_context *skspcl = sksgio->skspcl;
  1524. if (skspcl != NULL) {
  1525. ulong flags;
  1526. sksgio->skspcl = NULL;
  1527. spin_lock_irqsave(&skdev->lock, flags);
  1528. skd_release_special(skdev, skspcl);
  1529. spin_unlock_irqrestore(&skdev->lock, flags);
  1530. }
  1531. return 0;
  1532. }
  1533. /*
  1534. *****************************************************************************
  1535. * INTERNAL REQUESTS -- generated by driver itself
  1536. *****************************************************************************
  1537. */
  1538. static int skd_format_internal_skspcl(struct skd_device *skdev)
  1539. {
  1540. struct skd_special_context *skspcl = &skdev->internal_skspcl;
  1541. struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
  1542. struct fit_msg_hdr *fmh;
  1543. uint64_t dma_address;
  1544. struct skd_scsi_request *scsi;
  1545. fmh = (struct fit_msg_hdr *)&skspcl->msg_buf[0];
  1546. fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
  1547. fmh->num_protocol_cmds_coalesced = 1;
  1548. scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
  1549. memset(scsi, 0, sizeof(*scsi));
  1550. dma_address = skspcl->req.sksg_dma_address;
  1551. scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
  1552. sgd->control = FIT_SGD_CONTROL_LAST;
  1553. sgd->byte_count = 0;
  1554. sgd->host_side_addr = skspcl->db_dma_address;
  1555. sgd->dev_side_addr = 0;
  1556. sgd->next_desc_ptr = 0LL;
  1557. return 1;
  1558. }
  1559. #define WR_BUF_SIZE SKD_N_INTERNAL_BYTES
  1560. static void skd_send_internal_skspcl(struct skd_device *skdev,
  1561. struct skd_special_context *skspcl,
  1562. u8 opcode)
  1563. {
  1564. struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
  1565. struct skd_scsi_request *scsi;
  1566. unsigned char *buf = skspcl->data_buf;
  1567. int i;
  1568. if (skspcl->req.state != SKD_REQ_STATE_IDLE)
  1569. /*
  1570. * A refresh is already in progress.
  1571. * Just wait for it to finish.
  1572. */
  1573. return;
  1574. SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0);
  1575. skspcl->req.state = SKD_REQ_STATE_BUSY;
  1576. skspcl->req.id += SKD_ID_INCR;
  1577. scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
  1578. scsi->hdr.tag = skspcl->req.id;
  1579. memset(scsi->cdb, 0, sizeof(scsi->cdb));
  1580. switch (opcode) {
  1581. case TEST_UNIT_READY:
  1582. scsi->cdb[0] = TEST_UNIT_READY;
  1583. sgd->byte_count = 0;
  1584. scsi->hdr.sg_list_len_bytes = 0;
  1585. break;
  1586. case READ_CAPACITY:
  1587. scsi->cdb[0] = READ_CAPACITY;
  1588. sgd->byte_count = SKD_N_READ_CAP_BYTES;
  1589. scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
  1590. break;
  1591. case INQUIRY:
  1592. scsi->cdb[0] = INQUIRY;
  1593. scsi->cdb[1] = 0x01; /* evpd */
  1594. scsi->cdb[2] = 0x80; /* serial number page */
  1595. scsi->cdb[4] = 0x10;
  1596. sgd->byte_count = 16;
  1597. scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
  1598. break;
  1599. case SYNCHRONIZE_CACHE:
  1600. scsi->cdb[0] = SYNCHRONIZE_CACHE;
  1601. sgd->byte_count = 0;
  1602. scsi->hdr.sg_list_len_bytes = 0;
  1603. break;
  1604. case WRITE_BUFFER:
  1605. scsi->cdb[0] = WRITE_BUFFER;
  1606. scsi->cdb[1] = 0x02;
  1607. scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
  1608. scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
  1609. sgd->byte_count = WR_BUF_SIZE;
  1610. scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
  1611. /* fill incrementing byte pattern */
  1612. for (i = 0; i < sgd->byte_count; i++)
  1613. buf[i] = i & 0xFF;
  1614. break;
  1615. case READ_BUFFER:
  1616. scsi->cdb[0] = READ_BUFFER;
  1617. scsi->cdb[1] = 0x02;
  1618. scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
  1619. scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
  1620. sgd->byte_count = WR_BUF_SIZE;
  1621. scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
  1622. memset(skspcl->data_buf, 0, sgd->byte_count);
  1623. break;
  1624. default:
  1625. SKD_ASSERT("Don't know what to send");
  1626. return;
  1627. }
  1628. skd_send_special_fitmsg(skdev, skspcl);
  1629. }
  1630. static void skd_refresh_device_data(struct skd_device *skdev)
  1631. {
  1632. struct skd_special_context *skspcl = &skdev->internal_skspcl;
  1633. skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
  1634. }
  1635. static int skd_chk_read_buf(struct skd_device *skdev,
  1636. struct skd_special_context *skspcl)
  1637. {
  1638. unsigned char *buf = skspcl->data_buf;
  1639. int i;
  1640. /* check for incrementing byte pattern */
  1641. for (i = 0; i < WR_BUF_SIZE; i++)
  1642. if (buf[i] != (i & 0xFF))
  1643. return 1;
  1644. return 0;
  1645. }
  1646. static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
  1647. u8 code, u8 qual, u8 fruc)
  1648. {
  1649. /* If the check condition is of special interest, log a message */
  1650. if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02)
  1651. && (code == 0x04) && (qual == 0x06)) {
  1652. pr_err("(%s): *** LOST_WRITE_DATA ERROR *** key/asc/"
  1653. "ascq/fruc %02x/%02x/%02x/%02x\n",
  1654. skd_name(skdev), key, code, qual, fruc);
  1655. }
  1656. }
  1657. static void skd_complete_internal(struct skd_device *skdev,
  1658. volatile struct fit_completion_entry_v1
  1659. *skcomp,
  1660. volatile struct fit_comp_error_info *skerr,
  1661. struct skd_special_context *skspcl)
  1662. {
  1663. u8 *buf = skspcl->data_buf;
  1664. u8 status;
  1665. int i;
  1666. struct skd_scsi_request *scsi =
  1667. (struct skd_scsi_request *)&skspcl->msg_buf[64];
  1668. SKD_ASSERT(skspcl == &skdev->internal_skspcl);
  1669. pr_debug("%s:%s:%d complete internal %x\n",
  1670. skdev->name, __func__, __LINE__, scsi->cdb[0]);
  1671. skspcl->req.completion = *skcomp;
  1672. skspcl->req.state = SKD_REQ_STATE_IDLE;
  1673. skspcl->req.id += SKD_ID_INCR;
  1674. status = skspcl->req.completion.status;
  1675. skd_log_check_status(skdev, status, skerr->key, skerr->code,
  1676. skerr->qual, skerr->fruc);
  1677. switch (scsi->cdb[0]) {
  1678. case TEST_UNIT_READY:
  1679. if (status == SAM_STAT_GOOD)
  1680. skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
  1681. else if ((status == SAM_STAT_CHECK_CONDITION) &&
  1682. (skerr->key == MEDIUM_ERROR))
  1683. skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
  1684. else {
  1685. if (skdev->state == SKD_DRVR_STATE_STOPPING) {
  1686. pr_debug("%s:%s:%d TUR failed, don't send anymore state 0x%x\n",
  1687. skdev->name, __func__, __LINE__,
  1688. skdev->state);
  1689. return;
  1690. }
  1691. pr_debug("%s:%s:%d **** TUR failed, retry skerr\n",
  1692. skdev->name, __func__, __LINE__);
  1693. skd_send_internal_skspcl(skdev, skspcl, 0x00);
  1694. }
  1695. break;
  1696. case WRITE_BUFFER:
  1697. if (status == SAM_STAT_GOOD)
  1698. skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
  1699. else {
  1700. if (skdev->state == SKD_DRVR_STATE_STOPPING) {
  1701. pr_debug("%s:%s:%d write buffer failed, don't send anymore state 0x%x\n",
  1702. skdev->name, __func__, __LINE__,
  1703. skdev->state);
  1704. return;
  1705. }
  1706. pr_debug("%s:%s:%d **** write buffer failed, retry skerr\n",
  1707. skdev->name, __func__, __LINE__);
  1708. skd_send_internal_skspcl(skdev, skspcl, 0x00);
  1709. }
  1710. break;
  1711. case READ_BUFFER:
  1712. if (status == SAM_STAT_GOOD) {
  1713. if (skd_chk_read_buf(skdev, skspcl) == 0)
  1714. skd_send_internal_skspcl(skdev, skspcl,
  1715. READ_CAPACITY);
  1716. else {
  1717. pr_err(
  1718. "(%s):*** W/R Buffer mismatch %d ***\n",
  1719. skd_name(skdev), skdev->connect_retries);
  1720. if (skdev->connect_retries <
  1721. SKD_MAX_CONNECT_RETRIES) {
  1722. skdev->connect_retries++;
  1723. skd_soft_reset(skdev);
  1724. } else {
  1725. pr_err(
  1726. "(%s): W/R Buffer Connect Error\n",
  1727. skd_name(skdev));
  1728. return;
  1729. }
  1730. }
  1731. } else {
  1732. if (skdev->state == SKD_DRVR_STATE_STOPPING) {
  1733. pr_debug("%s:%s:%d "
  1734. "read buffer failed, don't send anymore state 0x%x\n",
  1735. skdev->name, __func__, __LINE__,
  1736. skdev->state);
  1737. return;
  1738. }
  1739. pr_debug("%s:%s:%d "
  1740. "**** read buffer failed, retry skerr\n",
  1741. skdev->name, __func__, __LINE__);
  1742. skd_send_internal_skspcl(skdev, skspcl, 0x00);
  1743. }
  1744. break;
  1745. case READ_CAPACITY:
  1746. skdev->read_cap_is_valid = 0;
  1747. if (status == SAM_STAT_GOOD) {
  1748. skdev->read_cap_last_lba =
  1749. (buf[0] << 24) | (buf[1] << 16) |
  1750. (buf[2] << 8) | buf[3];
  1751. skdev->read_cap_blocksize =
  1752. (buf[4] << 24) | (buf[5] << 16) |
  1753. (buf[6] << 8) | buf[7];
  1754. pr_debug("%s:%s:%d last lba %d, bs %d\n",
  1755. skdev->name, __func__, __LINE__,
  1756. skdev->read_cap_last_lba,
  1757. skdev->read_cap_blocksize);
  1758. set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
  1759. skdev->read_cap_is_valid = 1;
  1760. skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
  1761. } else if ((status == SAM_STAT_CHECK_CONDITION) &&
  1762. (skerr->key == MEDIUM_ERROR)) {
  1763. skdev->read_cap_last_lba = ~0;
  1764. set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
  1765. pr_debug("%s:%s:%d "
  1766. "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n",
  1767. skdev->name, __func__, __LINE__);
  1768. skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
  1769. } else {
  1770. pr_debug("%s:%s:%d **** READCAP failed, retry TUR\n",
  1771. skdev->name, __func__, __LINE__);
  1772. skd_send_internal_skspcl(skdev, skspcl,
  1773. TEST_UNIT_READY);
  1774. }
  1775. break;
  1776. case INQUIRY:
  1777. skdev->inquiry_is_valid = 0;
  1778. if (status == SAM_STAT_GOOD) {
  1779. skdev->inquiry_is_valid = 1;
  1780. for (i = 0; i < 12; i++)
  1781. skdev->inq_serial_num[i] = buf[i + 4];
  1782. skdev->inq_serial_num[12] = 0;
  1783. }
  1784. if (skd_unquiesce_dev(skdev) < 0)
  1785. pr_debug("%s:%s:%d **** failed, to ONLINE device\n",
  1786. skdev->name, __func__, __LINE__);
  1787. /* connection is complete */
  1788. skdev->connect_retries = 0;
  1789. break;
  1790. case SYNCHRONIZE_CACHE:
  1791. if (status == SAM_STAT_GOOD)
  1792. skdev->sync_done = 1;
  1793. else
  1794. skdev->sync_done = -1;
  1795. wake_up_interruptible(&skdev->waitq);
  1796. break;
  1797. default:
  1798. SKD_ASSERT("we didn't send this");
  1799. }
  1800. }
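/*
 * A compact summary, inferred from the switch above and not present in the
 * driver itself, of the internal command chain used to bring a drive online:
 * TEST_UNIT_READY -> WRITE_BUFFER -> READ_BUFFER (pattern verified by
 * skd_chk_read_buf) -> READ_CAPACITY -> INQUIRY (serial number page) ->
 * skd_unquiesce_dev(). The array name is hypothetical.
 */
static const u8 skd_connect_chain_sketch[] = {
	TEST_UNIT_READY, WRITE_BUFFER, READ_BUFFER, READ_CAPACITY, INQUIRY,
};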
  1801. /*
  1802. *****************************************************************************
  1803. * FIT MESSAGES
  1804. *****************************************************************************
  1805. */
  1806. static void skd_send_fitmsg(struct skd_device *skdev,
  1807. struct skd_fitmsg_context *skmsg)
  1808. {
  1809. u64 qcmd;
  1810. struct fit_msg_hdr *fmh;
  1811. pr_debug("%s:%s:%d dma address 0x%llx, busy=%d\n",
  1812. skdev->name, __func__, __LINE__,
  1813. skmsg->mb_dma_address, skdev->in_flight);
  1814. pr_debug("%s:%s:%d msg_buf 0x%p, offset %x\n",
  1815. skdev->name, __func__, __LINE__,
  1816. skmsg->msg_buf, skmsg->offset);
  1817. qcmd = skmsg->mb_dma_address;
  1818. qcmd |= FIT_QCMD_QID_NORMAL;
  1819. fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
  1820. skmsg->outstanding = fmh->num_protocol_cmds_coalesced;
  1821. if (unlikely(skdev->dbg_level > 1)) {
  1822. u8 *bp = (u8 *)skmsg->msg_buf;
  1823. int i;
  1824. for (i = 0; i < skmsg->length; i += 8) {
  1825. pr_debug("%s:%s:%d msg[%2d] %02x %02x %02x %02x "
  1826. "%02x %02x %02x %02x\n",
  1827. skdev->name, __func__, __LINE__,
  1828. i, bp[i + 0], bp[i + 1], bp[i + 2],
  1829. bp[i + 3], bp[i + 4], bp[i + 5],
  1830. bp[i + 6], bp[i + 7]);
  1831. if (i == 0)
  1832. i = 64 - 8;
  1833. }
  1834. }
  1835. if (skmsg->length > 256)
  1836. qcmd |= FIT_QCMD_MSGSIZE_512;
  1837. else if (skmsg->length > 128)
  1838. qcmd |= FIT_QCMD_MSGSIZE_256;
  1839. else if (skmsg->length > 64)
  1840. qcmd |= FIT_QCMD_MSGSIZE_128;
  1841. else
  1842. /*
  1843. * This makes no sense because the FIT msg header is
  1844. * 64 bytes. If the msg is only 64 bytes long it has
  1845. * no payload.
  1846. */
  1847. qcmd |= FIT_QCMD_MSGSIZE_64;
  1848. SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
  1849. }
  1850. static void skd_send_special_fitmsg(struct skd_device *skdev,
  1851. struct skd_special_context *skspcl)
  1852. {
  1853. u64 qcmd;
  1854. if (unlikely(skdev->dbg_level > 1)) {
  1855. u8 *bp = (u8 *)skspcl->msg_buf;
  1856. int i;
  1857. for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
  1858. pr_debug("%s:%s:%d spcl[%2d] %02x %02x %02x %02x "
  1859. "%02x %02x %02x %02x\n",
  1860. skdev->name, __func__, __LINE__, i,
  1861. bp[i + 0], bp[i + 1], bp[i + 2], bp[i + 3],
  1862. bp[i + 4], bp[i + 5], bp[i + 6], bp[i + 7]);
  1863. if (i == 0)
  1864. i = 64 - 8;
  1865. }
  1866. pr_debug("%s:%s:%d skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
  1867. skdev->name, __func__, __LINE__,
  1868. skspcl, skspcl->req.id, skspcl->req.sksg_list,
  1869. skspcl->req.sksg_dma_address);
  1870. for (i = 0; i < skspcl->req.n_sg; i++) {
  1871. struct fit_sg_descriptor *sgd =
  1872. &skspcl->req.sksg_list[i];
  1873. pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
  1874. "addr=0x%llx next=0x%llx\n",
  1875. skdev->name, __func__, __LINE__,
  1876. i, sgd->byte_count, sgd->control,
  1877. sgd->host_side_addr, sgd->next_desc_ptr);
  1878. }
  1879. }
  1880. /*
  1881. * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
  1882. * and one 64-byte SSDI command.
  1883. */
  1884. qcmd = skspcl->mb_dma_address;
  1885. qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;
  1886. SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
  1887. }
  1888. /*
  1889. *****************************************************************************
  1890. * COMPLETION QUEUE
  1891. *****************************************************************************
  1892. */
  1893. static void skd_complete_other(struct skd_device *skdev,
  1894. volatile struct fit_completion_entry_v1 *skcomp,
  1895. volatile struct fit_comp_error_info *skerr);
  1896. struct sns_info {
  1897. u8 type;
  1898. u8 stat;
  1899. u8 key;
  1900. u8 asc;
  1901. u8 ascq;
  1902. u8 mask;
  1903. enum skd_check_status_action action;
  1904. };
  1905. static struct sns_info skd_chkstat_table[] = {
  1906. /* Good */
  1907. { 0x70, 0x02, RECOVERED_ERROR, 0, 0, 0x1c,
  1908. SKD_CHECK_STATUS_REPORT_GOOD },
  1909. /* Smart alerts */
  1910. { 0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E, /* warnings */
  1911. SKD_CHECK_STATUS_REPORT_SMART_ALERT },
  1912. { 0x70, 0x02, NO_SENSE, 0x5D, 0x00, 0x1E, /* thresholds */
  1913. SKD_CHECK_STATUS_REPORT_SMART_ALERT },
  1914. { 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F, /* temperature over trigger */
  1915. SKD_CHECK_STATUS_REPORT_SMART_ALERT },
  1916. /* Retry (with limits) */
  1917. { 0x70, 0x02, 0x0B, 0, 0, 0x1C, /* This one is for DMA ERROR */
  1918. SKD_CHECK_STATUS_REQUEUE_REQUEST },
  1919. { 0x70, 0x02, 0x06, 0x0B, 0x00, 0x1E, /* warnings */
  1920. SKD_CHECK_STATUS_REQUEUE_REQUEST },
  1921. { 0x70, 0x02, 0x06, 0x5D, 0x00, 0x1E, /* thresholds */
  1922. SKD_CHECK_STATUS_REQUEUE_REQUEST },
  1923. { 0x70, 0x02, 0x06, 0x80, 0x30, 0x1F, /* backup power */
  1924. SKD_CHECK_STATUS_REQUEUE_REQUEST },
  1925. /* Busy (or about to be) */
  1926. { 0x70, 0x02, 0x06, 0x3f, 0x01, 0x1F, /* fw changed */
  1927. SKD_CHECK_STATUS_BUSY_IMMINENT },
  1928. };
  1929. /*
  1930. * Look up status and sense data to decide how to handle the error
  1931. * from the device.
  1932. * mask says which fields must match e.g., mask=0x18 means check
  1933. * type and stat, ignore key, asc, ascq.
  1934. */
  1935. static enum skd_check_status_action
  1936. skd_check_status(struct skd_device *skdev,
  1937. u8 cmp_status, volatile struct fit_comp_error_info *skerr)
  1938. {
  1939. int i, n;
  1940. pr_err("(%s): key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
  1941. skd_name(skdev), skerr->key, skerr->code, skerr->qual,
  1942. skerr->fruc);
  1943. pr_debug("%s:%s:%d stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
  1944. skdev->name, __func__, __LINE__, skerr->type, cmp_status,
  1945. skerr->key, skerr->code, skerr->qual, skerr->fruc);
  1946. /* Does the info match an entry in the good category? */
  1947. n = sizeof(skd_chkstat_table) / sizeof(skd_chkstat_table[0]);
  1948. for (i = 0; i < n; i++) {
  1949. struct sns_info *sns = &skd_chkstat_table[i];
  1950. if (sns->mask & 0x10)
  1951. if (skerr->type != sns->type)
  1952. continue;
  1953. if (sns->mask & 0x08)
  1954. if (cmp_status != sns->stat)
  1955. continue;
  1956. if (sns->mask & 0x04)
  1957. if (skerr->key != sns->key)
  1958. continue;
  1959. if (sns->mask & 0x02)
  1960. if (skerr->code != sns->asc)
  1961. continue;
  1962. if (sns->mask & 0x01)
  1963. if (skerr->qual != sns->ascq)
  1964. continue;
  1965. if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
  1966. pr_err("(%s): SMART Alert: sense key/asc/ascq "
  1967. "%02x/%02x/%02x\n",
  1968. skd_name(skdev), skerr->key,
  1969. skerr->code, skerr->qual);
  1970. }
  1971. return sns->action;
  1972. }
  1973. /* No other match, so nonzero status means error,
  1974. * zero status means good
  1975. */
  1976. if (cmp_status) {
  1977. pr_debug("%s:%s:%d status check: error\n",
  1978. skdev->name, __func__, __LINE__);
  1979. return SKD_CHECK_STATUS_REPORT_ERROR;
  1980. }
  1981. pr_debug("%s:%s:%d status check good default\n",
  1982. skdev->name, __func__, __LINE__);
  1983. return SKD_CHECK_STATUS_REPORT_GOOD;
  1984. }
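/*
 * A minimal sketch, not driver code, of how skd_check_status() interprets
 * the mask bits in skd_chkstat_table: 0x10 compares type, 0x08 stat,
 * 0x04 key, 0x02 asc and 0x01 ascq, so mask 0x1C matches on type/stat/key
 * only, while 0x1F requires all five sense fields to match. The helper
 * name is hypothetical.
 */
static int skd_sns_entry_matches_sketch(const struct sns_info *sns, u8 type,
					u8 stat, u8 key, u8 asc, u8 ascq)
{
	if ((sns->mask & 0x10) && type != sns->type)
		return 0;
	if ((sns->mask & 0x08) && stat != sns->stat)
		return 0;
	if ((sns->mask & 0x04) && key != sns->key)
		return 0;
	if ((sns->mask & 0x02) && asc != sns->asc)
		return 0;
	if ((sns->mask & 0x01) && ascq != sns->ascq)
		return 0;
	return 1;
}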
  1985. static void skd_resolve_req_exception(struct skd_device *skdev,
  1986. struct skd_request_context *skreq)
  1987. {
  1988. u8 cmp_status = skreq->completion.status;
  1989. switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
  1990. case SKD_CHECK_STATUS_REPORT_GOOD:
  1991. case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
  1992. skd_end_request(skdev, skreq, 0);
  1993. break;
  1994. case SKD_CHECK_STATUS_BUSY_IMMINENT:
  1995. skd_log_skreq(skdev, skreq, "retry(busy)");
  1996. blk_requeue_request(skdev->queue, skreq->req);
  1997. pr_info("(%s) drive BUSY imminent\n", skd_name(skdev));
  1998. skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
  1999. skdev->timer_countdown = SKD_TIMER_MINUTES(20);
  2000. skd_quiesce_dev(skdev);
  2001. break;
  2002. case SKD_CHECK_STATUS_REQUEUE_REQUEST:
  2003. if ((unsigned long) ++skreq->req->special < SKD_MAX_RETRIES) {
  2004. skd_log_skreq(skdev, skreq, "retry");
  2005. blk_requeue_request(skdev->queue, skreq->req);
  2006. break;
  2007. }
  2008. /* fall through to report error */
  2009. case SKD_CHECK_STATUS_REPORT_ERROR:
  2010. default:
  2011. skd_end_request(skdev, skreq, -EIO);
  2012. break;
  2013. }
  2014. }
  2015. /* assume spinlock is already held */
  2016. static void skd_release_skreq(struct skd_device *skdev,
  2017. struct skd_request_context *skreq)
  2018. {
  2019. u32 msg_slot;
  2020. struct skd_fitmsg_context *skmsg;
  2021. u32 timo_slot;
  2022. /*
  2023. * Reclaim the FIT msg buffer if this is
  2024. * the first of the requests it carried to
  2025. * be completed. The FIT msg buffer used to
  2026. * send this request cannot be reused until
  2027. * we are sure the s1120 card has copied
  2028. * it to its memory. The FIT msg might have
  2029. * contained several requests. As soon as
  2030. * any of them are completed we know that
  2031. * the entire FIT msg was transferred.
  2032. * Only the first completed request will
  2033. * match the FIT msg buffer id. The FIT
  2034. * msg buffer id is immediately updated.
  2035. * When subsequent requests complete the FIT
  2036. * msg buffer id won't match, so we know
  2037. * quite cheaply that it is already done.
  2038. */
  2039. msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK;
  2040. SKD_ASSERT(msg_slot < skdev->num_fitmsg_context);
  2041. skmsg = &skdev->skmsg_table[msg_slot];
  2042. if (skmsg->id == skreq->fitmsg_id) {
  2043. SKD_ASSERT(skmsg->state == SKD_MSG_STATE_BUSY);
  2044. SKD_ASSERT(skmsg->outstanding > 0);
  2045. skmsg->outstanding--;
  2046. if (skmsg->outstanding == 0) {
  2047. skmsg->state = SKD_MSG_STATE_IDLE;
  2048. skmsg->id += SKD_ID_INCR;
  2049. skmsg->next = skdev->skmsg_free_list;
  2050. skdev->skmsg_free_list = skmsg;
  2051. }
  2052. }
  2053. /*
  2054. * Decrease the number of active requests.
  2055. * Also decrements the count in the timeout slot.
  2056. */
  2057. SKD_ASSERT(skdev->in_flight > 0);
  2058. skdev->in_flight -= 1;
  2059. timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
  2060. SKD_ASSERT(skdev->timeout_slot[timo_slot] > 0);
  2061. skdev->timeout_slot[timo_slot] -= 1;
  2062. /*
  2063. * Reset backpointer
  2064. */
  2065. skreq->req = NULL;
  2066. /*
  2067. * Reclaim the skd_request_context
  2068. */
  2069. skreq->state = SKD_REQ_STATE_IDLE;
  2070. skreq->id += SKD_ID_INCR;
  2071. skreq->next = skdev->skreq_free_list;
  2072. skdev->skreq_free_list = skreq;
  2073. }
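/*
 * A small sketch, not driver code, of the id convention the release path
 * above depends on: the low bits of an id select a slot within a table, the
 * table bits say which table, and every reuse bumps the id by SKD_ID_INCR so
 * the remaining high bits behave like a generation count. A completion whose
 * id no longer equals the slot's current id is therefore stale and can be
 * dropped cheaply. The helper name is hypothetical.
 */
static void skd_decode_id_sketch(u32 id, u32 *slot, u32 *table)
{
	*slot = id & SKD_ID_SLOT_MASK;		/* which entry in the table */
	*table = id & SKD_ID_TABLE_MASK;	/* r/w, special, internal or FIT msg */
	/* the high bits advance by SKD_ID_INCR each time the entry is reused */
}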
  2074. #define DRIVER_INQ_EVPD_PAGE_CODE 0xDA
  2075. static void skd_do_inq_page_00(struct skd_device *skdev,
  2076. volatile struct fit_completion_entry_v1 *skcomp,
  2077. volatile struct fit_comp_error_info *skerr,
  2078. uint8_t *cdb, uint8_t *buf)
  2079. {
  2080. uint16_t insert_pt, max_bytes, drive_pages, drive_bytes, new_size;
  2081. /* Caller requested "supported pages". The driver needs to insert
  2082. * its page.
  2083. */
  2084. pr_debug("%s:%s:%d skd_do_driver_inquiry: modify supported pages.\n",
  2085. skdev->name, __func__, __LINE__);
  2086. /* If the device rejected the request because the CDB was
  2087. * improperly formed, then just leave.
  2088. */
  2089. if (skcomp->status == SAM_STAT_CHECK_CONDITION &&
  2090. skerr->key == ILLEGAL_REQUEST && skerr->code == 0x24)
  2091. return;
  2092. /* Get the amount of space the caller allocated */
  2093. max_bytes = (cdb[3] << 8) | cdb[4];
  2094. /* Get the number of pages actually returned by the device */
  2095. drive_pages = (buf[2] << 8) | buf[3];
  2096. drive_bytes = drive_pages + 4;
  2097. new_size = drive_pages + 1;
  2098. /* Supported pages must be in numerical order, so find where
  2099. * the driver page needs to be inserted into the list of
  2100. * pages returned by the device.
  2101. */
  2102. for (insert_pt = 4; insert_pt < drive_bytes; insert_pt++) {
  2103. if (buf[insert_pt] == DRIVER_INQ_EVPD_PAGE_CODE)
2104. return; /* Device is already using this page code; abort. */
  2105. else if (buf[insert_pt] > DRIVER_INQ_EVPD_PAGE_CODE)
  2106. break;
  2107. }
  2108. if (insert_pt < max_bytes) {
  2109. uint16_t u;
  2110. /* Shift everything up one byte to make room. */
  2111. for (u = new_size + 3; u > insert_pt; u--)
  2112. buf[u] = buf[u - 1];
  2113. buf[insert_pt] = DRIVER_INQ_EVPD_PAGE_CODE;
  2114. /* SCSI byte order increment of num_returned_bytes by 1 */
  2115. skcomp->num_returned_bytes =
  2116. be32_to_cpu(skcomp->num_returned_bytes) + 1;
  2117. skcomp->num_returned_bytes =
  2118. be32_to_cpu(skcomp->num_returned_bytes);
  2119. }
  2120. /* update page length field to reflect the driver's page too */
  2121. buf[2] = (uint8_t)((new_size >> 8) & 0xFF);
  2122. buf[3] = (uint8_t)((new_size >> 0) & 0xFF);
  2123. }
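/*
 * Worked example, not driver code, of the insertion done above: if the
 * device reports supported VPD pages {0x00, 0x80, 0x83}, the scan walks past
 * all of them (each is below DRIVER_INQ_EVPD_PAGE_CODE, 0xDA), the driver's
 * page code is appended at the end, and the page length bytes buf[2]/buf[3]
 * are rewritten to cover one extra entry.
 */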
  2124. static void skd_get_link_info(struct pci_dev *pdev, u8 *speed, u8 *width)
  2125. {
  2126. int pcie_reg;
  2127. u16 pci_bus_speed;
  2128. u8 pci_lanes;
  2129. pcie_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
  2130. if (pcie_reg) {
  2131. u16 linksta;
  2132. pci_read_config_word(pdev, pcie_reg + PCI_EXP_LNKSTA, &linksta);
  2133. pci_bus_speed = linksta & 0xF;
  2134. pci_lanes = (linksta & 0x3F0) >> 4;
  2135. } else {
  2136. *speed = STEC_LINK_UNKNOWN;
  2137. *width = 0xFF;
  2138. return;
  2139. }
  2140. switch (pci_bus_speed) {
  2141. case 1:
  2142. *speed = STEC_LINK_2_5GTS;
  2143. break;
  2144. case 2:
  2145. *speed = STEC_LINK_5GTS;
  2146. break;
  2147. case 3:
  2148. *speed = STEC_LINK_8GTS;
  2149. break;
  2150. default:
  2151. *speed = STEC_LINK_UNKNOWN;
  2152. break;
  2153. }
  2154. if (pci_lanes <= 0x20)
  2155. *width = pci_lanes;
  2156. else
  2157. *width = 0xFF;
  2158. }
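/*
 * Worked example, not driver code, of the PCIe Link Status decode above:
 * a LNKSTA value of 0x0042 carries speed code 2 in bits 3:0 (reported as
 * STEC_LINK_5GTS) and link width 4 in bits 9:4, i.e. a 5 GT/s x4 link.
 */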
  2159. static void skd_do_inq_page_da(struct skd_device *skdev,
  2160. volatile struct fit_completion_entry_v1 *skcomp,
  2161. volatile struct fit_comp_error_info *skerr,
  2162. uint8_t *cdb, uint8_t *buf)
  2163. {
  2164. struct pci_dev *pdev = skdev->pdev;
  2165. unsigned max_bytes;
  2166. struct driver_inquiry_data inq;
  2167. u16 val;
  2168. pr_debug("%s:%s:%d skd_do_driver_inquiry: return driver page\n",
  2169. skdev->name, __func__, __LINE__);
  2170. memset(&inq, 0, sizeof(inq));
  2171. inq.page_code = DRIVER_INQ_EVPD_PAGE_CODE;
  2172. skd_get_link_info(pdev, &inq.pcie_link_speed, &inq.pcie_link_lanes);
  2173. inq.pcie_bus_number = cpu_to_be16(pdev->bus->number);
  2174. inq.pcie_device_number = PCI_SLOT(pdev->devfn);
  2175. inq.pcie_function_number = PCI_FUNC(pdev->devfn);
  2176. pci_read_config_word(pdev, PCI_VENDOR_ID, &val);
  2177. inq.pcie_vendor_id = cpu_to_be16(val);
  2178. pci_read_config_word(pdev, PCI_DEVICE_ID, &val);
  2179. inq.pcie_device_id = cpu_to_be16(val);
  2180. pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &val);
  2181. inq.pcie_subsystem_vendor_id = cpu_to_be16(val);
  2182. pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &val);
  2183. inq.pcie_subsystem_device_id = cpu_to_be16(val);
2184. /* Driver version, fixed length, padded with spaces on the right */
  2185. inq.driver_version_length = sizeof(inq.driver_version);
  2186. memset(&inq.driver_version, ' ', sizeof(inq.driver_version));
  2187. memcpy(inq.driver_version, DRV_VER_COMPL,
  2188. min(sizeof(inq.driver_version), strlen(DRV_VER_COMPL)));
  2189. inq.page_length = cpu_to_be16((sizeof(inq) - 4));
  2190. /* Clear the error set by the device */
  2191. skcomp->status = SAM_STAT_GOOD;
  2192. memset((void *)skerr, 0, sizeof(*skerr));
  2193. /* copy response into output buffer */
  2194. max_bytes = (cdb[3] << 8) | cdb[4];
  2195. memcpy(buf, &inq, min_t(unsigned, max_bytes, sizeof(inq)));
  2196. skcomp->num_returned_bytes =
  2197. be32_to_cpu(min_t(uint16_t, max_bytes, sizeof(inq)));
  2198. }
  2199. static void skd_do_driver_inq(struct skd_device *skdev,
  2200. volatile struct fit_completion_entry_v1 *skcomp,
  2201. volatile struct fit_comp_error_info *skerr,
  2202. uint8_t *cdb, uint8_t *buf)
  2203. {
  2204. if (!buf)
  2205. return;
  2206. else if (cdb[0] != INQUIRY)
  2207. return; /* Not an INQUIRY */
  2208. else if ((cdb[1] & 1) == 0)
  2209. return; /* EVPD not set */
  2210. else if (cdb[2] == 0)
  2211. /* Need to add driver's page to supported pages list */
  2212. skd_do_inq_page_00(skdev, skcomp, skerr, cdb, buf);
  2213. else if (cdb[2] == DRIVER_INQ_EVPD_PAGE_CODE)
  2214. /* Caller requested driver's page */
  2215. skd_do_inq_page_da(skdev, skcomp, skerr, cdb, buf);
  2216. }
  2217. static unsigned char *skd_sg_1st_page_ptr(struct scatterlist *sg)
  2218. {
  2219. if (!sg)
  2220. return NULL;
  2221. if (!sg_page(sg))
  2222. return NULL;
  2223. return sg_virt(sg);
  2224. }
  2225. static void skd_process_scsi_inq(struct skd_device *skdev,
  2226. volatile struct fit_completion_entry_v1
  2227. *skcomp,
  2228. volatile struct fit_comp_error_info *skerr,
  2229. struct skd_special_context *skspcl)
  2230. {
  2231. uint8_t *buf;
  2232. struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
  2233. struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
  2234. dma_sync_sg_for_cpu(skdev->class_dev, skspcl->req.sg, skspcl->req.n_sg,
  2235. skspcl->req.sg_data_dir);
  2236. buf = skd_sg_1st_page_ptr(skspcl->req.sg);
  2237. if (buf)
  2238. skd_do_driver_inq(skdev, skcomp, skerr, scsi_req->cdb, buf);
  2239. }
  2240. static int skd_isr_completion_posted(struct skd_device *skdev,
  2241. int limit, int *enqueued)
  2242. {
  2243. volatile struct fit_completion_entry_v1 *skcmp = NULL;
  2244. volatile struct fit_comp_error_info *skerr;
  2245. u16 req_id;
  2246. u32 req_slot;
  2247. struct skd_request_context *skreq;
  2248. u16 cmp_cntxt = 0;
  2249. u8 cmp_status = 0;
  2250. u8 cmp_cycle = 0;
  2251. u32 cmp_bytes = 0;
  2252. int rc = 0;
  2253. int processed = 0;
  2254. for (;; ) {
  2255. SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);
  2256. skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
  2257. cmp_cycle = skcmp->cycle;
  2258. cmp_cntxt = skcmp->tag;
  2259. cmp_status = skcmp->status;
  2260. cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);
  2261. skerr = &skdev->skerr_table[skdev->skcomp_ix];
  2262. pr_debug("%s:%s:%d "
  2263. "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d "
  2264. "busy=%d rbytes=0x%x proto=%d\n",
  2265. skdev->name, __func__, __LINE__, skdev->skcomp_cycle,
  2266. skdev->skcomp_ix, cmp_cycle, cmp_cntxt, cmp_status,
  2267. skdev->in_flight, cmp_bytes, skdev->proto_ver);
  2268. if (cmp_cycle != skdev->skcomp_cycle) {
  2269. pr_debug("%s:%s:%d end of completions\n",
  2270. skdev->name, __func__, __LINE__);
  2271. break;
  2272. }
  2273. /*
  2274. * Update the completion queue head index and possibly
  2275. * the completion cycle count. 8-bit wrap-around.
  2276. */
  2277. skdev->skcomp_ix++;
  2278. if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
  2279. skdev->skcomp_ix = 0;
  2280. skdev->skcomp_cycle++;
  2281. }
  2282. /*
  2283. * The command context is a unique 32-bit ID. The low order
  2284. * bits help locate the request. The request is usually a
  2285. * r/w request (see skd_start() above) or a special request.
  2286. */
  2287. req_id = cmp_cntxt;
  2288. req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK;
  2289. /* Is this other than a r/w request? */
  2290. if (req_slot >= skdev->num_req_context) {
  2291. /*
  2292. * This is not a completion for a r/w request.
  2293. */
  2294. skd_complete_other(skdev, skcmp, skerr);
  2295. continue;
  2296. }
  2297. skreq = &skdev->skreq_table[req_slot];
  2298. /*
  2299. * Make sure the request ID for the slot matches.
  2300. */
  2301. if (skreq->id != req_id) {
  2302. pr_debug("%s:%s:%d mismatch comp_id=0x%x req_id=0x%x\n",
  2303. skdev->name, __func__, __LINE__,
  2304. req_id, skreq->id);
  2305. {
  2306. u16 new_id = cmp_cntxt;
  2307. pr_err("(%s): Completion mismatch "
  2308. "comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
  2309. skd_name(skdev), req_id,
  2310. skreq->id, new_id);
  2311. continue;
  2312. }
  2313. }
  2314. SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY);
  2315. if (skreq->state == SKD_REQ_STATE_ABORTED) {
  2316. pr_debug("%s:%s:%d reclaim req %p id=%04x\n",
  2317. skdev->name, __func__, __LINE__,
  2318. skreq, skreq->id);
  2319. /* a previously timed out command can
  2320. * now be cleaned up */
  2321. skd_release_skreq(skdev, skreq);
  2322. continue;
  2323. }
  2324. skreq->completion = *skcmp;
  2325. if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) {
  2326. skreq->err_info = *skerr;
  2327. skd_log_check_status(skdev, cmp_status, skerr->key,
  2328. skerr->code, skerr->qual,
  2329. skerr->fruc);
  2330. }
  2331. /* Release DMA resources for the request. */
  2332. if (skreq->n_sg > 0)
  2333. skd_postop_sg_list(skdev, skreq);
  2334. if (!skreq->req) {
  2335. pr_debug("%s:%s:%d NULL backptr skdreq %p, "
  2336. "req=0x%x req_id=0x%x\n",
  2337. skdev->name, __func__, __LINE__,
  2338. skreq, skreq->id, req_id);
  2339. } else {
  2340. /*
  2341. * Capture the outcome and post it back to the
  2342. * native request.
  2343. */
  2344. if (likely(cmp_status == SAM_STAT_GOOD))
  2345. skd_end_request(skdev, skreq, 0);
  2346. else
  2347. skd_resolve_req_exception(skdev, skreq);
  2348. }
  2349. /*
  2350. * Release the skreq, its FIT msg (if one), timeout slot,
  2351. * and queue depth.
  2352. */
  2353. skd_release_skreq(skdev, skreq);
  2354. /* skd_isr_comp_limit equal zero means no limit */
  2355. if (limit) {
  2356. if (++processed >= limit) {
  2357. rc = 1;
  2358. break;
  2359. }
  2360. }
  2361. }
  2362. if ((skdev->state == SKD_DRVR_STATE_PAUSING)
  2363. && (skdev->in_flight) == 0) {
  2364. skdev->state = SKD_DRVR_STATE_PAUSED;
  2365. wake_up_interruptible(&skdev->waitq);
  2366. }
  2367. return rc;
  2368. }
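/*
 * A minimal sketch, not driver code, of the completion-ring convention the
 * loop above consumes: each entry carries a cycle byte, an entry is only
 * valid while that byte equals the consumer's expected cycle, and the
 * expected cycle is incremented (8-bit wrap-around) every time the consumer
 * index wraps, so no producer index ever has to be read. The helper name is
 * hypothetical.
 */
static void skd_comp_advance_sketch(u32 *ix, u8 *cycle)
{
	if (++(*ix) >= SKD_N_COMPLETION_ENTRY) {
		*ix = 0;
		(*cycle)++;
	}
}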
  2369. static void skd_complete_other(struct skd_device *skdev,
  2370. volatile struct fit_completion_entry_v1 *skcomp,
  2371. volatile struct fit_comp_error_info *skerr)
  2372. {
  2373. u32 req_id = 0;
  2374. u32 req_table;
  2375. u32 req_slot;
  2376. struct skd_special_context *skspcl;
  2377. req_id = skcomp->tag;
  2378. req_table = req_id & SKD_ID_TABLE_MASK;
  2379. req_slot = req_id & SKD_ID_SLOT_MASK;
  2380. pr_debug("%s:%s:%d table=0x%x id=0x%x slot=%d\n",
  2381. skdev->name, __func__, __LINE__,
  2382. req_table, req_id, req_slot);
  2383. /*
  2384. * Based on the request id, determine how to dispatch this completion.
2385. * This switch/case is finding the good cases and forwarding the
  2386. * completion entry. Errors are reported below the switch.
  2387. */
  2388. switch (req_table) {
  2389. case SKD_ID_RW_REQUEST:
  2390. /*
  2391. * The caller, skd_completion_posted_isr() above,
  2392. * handles r/w requests. The only way we get here
  2393. * is if the req_slot is out of bounds.
  2394. */
  2395. break;
  2396. case SKD_ID_SPECIAL_REQUEST:
  2397. /*
  2398. * Make sure the req_slot is in bounds and that the id
  2399. * matches.
  2400. */
  2401. if (req_slot < skdev->n_special) {
  2402. skspcl = &skdev->skspcl_table[req_slot];
  2403. if (skspcl->req.id == req_id &&
  2404. skspcl->req.state == SKD_REQ_STATE_BUSY) {
  2405. skd_complete_special(skdev,
  2406. skcomp, skerr, skspcl);
  2407. return;
  2408. }
  2409. }
  2410. break;
  2411. case SKD_ID_INTERNAL:
  2412. if (req_slot == 0) {
  2413. skspcl = &skdev->internal_skspcl;
  2414. if (skspcl->req.id == req_id &&
  2415. skspcl->req.state == SKD_REQ_STATE_BUSY) {
  2416. skd_complete_internal(skdev,
  2417. skcomp, skerr, skspcl);
  2418. return;
  2419. }
  2420. }
  2421. break;
  2422. case SKD_ID_FIT_MSG:
  2423. /*
2424. * These IDs should never appear in a completion record.
  2425. */
  2426. break;
  2427. default:
  2428. /*
2429. * These IDs should never appear anywhere.
  2430. */
  2431. break;
  2432. }
  2433. /*
  2434. * If we get here it is a bad or stale id.
  2435. */
  2436. }
  2437. static void skd_complete_special(struct skd_device *skdev,
  2438. volatile struct fit_completion_entry_v1
  2439. *skcomp,
  2440. volatile struct fit_comp_error_info *skerr,
  2441. struct skd_special_context *skspcl)
  2442. {
  2443. pr_debug("%s:%s:%d completing special request %p\n",
  2444. skdev->name, __func__, __LINE__, skspcl);
  2445. if (skspcl->orphaned) {
  2446. /* Discard orphaned request */
  2447. /* ?: Can this release directly or does it need
  2448. * to use a worker? */
  2449. pr_debug("%s:%s:%d release orphaned %p\n",
  2450. skdev->name, __func__, __LINE__, skspcl);
  2451. skd_release_special(skdev, skspcl);
  2452. return;
  2453. }
  2454. skd_process_scsi_inq(skdev, skcomp, skerr, skspcl);
  2455. skspcl->req.state = SKD_REQ_STATE_COMPLETED;
  2456. skspcl->req.completion = *skcomp;
  2457. skspcl->req.err_info = *skerr;
  2458. skd_log_check_status(skdev, skspcl->req.completion.status, skerr->key,
  2459. skerr->code, skerr->qual, skerr->fruc);
  2460. wake_up_interruptible(&skdev->waitq);
  2461. }
  2462. /* assume spinlock is already held */
  2463. static void skd_release_special(struct skd_device *skdev,
  2464. struct skd_special_context *skspcl)
  2465. {
  2466. int i, was_depleted;
  2467. for (i = 0; i < skspcl->req.n_sg; i++) {
  2468. struct page *page = sg_page(&skspcl->req.sg[i]);
  2469. __free_page(page);
  2470. }
  2471. was_depleted = (skdev->skspcl_free_list == NULL);
  2472. skspcl->req.state = SKD_REQ_STATE_IDLE;
  2473. skspcl->req.id += SKD_ID_INCR;
  2474. skspcl->req.next =
  2475. (struct skd_request_context *)skdev->skspcl_free_list;
  2476. skdev->skspcl_free_list = (struct skd_special_context *)skspcl;
  2477. if (was_depleted) {
  2478. pr_debug("%s:%s:%d skspcl was depleted\n",
  2479. skdev->name, __func__, __LINE__);
2480. /* Free list was depleted. There might be waiters. */
  2481. wake_up_interruptible(&skdev->waitq);
  2482. }
  2483. }
  2484. static void skd_reset_skcomp(struct skd_device *skdev)
  2485. {
  2486. u32 nbytes;
  2487. struct fit_completion_entry_v1 *skcomp;
  2488. nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
  2489. nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
  2490. memset(skdev->skcomp_table, 0, nbytes);
  2491. skdev->skcomp_ix = 0;
  2492. skdev->skcomp_cycle = 1;
  2493. }
  2494. /*
  2495. *****************************************************************************
  2496. * INTERRUPTS
  2497. *****************************************************************************
  2498. */
  2499. static void skd_completion_worker(struct work_struct *work)
  2500. {
  2501. struct skd_device *skdev =
  2502. container_of(work, struct skd_device, completion_worker);
  2503. unsigned long flags;
  2504. int flush_enqueued = 0;
  2505. spin_lock_irqsave(&skdev->lock, flags);
  2506. /*
2507. * pass in limit=0, which means no limit:
  2508. * process everything in compq
  2509. */
  2510. skd_isr_completion_posted(skdev, 0, &flush_enqueued);
  2511. skd_request_fn(skdev->queue);
  2512. spin_unlock_irqrestore(&skdev->lock, flags);
  2513. }
  2514. static void skd_isr_msg_from_dev(struct skd_device *skdev);
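/*
 * Interrupt handler for legacy INTx and single-vector MSI. Loops
 * reading FIT_INT_STATUS_HOST, acks the bits it recognizes and
 * dispatches them; bulk completion processing beyond
 * skd_isr_comp_limit is deferred to the completion worker.
 */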
2515. static irqreturn_t
2516. skd_isr(int irq, void *ptr)
  2517. {
  2518. struct skd_device *skdev;
  2519. u32 intstat;
  2520. u32 ack;
  2521. int rc = 0;
  2522. int deferred = 0;
  2523. int flush_enqueued = 0;
  2524. skdev = (struct skd_device *)ptr;
  2525. spin_lock(&skdev->lock);
2526. for (;;) {
  2527. intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);
  2528. ack = FIT_INT_DEF_MASK;
  2529. ack &= intstat;
  2530. pr_debug("%s:%s:%d intstat=0x%x ack=0x%x\n",
  2531. skdev->name, __func__, __LINE__, intstat, ack);
  2532. /* As long as there is an int pending on device, keep
  2533. * running loop. When none, get out, but if we've never
  2534. * done any processing, call completion handler?
  2535. */
  2536. if (ack == 0) {
  2537. /* No interrupts on device, but run the completion
  2538. * processor anyway?
  2539. */
  2540. if (rc == 0)
2541. if (likely(skdev->state
  2542. == SKD_DRVR_STATE_ONLINE))
  2543. deferred = 1;
  2544. break;
  2545. }
  2546. rc = IRQ_HANDLED;
  2547. SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);
  2548. if (likely((skdev->state != SKD_DRVR_STATE_LOAD) &&
  2549. (skdev->state != SKD_DRVR_STATE_STOPPING))) {
  2550. if (intstat & FIT_ISH_COMPLETION_POSTED) {
  2551. /*
  2552. * If we have already deferred completion
  2553. * processing, don't bother running it again
  2554. */
  2555. if (deferred == 0)
  2556. deferred =
  2557. skd_isr_completion_posted(skdev,
  2558. skd_isr_comp_limit, &flush_enqueued);
  2559. }
  2560. if (intstat & FIT_ISH_FW_STATE_CHANGE) {
  2561. skd_isr_fwstate(skdev);
  2562. if (skdev->state == SKD_DRVR_STATE_FAULT ||
  2563. skdev->state ==
  2564. SKD_DRVR_STATE_DISAPPEARED) {
  2565. spin_unlock(&skdev->lock);
  2566. return rc;
  2567. }
  2568. }
  2569. if (intstat & FIT_ISH_MSG_FROM_DEV)
  2570. skd_isr_msg_from_dev(skdev);
  2571. }
  2572. }
  2573. if (unlikely(flush_enqueued))
  2574. skd_request_fn(skdev->queue);
  2575. if (deferred)
  2576. schedule_work(&skdev->completion_worker);
  2577. else if (!flush_enqueued)
  2578. skd_request_fn(skdev->queue);
  2579. spin_unlock(&skdev->lock);
  2580. return rc;
  2581. }
  2582. static void skd_drive_fault(struct skd_device *skdev)
  2583. {
  2584. skdev->state = SKD_DRVR_STATE_FAULT;
  2585. pr_err("(%s): Drive FAULT\n", skd_name(skdev));
  2586. }
  2587. static void skd_drive_disappeared(struct skd_device *skdev)
  2588. {
  2589. skdev->state = SKD_DRVR_STATE_DISAPPEARED;
  2590. pr_err("(%s): Drive DISAPPEARED\n", skd_name(skdev));
  2591. }
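/*
 * Handle a firmware state-change interrupt: read the drive state
 * from FIT_STATUS and map it to the corresponding driver state,
 * kicking off the next action (soft reset, handshake message,
 * request recovery, queue start/stop) as needed.
 */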
  2592. static void skd_isr_fwstate(struct skd_device *skdev)
  2593. {
  2594. u32 sense;
  2595. u32 state;
  2596. u32 mtd;
  2597. int prev_driver_state = skdev->state;
  2598. sense = SKD_READL(skdev, FIT_STATUS);
  2599. state = sense & FIT_SR_DRIVE_STATE_MASK;
  2600. pr_err("(%s): s1120 state %s(%d)=>%s(%d)\n",
  2601. skd_name(skdev),
  2602. skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
  2603. skd_drive_state_to_str(state), state);
  2604. skdev->drive_state = state;
  2605. switch (skdev->drive_state) {
  2606. case FIT_SR_DRIVE_INIT:
  2607. if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
  2608. skd_disable_interrupts(skdev);
  2609. break;
  2610. }
  2611. if (skdev->state == SKD_DRVR_STATE_RESTARTING)
  2612. skd_recover_requests(skdev, 0);
  2613. if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
  2614. skdev->timer_countdown = SKD_STARTING_TIMO;
  2615. skdev->state = SKD_DRVR_STATE_STARTING;
  2616. skd_soft_reset(skdev);
  2617. break;
  2618. }
  2619. mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0);
  2620. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2621. skdev->last_mtd = mtd;
  2622. break;
  2623. case FIT_SR_DRIVE_ONLINE:
  2624. skdev->cur_max_queue_depth = skd_max_queue_depth;
  2625. if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth)
  2626. skdev->cur_max_queue_depth = skdev->dev_max_queue_depth;
  2627. skdev->queue_low_water_mark =
  2628. skdev->cur_max_queue_depth * 2 / 3 + 1;
  2629. if (skdev->queue_low_water_mark < 1)
  2630. skdev->queue_low_water_mark = 1;
  2631. pr_info(
  2632. "(%s): Queue depth limit=%d dev=%d lowat=%d\n",
  2633. skd_name(skdev),
  2634. skdev->cur_max_queue_depth,
  2635. skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
  2636. skd_refresh_device_data(skdev);
  2637. break;
  2638. case FIT_SR_DRIVE_BUSY:
  2639. skdev->state = SKD_DRVR_STATE_BUSY;
  2640. skdev->timer_countdown = SKD_BUSY_TIMO;
  2641. skd_quiesce_dev(skdev);
  2642. break;
  2643. case FIT_SR_DRIVE_BUSY_SANITIZE:
2644. /* set timer for 3 seconds; we'll abort any unfinished
2645. * commands after it expires
  2646. */
  2647. skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
  2648. skdev->timer_countdown = SKD_TIMER_SECONDS(3);
  2649. blk_start_queue(skdev->queue);
  2650. break;
  2651. case FIT_SR_DRIVE_BUSY_ERASE:
  2652. skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
  2653. skdev->timer_countdown = SKD_BUSY_TIMO;
  2654. break;
  2655. case FIT_SR_DRIVE_OFFLINE:
  2656. skdev->state = SKD_DRVR_STATE_IDLE;
  2657. break;
  2658. case FIT_SR_DRIVE_SOFT_RESET:
  2659. switch (skdev->state) {
  2660. case SKD_DRVR_STATE_STARTING:
  2661. case SKD_DRVR_STATE_RESTARTING:
  2662. /* Expected by a caller of skd_soft_reset() */
  2663. break;
  2664. default:
  2665. skdev->state = SKD_DRVR_STATE_RESTARTING;
  2666. break;
  2667. }
  2668. break;
  2669. case FIT_SR_DRIVE_FW_BOOTING:
  2670. pr_debug("%s:%s:%d ISR FIT_SR_DRIVE_FW_BOOTING %s\n",
  2671. skdev->name, __func__, __LINE__, skdev->name);
  2672. skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
  2673. skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
  2674. break;
  2675. case FIT_SR_DRIVE_DEGRADED:
  2676. case FIT_SR_PCIE_LINK_DOWN:
  2677. case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
  2678. break;
  2679. case FIT_SR_DRIVE_FAULT:
  2680. skd_drive_fault(skdev);
  2681. skd_recover_requests(skdev, 0);
  2682. blk_start_queue(skdev->queue);
  2683. break;
  2684. /* PCIe bus returned all Fs? */
  2685. case 0xFF:
  2686. pr_info("(%s): state=0x%x sense=0x%x\n",
  2687. skd_name(skdev), state, sense);
  2688. skd_drive_disappeared(skdev);
  2689. skd_recover_requests(skdev, 0);
  2690. blk_start_queue(skdev->queue);
  2691. break;
  2692. default:
  2693. /*
2694. * Unknown FW State. Wait for a state we recognize.
  2695. */
  2696. break;
  2697. }
  2698. pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
  2699. skd_name(skdev),
  2700. skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
  2701. skd_skdev_state_to_str(skdev->state), skdev->state);
  2702. }
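/*
 * Abort or requeue everything that was in flight. Walks the skreq,
 * skmsg and skspcl tables, releases DMA mappings, fails or requeues
 * BUSY requests, then rebuilds all three free lists and clears the
 * timeout accounting.
 */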
  2703. static void skd_recover_requests(struct skd_device *skdev, int requeue)
  2704. {
  2705. int i;
  2706. for (i = 0; i < skdev->num_req_context; i++) {
  2707. struct skd_request_context *skreq = &skdev->skreq_table[i];
  2708. if (skreq->state == SKD_REQ_STATE_BUSY) {
  2709. skd_log_skreq(skdev, skreq, "recover");
  2710. SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0);
  2711. SKD_ASSERT(skreq->req != NULL);
  2712. /* Release DMA resources for the request. */
  2713. if (skreq->n_sg > 0)
  2714. skd_postop_sg_list(skdev, skreq);
  2715. if (requeue &&
  2716. (unsigned long) ++skreq->req->special <
  2717. SKD_MAX_RETRIES)
  2718. blk_requeue_request(skdev->queue, skreq->req);
  2719. else
  2720. skd_end_request(skdev, skreq, -EIO);
  2721. skreq->req = NULL;
  2722. skreq->state = SKD_REQ_STATE_IDLE;
  2723. skreq->id += SKD_ID_INCR;
  2724. }
  2725. if (i > 0)
  2726. skreq[-1].next = skreq;
  2727. skreq->next = NULL;
  2728. }
  2729. skdev->skreq_free_list = skdev->skreq_table;
  2730. for (i = 0; i < skdev->num_fitmsg_context; i++) {
  2731. struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i];
  2732. if (skmsg->state == SKD_MSG_STATE_BUSY) {
  2733. skd_log_skmsg(skdev, skmsg, "salvaged");
  2734. SKD_ASSERT((skmsg->id & SKD_ID_INCR) != 0);
  2735. skmsg->state = SKD_MSG_STATE_IDLE;
  2736. skmsg->id += SKD_ID_INCR;
  2737. }
  2738. if (i > 0)
  2739. skmsg[-1].next = skmsg;
  2740. skmsg->next = NULL;
  2741. }
  2742. skdev->skmsg_free_list = skdev->skmsg_table;
  2743. for (i = 0; i < skdev->n_special; i++) {
  2744. struct skd_special_context *skspcl = &skdev->skspcl_table[i];
  2745. /* If orphaned, reclaim it because it has already been reported
  2746. * to the process as an error (it was just waiting for
2747. * a completion that didn't come, and now it will never come).
2748. * If busy, change to a state that will cause it to error
2749. * out in the wait routine and let it do the normal
2750. * reporting and reclaiming.
  2751. */
  2752. if (skspcl->req.state == SKD_REQ_STATE_BUSY) {
  2753. if (skspcl->orphaned) {
  2754. pr_debug("%s:%s:%d orphaned %p\n",
  2755. skdev->name, __func__, __LINE__,
  2756. skspcl);
  2757. skd_release_special(skdev, skspcl);
  2758. } else {
  2759. pr_debug("%s:%s:%d not orphaned %p\n",
  2760. skdev->name, __func__, __LINE__,
  2761. skspcl);
  2762. skspcl->req.state = SKD_REQ_STATE_ABORTED;
  2763. }
  2764. }
  2765. }
  2766. skdev->skspcl_free_list = skdev->skspcl_table;
  2767. for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++)
  2768. skdev->timeout_slot[i] = 0;
  2769. skdev->in_flight = 0;
  2770. }
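/*
 * One step of the FIT init handshake. Each message the driver writes
 * to FIT_MSG_TO_DEVICE is acked through FIT_MSG_FROM_DEVICE, and this
 * handler issues the next one in the sequence: FITFW_INIT ->
 * GET_CMDQ_DEPTH -> SET_COMPQ_DEPTH -> SET_COMPQ_ADDR ->
 * CMD_LOG_HOST_ID -> TIME_STAMP_LO/HI -> ARM_QUEUE.
 */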
  2771. static void skd_isr_msg_from_dev(struct skd_device *skdev)
  2772. {
  2773. u32 mfd;
  2774. u32 mtd;
  2775. u32 data;
  2776. mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
  2777. pr_debug("%s:%s:%d mfd=0x%x last_mtd=0x%x\n",
  2778. skdev->name, __func__, __LINE__, mfd, skdev->last_mtd);
  2779. /* ignore any mtd that is an ack for something we didn't send */
  2780. if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
  2781. return;
  2782. switch (FIT_MXD_TYPE(mfd)) {
  2783. case FIT_MTD_FITFW_INIT:
  2784. skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);
  2785. if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
  2786. pr_err("(%s): protocol mismatch\n",
  2787. skdev->name);
  2788. pr_err("(%s): got=%d support=%d\n",
  2789. skdev->name, skdev->proto_ver,
  2790. FIT_PROTOCOL_VERSION_1);
  2791. pr_err("(%s): please upgrade driver\n",
  2792. skdev->name);
  2793. skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
  2794. skd_soft_reset(skdev);
  2795. break;
  2796. }
  2797. mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
  2798. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2799. skdev->last_mtd = mtd;
  2800. break;
  2801. case FIT_MTD_GET_CMDQ_DEPTH:
  2802. skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd);
  2803. mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0,
  2804. SKD_N_COMPLETION_ENTRY);
  2805. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2806. skdev->last_mtd = mtd;
  2807. break;
  2808. case FIT_MTD_SET_COMPQ_DEPTH:
  2809. SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG);
  2810. mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0);
  2811. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2812. skdev->last_mtd = mtd;
  2813. break;
  2814. case FIT_MTD_SET_COMPQ_ADDR:
  2815. skd_reset_skcomp(skdev);
  2816. mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno);
  2817. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2818. skdev->last_mtd = mtd;
  2819. break;
  2820. case FIT_MTD_CMD_LOG_HOST_ID:
  2821. skdev->connect_time_stamp = get_seconds();
  2822. data = skdev->connect_time_stamp & 0xFFFF;
  2823. mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data);
  2824. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2825. skdev->last_mtd = mtd;
  2826. break;
  2827. case FIT_MTD_CMD_LOG_TIME_STAMP_LO:
  2828. skdev->drive_jiffies = FIT_MXD_DATA(mfd);
  2829. data = (skdev->connect_time_stamp >> 16) & 0xFFFF;
  2830. mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data);
  2831. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2832. skdev->last_mtd = mtd;
  2833. break;
  2834. case FIT_MTD_CMD_LOG_TIME_STAMP_HI:
  2835. skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16);
  2836. mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
  2837. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2838. skdev->last_mtd = mtd;
  2839. pr_err("(%s): Time sync driver=0x%x device=0x%x\n",
  2840. skd_name(skdev),
  2841. skdev->connect_time_stamp, skdev->drive_jiffies);
  2842. break;
  2843. case FIT_MTD_ARM_QUEUE:
  2844. skdev->last_mtd = 0;
  2845. /*
  2846. * State should be, or soon will be, FIT_SR_DRIVE_ONLINE.
  2847. */
  2848. break;
  2849. default:
  2850. break;
  2851. }
  2852. }
  2853. static void skd_disable_interrupts(struct skd_device *skdev)
  2854. {
  2855. u32 sense;
  2856. sense = SKD_READL(skdev, FIT_CONTROL);
  2857. sense &= ~FIT_CR_ENABLE_INTERRUPTS;
  2858. SKD_WRITEL(skdev, sense, FIT_CONTROL);
  2859. pr_debug("%s:%s:%d sense 0x%x\n",
  2860. skdev->name, __func__, __LINE__, sense);
2861. /* Note that all 1s are written. A 1-bit means
  2862. * disable, a 0 means enable.
  2863. */
  2864. SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
  2865. }
  2866. static void skd_enable_interrupts(struct skd_device *skdev)
  2867. {
  2868. u32 val;
  2869. /* unmask interrupts first */
  2870. val = FIT_ISH_FW_STATE_CHANGE +
  2871. FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV;
2872. /* Note that the complement of the mask is written. A 1-bit means
  2873. * disable, a 0 means enable. */
  2874. SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
  2875. pr_debug("%s:%s:%d interrupt mask=0x%x\n",
  2876. skdev->name, __func__, __LINE__, ~val);
  2877. val = SKD_READL(skdev, FIT_CONTROL);
  2878. val |= FIT_CR_ENABLE_INTERRUPTS;
  2879. pr_debug("%s:%s:%d control=0x%x\n",
  2880. skdev->name, __func__, __LINE__, val);
  2881. SKD_WRITEL(skdev, val, FIT_CONTROL);
  2882. }
  2883. /*
  2884. *****************************************************************************
  2885. * START, STOP, RESTART, QUIESCE, UNQUIESCE
  2886. *****************************************************************************
  2887. */
  2888. static void skd_soft_reset(struct skd_device *skdev)
  2889. {
  2890. u32 val;
  2891. val = SKD_READL(skdev, FIT_CONTROL);
  2892. val |= (FIT_CR_SOFT_RESET);
  2893. pr_debug("%s:%s:%d control=0x%x\n",
  2894. skdev->name, __func__, __LINE__, val);
  2895. SKD_WRITEL(skdev, val, FIT_CONTROL);
  2896. }
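/*
 * Bring the device up: ack any stale interrupts, sample the drive
 * state, enable interrupts, then take whatever bring-up action that
 * state calls for (usually a soft reset). The register dump at the
 * end is purely for debugging.
 */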
  2897. static void skd_start_device(struct skd_device *skdev)
  2898. {
  2899. unsigned long flags;
  2900. u32 sense;
  2901. u32 state;
  2902. spin_lock_irqsave(&skdev->lock, flags);
  2903. /* ack all ghost interrupts */
  2904. SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
  2905. sense = SKD_READL(skdev, FIT_STATUS);
  2906. pr_debug("%s:%s:%d initial status=0x%x\n",
  2907. skdev->name, __func__, __LINE__, sense);
  2908. state = sense & FIT_SR_DRIVE_STATE_MASK;
  2909. skdev->drive_state = state;
  2910. skdev->last_mtd = 0;
  2911. skdev->state = SKD_DRVR_STATE_STARTING;
  2912. skdev->timer_countdown = SKD_STARTING_TIMO;
  2913. skd_enable_interrupts(skdev);
  2914. switch (skdev->drive_state) {
  2915. case FIT_SR_DRIVE_OFFLINE:
  2916. pr_err("(%s): Drive offline...\n", skd_name(skdev));
  2917. break;
  2918. case FIT_SR_DRIVE_FW_BOOTING:
  2919. pr_debug("%s:%s:%d FIT_SR_DRIVE_FW_BOOTING %s\n",
  2920. skdev->name, __func__, __LINE__, skdev->name);
  2921. skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
  2922. skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
  2923. break;
  2924. case FIT_SR_DRIVE_BUSY_SANITIZE:
  2925. pr_info("(%s): Start: BUSY_SANITIZE\n",
  2926. skd_name(skdev));
  2927. skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
  2928. skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
  2929. break;
  2930. case FIT_SR_DRIVE_BUSY_ERASE:
  2931. pr_info("(%s): Start: BUSY_ERASE\n", skd_name(skdev));
  2932. skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
  2933. skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
  2934. break;
  2935. case FIT_SR_DRIVE_INIT:
  2936. case FIT_SR_DRIVE_ONLINE:
  2937. skd_soft_reset(skdev);
  2938. break;
  2939. case FIT_SR_DRIVE_BUSY:
  2940. pr_err("(%s): Drive Busy...\n", skd_name(skdev));
  2941. skdev->state = SKD_DRVR_STATE_BUSY;
  2942. skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
  2943. break;
  2944. case FIT_SR_DRIVE_SOFT_RESET:
  2945. pr_err("(%s) drive soft reset in prog\n",
  2946. skd_name(skdev));
  2947. break;
  2948. case FIT_SR_DRIVE_FAULT:
  2949. /* Fault state is bad...soft reset won't do it...
  2950. * Hard reset, maybe, but does it work on device?
  2951. * For now, just fault so the system doesn't hang.
  2952. */
  2953. skd_drive_fault(skdev);
2954. /* start the queue so we can respond with error to requests */
  2955. pr_debug("%s:%s:%d starting %s queue\n",
  2956. skdev->name, __func__, __LINE__, skdev->name);
  2957. blk_start_queue(skdev->queue);
  2958. skdev->gendisk_on = -1;
  2959. wake_up_interruptible(&skdev->waitq);
  2960. break;
  2961. case 0xFF:
  2962. /* Most likely the device isn't there or isn't responding
  2963. * to the BAR1 addresses. */
  2964. skd_drive_disappeared(skdev);
2965. /* start the queue so we can respond with error to requests */
  2966. pr_debug("%s:%s:%d starting %s queue to error-out reqs\n",
  2967. skdev->name, __func__, __LINE__, skdev->name);
  2968. blk_start_queue(skdev->queue);
  2969. skdev->gendisk_on = -1;
  2970. wake_up_interruptible(&skdev->waitq);
  2971. break;
  2972. default:
  2973. pr_err("(%s) Start: unknown state %x\n",
  2974. skd_name(skdev), skdev->drive_state);
  2975. break;
  2976. }
  2977. state = SKD_READL(skdev, FIT_CONTROL);
  2978. pr_debug("%s:%s:%d FIT Control Status=0x%x\n",
  2979. skdev->name, __func__, __LINE__, state);
  2980. state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
  2981. pr_debug("%s:%s:%d Intr Status=0x%x\n",
  2982. skdev->name, __func__, __LINE__, state);
  2983. state = SKD_READL(skdev, FIT_INT_MASK_HOST);
  2984. pr_debug("%s:%s:%d Intr Mask=0x%x\n",
  2985. skdev->name, __func__, __LINE__, state);
  2986. state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
  2987. pr_debug("%s:%s:%d Msg from Dev=0x%x\n",
  2988. skdev->name, __func__, __LINE__, state);
  2989. state = SKD_READL(skdev, FIT_HW_VERSION);
  2990. pr_debug("%s:%s:%d HW version=0x%x\n",
  2991. skdev->name, __func__, __LINE__, state);
  2992. spin_unlock_irqrestore(&skdev->lock, flags);
  2993. }
  2994. static void skd_stop_device(struct skd_device *skdev)
  2995. {
  2996. unsigned long flags;
  2997. struct skd_special_context *skspcl = &skdev->internal_skspcl;
  2998. u32 dev_state;
  2999. int i;
  3000. spin_lock_irqsave(&skdev->lock, flags);
  3001. if (skdev->state != SKD_DRVR_STATE_ONLINE) {
  3002. pr_err("(%s): skd_stop_device not online no sync\n",
  3003. skd_name(skdev));
  3004. goto stop_out;
  3005. }
  3006. if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
  3007. pr_err("(%s): skd_stop_device no special\n",
  3008. skd_name(skdev));
  3009. goto stop_out;
  3010. }
  3011. skdev->state = SKD_DRVR_STATE_SYNCING;
  3012. skdev->sync_done = 0;
  3013. skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
  3014. spin_unlock_irqrestore(&skdev->lock, flags);
  3015. wait_event_interruptible_timeout(skdev->waitq,
  3016. (skdev->sync_done), (10 * HZ));
  3017. spin_lock_irqsave(&skdev->lock, flags);
  3018. switch (skdev->sync_done) {
  3019. case 0:
  3020. pr_err("(%s): skd_stop_device no sync\n",
  3021. skd_name(skdev));
  3022. break;
  3023. case 1:
  3024. pr_err("(%s): skd_stop_device sync done\n",
  3025. skd_name(skdev));
  3026. break;
  3027. default:
  3028. pr_err("(%s): skd_stop_device sync error\n",
  3029. skd_name(skdev));
  3030. }
  3031. stop_out:
  3032. skdev->state = SKD_DRVR_STATE_STOPPING;
  3033. spin_unlock_irqrestore(&skdev->lock, flags);
  3034. skd_kill_timer(skdev);
  3035. spin_lock_irqsave(&skdev->lock, flags);
  3036. skd_disable_interrupts(skdev);
  3037. /* ensure all ints on device are cleared */
  3038. /* soft reset the device to unload with a clean slate */
  3039. SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
  3040. SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);
  3041. spin_unlock_irqrestore(&skdev->lock, flags);
  3042. /* poll every 100ms, 1 second timeout */
  3043. for (i = 0; i < 10; i++) {
  3044. dev_state =
  3045. SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
  3046. if (dev_state == FIT_SR_DRIVE_INIT)
  3047. break;
  3048. set_current_state(TASK_INTERRUPTIBLE);
  3049. schedule_timeout(msecs_to_jiffies(100));
  3050. }
  3051. if (dev_state != FIT_SR_DRIVE_INIT)
  3052. pr_err("(%s): skd_stop_device state error 0x%02x\n",
  3053. skd_name(skdev), dev_state);
  3054. }
  3055. /* assume spinlock is held */
  3056. static void skd_restart_device(struct skd_device *skdev)
  3057. {
  3058. u32 state;
  3059. /* ack all ghost interrupts */
  3060. SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
  3061. state = SKD_READL(skdev, FIT_STATUS);
  3062. pr_debug("%s:%s:%d drive status=0x%x\n",
  3063. skdev->name, __func__, __LINE__, state);
  3064. state &= FIT_SR_DRIVE_STATE_MASK;
  3065. skdev->drive_state = state;
  3066. skdev->last_mtd = 0;
  3067. skdev->state = SKD_DRVR_STATE_RESTARTING;
  3068. skdev->timer_countdown = SKD_RESTARTING_TIMO;
  3069. skd_soft_reset(skdev);
  3070. }
  3071. /* assume spinlock is held */
  3072. static int skd_quiesce_dev(struct skd_device *skdev)
  3073. {
  3074. int rc = 0;
  3075. switch (skdev->state) {
  3076. case SKD_DRVR_STATE_BUSY:
  3077. case SKD_DRVR_STATE_BUSY_IMMINENT:
  3078. pr_debug("%s:%s:%d stopping %s queue\n",
  3079. skdev->name, __func__, __LINE__, skdev->name);
  3080. blk_stop_queue(skdev->queue);
  3081. break;
  3082. case SKD_DRVR_STATE_ONLINE:
  3083. case SKD_DRVR_STATE_STOPPING:
  3084. case SKD_DRVR_STATE_SYNCING:
  3085. case SKD_DRVR_STATE_PAUSING:
  3086. case SKD_DRVR_STATE_PAUSED:
  3087. case SKD_DRVR_STATE_STARTING:
  3088. case SKD_DRVR_STATE_RESTARTING:
  3089. case SKD_DRVR_STATE_RESUMING:
  3090. default:
  3091. rc = -EINVAL;
  3092. pr_debug("%s:%s:%d state [%d] not implemented\n",
  3093. skdev->name, __func__, __LINE__, skdev->state);
  3094. }
  3095. return rc;
  3096. }
  3097. /* assume spinlock is held */
  3098. static int skd_unquiesce_dev(struct skd_device *skdev)
  3099. {
  3100. int prev_driver_state = skdev->state;
  3101. skd_log_skdev(skdev, "unquiesce");
  3102. if (skdev->state == SKD_DRVR_STATE_ONLINE) {
  3103. pr_debug("%s:%s:%d **** device already ONLINE\n",
  3104. skdev->name, __func__, __LINE__);
  3105. return 0;
  3106. }
  3107. if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
  3108. /*
3109. * If there has been a state change to other than
3110. * ONLINE, we will rely on the controller state change
3111. * to come back online and restart the queue.
3112. * The BUSY state means the driver is ready to
3113. * continue normal processing but is waiting for the
3114. * controller to become available.
  3115. */
  3116. skdev->state = SKD_DRVR_STATE_BUSY;
  3117. pr_debug("%s:%s:%d drive BUSY state\n",
  3118. skdev->name, __func__, __LINE__);
  3119. return 0;
  3120. }
  3121. /*
3122. * Drive has just come online; the driver is either in startup,
3123. * paused performing a task, or busy waiting for hardware.
  3124. */
  3125. switch (skdev->state) {
  3126. case SKD_DRVR_STATE_PAUSED:
  3127. case SKD_DRVR_STATE_BUSY:
  3128. case SKD_DRVR_STATE_BUSY_IMMINENT:
  3129. case SKD_DRVR_STATE_BUSY_ERASE:
  3130. case SKD_DRVR_STATE_STARTING:
  3131. case SKD_DRVR_STATE_RESTARTING:
  3132. case SKD_DRVR_STATE_FAULT:
  3133. case SKD_DRVR_STATE_IDLE:
  3134. case SKD_DRVR_STATE_LOAD:
  3135. skdev->state = SKD_DRVR_STATE_ONLINE;
  3136. pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
  3137. skd_name(skdev),
  3138. skd_skdev_state_to_str(prev_driver_state),
  3139. prev_driver_state, skd_skdev_state_to_str(skdev->state),
  3140. skdev->state);
  3141. pr_debug("%s:%s:%d **** device ONLINE...starting block queue\n",
  3142. skdev->name, __func__, __LINE__);
  3143. pr_debug("%s:%s:%d starting %s queue\n",
  3144. skdev->name, __func__, __LINE__, skdev->name);
  3145. pr_info("(%s): STEC s1120 ONLINE\n", skd_name(skdev));
  3146. blk_start_queue(skdev->queue);
  3147. skdev->gendisk_on = 1;
  3148. wake_up_interruptible(&skdev->waitq);
  3149. break;
  3150. case SKD_DRVR_STATE_DISAPPEARED:
  3151. default:
3152. pr_debug("%s:%s:%d **** driver state %d, not implemented\n",
  3153. skdev->name, __func__, __LINE__,
  3154. skdev->state);
  3155. return -EBUSY;
  3156. }
  3157. return 0;
  3158. }
  3159. /*
  3160. *****************************************************************************
  3161. * PCIe MSI/MSI-X INTERRUPT HANDLERS
  3162. *****************************************************************************
  3163. */
  3164. static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data)
  3165. {
  3166. struct skd_device *skdev = skd_host_data;
  3167. unsigned long flags;
  3168. spin_lock_irqsave(&skdev->lock, flags);
  3169. pr_debug("%s:%s:%d MSIX = 0x%x\n",
  3170. skdev->name, __func__, __LINE__,
  3171. SKD_READL(skdev, FIT_INT_STATUS_HOST));
  3172. pr_err("(%s): MSIX reserved irq %d = 0x%x\n", skd_name(skdev),
  3173. irq, SKD_READL(skdev, FIT_INT_STATUS_HOST));
  3174. SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
  3175. spin_unlock_irqrestore(&skdev->lock, flags);
  3176. return IRQ_HANDLED;
  3177. }
  3178. static irqreturn_t skd_statec_isr(int irq, void *skd_host_data)
  3179. {
  3180. struct skd_device *skdev = skd_host_data;
  3181. unsigned long flags;
  3182. spin_lock_irqsave(&skdev->lock, flags);
  3183. pr_debug("%s:%s:%d MSIX = 0x%x\n",
  3184. skdev->name, __func__, __LINE__,
  3185. SKD_READL(skdev, FIT_INT_STATUS_HOST));
  3186. SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
  3187. skd_isr_fwstate(skdev);
  3188. spin_unlock_irqrestore(&skdev->lock, flags);
  3189. return IRQ_HANDLED;
  3190. }
  3191. static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
  3192. {
  3193. struct skd_device *skdev = skd_host_data;
  3194. unsigned long flags;
  3195. int flush_enqueued = 0;
  3196. int deferred;
  3197. spin_lock_irqsave(&skdev->lock, flags);
  3198. pr_debug("%s:%s:%d MSIX = 0x%x\n",
  3199. skdev->name, __func__, __LINE__,
  3200. SKD_READL(skdev, FIT_INT_STATUS_HOST));
  3201. SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
  3202. deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
  3203. &flush_enqueued);
  3204. if (flush_enqueued)
  3205. skd_request_fn(skdev->queue);
  3206. if (deferred)
  3207. schedule_work(&skdev->completion_worker);
  3208. else if (!flush_enqueued)
  3209. skd_request_fn(skdev->queue);
  3210. spin_unlock_irqrestore(&skdev->lock, flags);
  3211. return IRQ_HANDLED;
  3212. }
  3213. static irqreturn_t skd_msg_isr(int irq, void *skd_host_data)
  3214. {
  3215. struct skd_device *skdev = skd_host_data;
  3216. unsigned long flags;
  3217. spin_lock_irqsave(&skdev->lock, flags);
  3218. pr_debug("%s:%s:%d MSIX = 0x%x\n",
  3219. skdev->name, __func__, __LINE__,
  3220. SKD_READL(skdev, FIT_INT_STATUS_HOST));
  3221. SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
  3222. skd_isr_msg_from_dev(skdev);
  3223. spin_unlock_irqrestore(&skdev->lock, flags);
  3224. return IRQ_HANDLED;
  3225. }
  3226. static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
  3227. {
  3228. struct skd_device *skdev = skd_host_data;
  3229. unsigned long flags;
  3230. spin_lock_irqsave(&skdev->lock, flags);
  3231. pr_debug("%s:%s:%d MSIX = 0x%x\n",
  3232. skdev->name, __func__, __LINE__,
  3233. SKD_READL(skdev, FIT_INT_STATUS_HOST));
  3234. SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
  3235. spin_unlock_irqrestore(&skdev->lock, flags);
  3236. return IRQ_HANDLED;
  3237. }
  3238. /*
  3239. *****************************************************************************
  3240. * PCIe MSI/MSI-X SETUP
  3241. *****************************************************************************
  3242. */
  3243. struct skd_msix_entry {
  3244. int have_irq;
  3245. u32 vector;
  3246. u32 entry;
  3247. struct skd_device *rsp;
  3248. char isr_name[30];
  3249. };
  3250. struct skd_init_msix_entry {
  3251. const char *name;
  3252. irq_handler_t handler;
  3253. };
  3254. #define SKD_MAX_MSIX_COUNT 13
  3255. #define SKD_MIN_MSIX_COUNT 7
  3256. #define SKD_BASE_MSIX_IRQ 4
  3257. static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
  3258. { "(DMA 0)", skd_reserved_isr },
  3259. { "(DMA 1)", skd_reserved_isr },
  3260. { "(DMA 2)", skd_reserved_isr },
  3261. { "(DMA 3)", skd_reserved_isr },
  3262. { "(State Change)", skd_statec_isr },
  3263. { "(COMPL_Q)", skd_comp_q },
  3264. { "(MSG)", skd_msg_isr },
  3265. { "(Reserved)", skd_reserved_isr },
  3266. { "(Reserved)", skd_reserved_isr },
  3267. { "(Queue Full 0)", skd_qfull_isr },
  3268. { "(Queue Full 1)", skd_qfull_isr },
  3269. { "(Queue Full 2)", skd_qfull_isr },
  3270. { "(Queue Full 3)", skd_qfull_isr },
  3271. };
  3272. static void skd_release_msix(struct skd_device *skdev)
  3273. {
  3274. struct skd_msix_entry *qentry;
  3275. int i;
  3276. if (skdev->msix_entries == NULL)
  3277. return;
  3278. for (i = 0; i < skdev->msix_count; i++) {
  3279. qentry = &skdev->msix_entries[i];
  3280. skdev = qentry->rsp;
  3281. if (qentry->have_irq)
  3282. devm_free_irq(&skdev->pdev->dev,
  3283. qentry->vector, qentry->rsp);
  3284. }
  3285. pci_disable_msix(skdev->pdev);
  3286. kfree(skdev->msix_entries);
  3287. skdev->msix_count = 0;
  3288. skdev->msix_entries = NULL;
  3289. }
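/*
 * Allocate and wire up MSI-X vectors. pci_enable_msix() returns a
 * positive count here when fewer vectors are available than requested,
 * so the request is retried with that count as long as it is at least
 * SKD_MIN_MSIX_COUNT; one handler from msix_entries[] is then
 * registered per vector.
 */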
  3290. static int skd_acquire_msix(struct skd_device *skdev)
  3291. {
  3292. int i, rc;
  3293. struct pci_dev *pdev;
  3294. struct msix_entry *entries = NULL;
  3295. struct skd_msix_entry *qentry;
  3296. pdev = skdev->pdev;
  3297. skdev->msix_count = SKD_MAX_MSIX_COUNT;
  3298. entries = kzalloc(sizeof(struct msix_entry) * SKD_MAX_MSIX_COUNT,
  3299. GFP_KERNEL);
  3300. if (!entries)
  3301. return -ENOMEM;
  3302. for (i = 0; i < SKD_MAX_MSIX_COUNT; i++)
  3303. entries[i].entry = i;
  3304. rc = pci_enable_msix(pdev, entries, SKD_MAX_MSIX_COUNT);
  3305. if (rc < 0)
  3306. goto msix_out;
  3307. if (rc) {
  3308. if (rc < SKD_MIN_MSIX_COUNT) {
  3309. pr_err("(%s): failed to enable MSI-X %d\n",
  3310. skd_name(skdev), rc);
  3311. goto msix_out;
  3312. }
  3313. pr_debug("%s:%s:%d %s: <%s> allocated %d MSI-X vectors\n",
  3314. skdev->name, __func__, __LINE__,
  3315. pci_name(pdev), skdev->name, rc);
  3316. skdev->msix_count = rc;
  3317. rc = pci_enable_msix(pdev, entries, skdev->msix_count);
  3318. if (rc) {
  3319. pr_err("(%s): failed to enable MSI-X "
  3320. "support (%d) %d\n",
  3321. skd_name(skdev), skdev->msix_count, rc);
  3322. goto msix_out;
  3323. }
  3324. }
  3325. skdev->msix_entries = kzalloc(sizeof(struct skd_msix_entry) *
  3326. skdev->msix_count, GFP_KERNEL);
  3327. if (!skdev->msix_entries) {
  3328. rc = -ENOMEM;
  3329. skdev->msix_count = 0;
  3330. pr_err("(%s): msix table allocation error\n",
  3331. skd_name(skdev));
  3332. goto msix_out;
  3333. }
  3334. qentry = skdev->msix_entries;
  3335. for (i = 0; i < skdev->msix_count; i++) {
  3336. qentry->vector = entries[i].vector;
  3337. qentry->entry = entries[i].entry;
  3338. qentry->rsp = NULL;
  3339. qentry->have_irq = 0;
  3340. pr_debug("%s:%s:%d %s: <%s> msix (%d) vec %d, entry %x\n",
  3341. skdev->name, __func__, __LINE__,
  3342. pci_name(pdev), skdev->name,
  3343. i, qentry->vector, qentry->entry);
  3344. qentry++;
  3345. }
  3346. /* Enable MSI-X vectors for the base queue */
  3347. for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
  3348. qentry = &skdev->msix_entries[i];
  3349. snprintf(qentry->isr_name, sizeof(qentry->isr_name),
  3350. "%s%d-msix %s", DRV_NAME, skdev->devno,
  3351. msix_entries[i].name);
  3352. rc = devm_request_irq(&skdev->pdev->dev, qentry->vector,
  3353. msix_entries[i].handler, 0,
  3354. qentry->isr_name, skdev);
  3355. if (rc) {
  3356. pr_err("(%s): Unable to register(%d) MSI-X "
  3357. "handler %d: %s\n",
  3358. skd_name(skdev), rc, i, qentry->isr_name);
  3359. goto msix_out;
  3360. } else {
  3361. qentry->have_irq = 1;
  3362. qentry->rsp = skdev;
  3363. }
  3364. }
  3365. pr_debug("%s:%s:%d %s: <%s> msix %d irq(s) enabled\n",
  3366. skdev->name, __func__, __LINE__,
  3367. pci_name(pdev), skdev->name, skdev->msix_count);
  3368. return 0;
  3369. msix_out:
  3370. if (entries)
  3371. kfree(entries);
  3372. skd_release_msix(skdev);
  3373. return rc;
  3374. }
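/*
 * Acquire an interrupt using the configured type, degrading
 * MSI-X -> MSI -> legacy INTx on failure by rewriting
 * skdev->irq_type and retrying.
 */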
  3375. static int skd_acquire_irq(struct skd_device *skdev)
  3376. {
  3377. int rc;
  3378. struct pci_dev *pdev;
  3379. pdev = skdev->pdev;
  3380. skdev->msix_count = 0;
  3381. RETRY_IRQ_TYPE:
  3382. switch (skdev->irq_type) {
  3383. case SKD_IRQ_MSIX:
  3384. rc = skd_acquire_msix(skdev);
  3385. if (!rc)
  3386. pr_info("(%s): MSI-X %d irqs enabled\n",
  3387. skd_name(skdev), skdev->msix_count);
  3388. else {
  3389. pr_err(
  3390. "(%s): failed to enable MSI-X, re-trying with MSI %d\n",
  3391. skd_name(skdev), rc);
  3392. skdev->irq_type = SKD_IRQ_MSI;
  3393. goto RETRY_IRQ_TYPE;
  3394. }
  3395. break;
  3396. case SKD_IRQ_MSI:
  3397. snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d-msi",
  3398. DRV_NAME, skdev->devno);
  3399. rc = pci_enable_msi(pdev);
  3400. if (!rc) {
  3401. rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr, 0,
  3402. skdev->isr_name, skdev);
  3403. if (rc) {
  3404. pci_disable_msi(pdev);
  3405. pr_err(
  3406. "(%s): failed to allocate the MSI interrupt %d\n",
  3407. skd_name(skdev), rc);
  3408. goto RETRY_IRQ_LEGACY;
  3409. }
  3410. pr_info("(%s): MSI irq %d enabled\n",
  3411. skd_name(skdev), pdev->irq);
  3412. } else {
  3413. RETRY_IRQ_LEGACY:
  3414. pr_err(
  3415. "(%s): failed to enable MSI, re-trying with LEGACY %d\n",
  3416. skd_name(skdev), rc);
  3417. skdev->irq_type = SKD_IRQ_LEGACY;
  3418. goto RETRY_IRQ_TYPE;
  3419. }
  3420. break;
  3421. case SKD_IRQ_LEGACY:
  3422. snprintf(skdev->isr_name, sizeof(skdev->isr_name),
  3423. "%s%d-legacy", DRV_NAME, skdev->devno);
  3424. rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
  3425. IRQF_SHARED, skdev->isr_name, skdev);
  3426. if (!rc)
  3427. pr_info("(%s): LEGACY irq %d enabled\n",
  3428. skd_name(skdev), pdev->irq);
  3429. else
  3430. pr_err("(%s): request LEGACY irq error %d\n",
  3431. skd_name(skdev), rc);
  3432. break;
  3433. default:
  3434. pr_info("(%s): irq_type %d invalid, re-set to %d\n",
  3435. skd_name(skdev), skdev->irq_type, SKD_IRQ_DEFAULT);
  3436. skdev->irq_type = SKD_IRQ_LEGACY;
  3437. goto RETRY_IRQ_TYPE;
  3438. }
  3439. return rc;
  3440. }
  3441. static void skd_release_irq(struct skd_device *skdev)
  3442. {
  3443. switch (skdev->irq_type) {
  3444. case SKD_IRQ_MSIX:
  3445. skd_release_msix(skdev);
  3446. break;
  3447. case SKD_IRQ_MSI:
  3448. devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev);
  3449. pci_disable_msi(skdev->pdev);
  3450. break;
  3451. case SKD_IRQ_LEGACY:
  3452. devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev);
  3453. break;
  3454. default:
  3455. pr_err("(%s): wrong irq type %d!",
  3456. skd_name(skdev), skdev->irq_type);
  3457. break;
  3458. }
  3459. }
  3460. /*
  3461. *****************************************************************************
  3462. * CONSTRUCT
  3463. *****************************************************************************
  3464. */
  3465. static int skd_cons_skcomp(struct skd_device *skdev);
  3466. static int skd_cons_skmsg(struct skd_device *skdev);
  3467. static int skd_cons_skreq(struct skd_device *skdev);
  3468. static int skd_cons_skspcl(struct skd_device *skdev);
  3469. static int skd_cons_sksb(struct skd_device *skdev);
  3470. static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
  3471. u32 n_sg,
  3472. dma_addr_t *ret_dma_addr);
  3473. static int skd_cons_disk(struct skd_device *skdev);
  3474. #define SKD_N_DEV_TABLE 16u
  3475. static u32 skd_next_devno;
  3476. static struct skd_device *skd_construct(struct pci_dev *pdev)
  3477. {
  3478. struct skd_device *skdev;
  3479. int blk_major = skd_major;
  3480. int rc;
  3481. skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);
  3482. if (!skdev) {
  3483. pr_err(PFX "(%s): memory alloc failure\n",
  3484. pci_name(pdev));
  3485. return NULL;
  3486. }
  3487. skdev->state = SKD_DRVR_STATE_LOAD;
  3488. skdev->pdev = pdev;
  3489. skdev->devno = skd_next_devno++;
  3490. skdev->major = blk_major;
  3491. skdev->irq_type = skd_isr_type;
  3492. sprintf(skdev->name, DRV_NAME "%d", skdev->devno);
  3493. skdev->dev_max_queue_depth = 0;
  3494. skdev->num_req_context = skd_max_queue_depth;
  3495. skdev->num_fitmsg_context = skd_max_queue_depth;
  3496. skdev->n_special = skd_max_pass_thru;
  3497. skdev->cur_max_queue_depth = 1;
  3498. skdev->queue_low_water_mark = 1;
  3499. skdev->proto_ver = 99;
  3500. skdev->sgs_per_request = skd_sgs_per_request;
  3501. skdev->dbg_level = skd_dbg_level;
  3502. atomic_set(&skdev->device_count, 0);
  3503. spin_lock_init(&skdev->lock);
  3504. INIT_WORK(&skdev->completion_worker, skd_completion_worker);
  3505. pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
  3506. rc = skd_cons_skcomp(skdev);
  3507. if (rc < 0)
  3508. goto err_out;
  3509. pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
  3510. rc = skd_cons_skmsg(skdev);
  3511. if (rc < 0)
  3512. goto err_out;
  3513. pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
  3514. rc = skd_cons_skreq(skdev);
  3515. if (rc < 0)
  3516. goto err_out;
  3517. pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
  3518. rc = skd_cons_skspcl(skdev);
  3519. if (rc < 0)
  3520. goto err_out;
  3521. pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
  3522. rc = skd_cons_sksb(skdev);
  3523. if (rc < 0)
  3524. goto err_out;
  3525. pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
  3526. rc = skd_cons_disk(skdev);
  3527. if (rc < 0)
  3528. goto err_out;
  3529. pr_debug("%s:%s:%d VICTORY\n", skdev->name, __func__, __LINE__);
  3530. return skdev;
  3531. err_out:
  3532. pr_debug("%s:%s:%d construct failed\n",
  3533. skdev->name, __func__, __LINE__);
  3534. skd_destruct(skdev);
  3535. return NULL;
  3536. }
  3537. static int skd_cons_skcomp(struct skd_device *skdev)
  3538. {
  3539. int rc = 0;
  3540. struct fit_completion_entry_v1 *skcomp;
  3541. u32 nbytes;
  3542. nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
  3543. nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
  3544. pr_debug("%s:%s:%d comp pci_alloc, total bytes %d entries %d\n",
  3545. skdev->name, __func__, __LINE__,
  3546. nbytes, SKD_N_COMPLETION_ENTRY);
  3547. skcomp = pci_alloc_consistent(skdev->pdev, nbytes,
  3548. &skdev->cq_dma_address);
  3549. if (skcomp == NULL) {
  3550. rc = -ENOMEM;
  3551. goto err_out;
  3552. }
  3553. memset(skcomp, 0, nbytes);
  3554. skdev->skcomp_table = skcomp;
  3555. skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp +
  3556. sizeof(*skcomp) *
  3557. SKD_N_COMPLETION_ENTRY);
  3558. err_out:
  3559. return rc;
  3560. }
  3561. static int skd_cons_skmsg(struct skd_device *skdev)
  3562. {
  3563. int rc = 0;
  3564. u32 i;
  3565. pr_debug("%s:%s:%d skmsg_table kzalloc, struct %lu, count %u total %lu\n",
  3566. skdev->name, __func__, __LINE__,
  3567. sizeof(struct skd_fitmsg_context),
  3568. skdev->num_fitmsg_context,
  3569. sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);
  3570. skdev->skmsg_table = kzalloc(sizeof(struct skd_fitmsg_context)
  3571. *skdev->num_fitmsg_context, GFP_KERNEL);
  3572. if (skdev->skmsg_table == NULL) {
  3573. rc = -ENOMEM;
  3574. goto err_out;
  3575. }
  3576. for (i = 0; i < skdev->num_fitmsg_context; i++) {
  3577. struct skd_fitmsg_context *skmsg;
  3578. skmsg = &skdev->skmsg_table[i];
  3579. skmsg->id = i + SKD_ID_FIT_MSG;
  3580. skmsg->state = SKD_MSG_STATE_IDLE;
  3581. skmsg->msg_buf = pci_alloc_consistent(skdev->pdev,
  3582. SKD_N_FITMSG_BYTES + 64,
  3583. &skmsg->mb_dma_address);
  3584. if (skmsg->msg_buf == NULL) {
  3585. rc = -ENOMEM;
  3586. goto err_out;
  3587. }
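/*
 * The low address bits outside FIT_QCMD_BASE_ADDRESS_MASK are
 * apparently reserved for command flags, so the buffer (allocated
 * with 64 spare bytes above) is rounded up to that alignment here,
 * with 'offset' recording the shift so skd_free_skmsg() can hand back
 * the original address. In practice pci_alloc_consistent() returns
 * page-aligned memory, so the adjustment is normally zero.
 */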
  3588. skmsg->offset = (u32)((u64)skmsg->msg_buf &
  3589. (~FIT_QCMD_BASE_ADDRESS_MASK));
  3590. skmsg->msg_buf += ~FIT_QCMD_BASE_ADDRESS_MASK;
  3591. skmsg->msg_buf = (u8 *)((u64)skmsg->msg_buf &
  3592. FIT_QCMD_BASE_ADDRESS_MASK);
  3593. skmsg->mb_dma_address += ~FIT_QCMD_BASE_ADDRESS_MASK;
  3594. skmsg->mb_dma_address &= FIT_QCMD_BASE_ADDRESS_MASK;
  3595. memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);
  3596. skmsg->next = &skmsg[1];
  3597. }
  3598. /* Free list is in order starting with the 0th entry. */
  3599. skdev->skmsg_table[i - 1].next = NULL;
  3600. skdev->skmsg_free_list = skdev->skmsg_table;
  3601. err_out:
  3602. return rc;
  3603. }
  3604. static int skd_cons_skreq(struct skd_device *skdev)
  3605. {
  3606. int rc = 0;
  3607. u32 i;
  3608. pr_debug("%s:%s:%d skreq_table kzalloc, struct %lu, count %u total %lu\n",
  3609. skdev->name, __func__, __LINE__,
  3610. sizeof(struct skd_request_context),
  3611. skdev->num_req_context,
  3612. sizeof(struct skd_request_context) * skdev->num_req_context);
  3613. skdev->skreq_table = kzalloc(sizeof(struct skd_request_context)
  3614. * skdev->num_req_context, GFP_KERNEL);
  3615. if (skdev->skreq_table == NULL) {
  3616. rc = -ENOMEM;
  3617. goto err_out;
  3618. }
  3619. pr_debug("%s:%s:%d alloc sg_table sg_per_req %u scatlist %lu total %lu\n",
  3620. skdev->name, __func__, __LINE__,
  3621. skdev->sgs_per_request, sizeof(struct scatterlist),
  3622. skdev->sgs_per_request * sizeof(struct scatterlist));
  3623. for (i = 0; i < skdev->num_req_context; i++) {
  3624. struct skd_request_context *skreq;
  3625. skreq = &skdev->skreq_table[i];
  3626. skreq->id = i + SKD_ID_RW_REQUEST;
  3627. skreq->state = SKD_REQ_STATE_IDLE;
  3628. skreq->sg = kzalloc(sizeof(struct scatterlist) *
  3629. skdev->sgs_per_request, GFP_KERNEL);
  3630. if (skreq->sg == NULL) {
  3631. rc = -ENOMEM;
  3632. goto err_out;
  3633. }
  3634. sg_init_table(skreq->sg, skdev->sgs_per_request);
  3635. skreq->sksg_list = skd_cons_sg_list(skdev,
  3636. skdev->sgs_per_request,
  3637. &skreq->sksg_dma_address);
  3638. if (skreq->sksg_list == NULL) {
  3639. rc = -ENOMEM;
  3640. goto err_out;
  3641. }
  3642. skreq->next = &skreq[1];
  3643. }
  3644. /* Free list is in order starting with the 0th entry. */
  3645. skdev->skreq_table[i - 1].next = NULL;
  3646. skdev->skreq_free_list = skdev->skreq_table;
  3647. err_out:
  3648. return rc;
  3649. }
  3650. static int skd_cons_skspcl(struct skd_device *skdev)
  3651. {
  3652. int rc = 0;
  3653. u32 i, nbytes;
  3654. pr_debug("%s:%s:%d skspcl_table kzalloc, struct %lu, count %u total %lu\n",
  3655. skdev->name, __func__, __LINE__,
  3656. sizeof(struct skd_special_context),
  3657. skdev->n_special,
  3658. sizeof(struct skd_special_context) * skdev->n_special);
  3659. skdev->skspcl_table = kzalloc(sizeof(struct skd_special_context)
  3660. * skdev->n_special, GFP_KERNEL);
  3661. if (skdev->skspcl_table == NULL) {
  3662. rc = -ENOMEM;
  3663. goto err_out;
  3664. }
  3665. for (i = 0; i < skdev->n_special; i++) {
  3666. struct skd_special_context *skspcl;
  3667. skspcl = &skdev->skspcl_table[i];
  3668. skspcl->req.id = i + SKD_ID_SPECIAL_REQUEST;
  3669. skspcl->req.state = SKD_REQ_STATE_IDLE;
  3670. skspcl->req.next = &skspcl[1].req;
  3671. nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
  3672. skspcl->msg_buf = pci_alloc_consistent(skdev->pdev, nbytes,
  3673. &skspcl->mb_dma_address);
  3674. if (skspcl->msg_buf == NULL) {
  3675. rc = -ENOMEM;
  3676. goto err_out;
  3677. }
  3678. memset(skspcl->msg_buf, 0, nbytes);
  3679. skspcl->req.sg = kzalloc(sizeof(struct scatterlist) *
  3680. SKD_N_SG_PER_SPECIAL, GFP_KERNEL);
  3681. if (skspcl->req.sg == NULL) {
  3682. rc = -ENOMEM;
  3683. goto err_out;
  3684. }
  3685. skspcl->req.sksg_list = skd_cons_sg_list(skdev,
  3686. SKD_N_SG_PER_SPECIAL,
  3687. &skspcl->req.
  3688. sksg_dma_address);
  3689. if (skspcl->req.sksg_list == NULL) {
  3690. rc = -ENOMEM;
  3691. goto err_out;
  3692. }
  3693. }
  3694. /* Free list is in order starting with the 0th entry. */
  3695. skdev->skspcl_table[i - 1].req.next = NULL;
  3696. skdev->skspcl_free_list = skdev->skspcl_table;
  3697. return rc;
  3698. err_out:
  3699. return rc;
  3700. }
  3701. static int skd_cons_sksb(struct skd_device *skdev)
  3702. {
  3703. int rc = 0;
  3704. struct skd_special_context *skspcl;
  3705. u32 nbytes;
  3706. skspcl = &skdev->internal_skspcl;
  3707. skspcl->req.id = 0 + SKD_ID_INTERNAL;
  3708. skspcl->req.state = SKD_REQ_STATE_IDLE;
  3709. nbytes = SKD_N_INTERNAL_BYTES;
  3710. skspcl->data_buf = pci_alloc_consistent(skdev->pdev, nbytes,
  3711. &skspcl->db_dma_address);
  3712. if (skspcl->data_buf == NULL) {
  3713. rc = -ENOMEM;
  3714. goto err_out;
  3715. }
  3716. memset(skspcl->data_buf, 0, nbytes);
  3717. nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
  3718. skspcl->msg_buf = pci_alloc_consistent(skdev->pdev, nbytes,
  3719. &skspcl->mb_dma_address);
  3720. if (skspcl->msg_buf == NULL) {
  3721. rc = -ENOMEM;
  3722. goto err_out;
  3723. }
  3724. memset(skspcl->msg_buf, 0, nbytes);
  3725. skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
  3726. &skspcl->req.sksg_dma_address);
  3727. if (skspcl->req.sksg_list == NULL) {
  3728. rc = -ENOMEM;
  3729. goto err_out;
  3730. }
  3731. if (!skd_format_internal_skspcl(skdev)) {
  3732. rc = -EINVAL;
  3733. goto err_out;
  3734. }
  3735. err_out:
  3736. return rc;
  3737. }
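/*
 * Allocate a DMA-coherent array of FIT SG descriptors and pre-link
 * them: each entry's next_desc_ptr holds the bus address of the
 * following descriptor, and the last is terminated with 0 so the
 * device can walk the chain.
 */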
  3738. static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
  3739. u32 n_sg,
  3740. dma_addr_t *ret_dma_addr)
  3741. {
  3742. struct fit_sg_descriptor *sg_list;
  3743. u32 nbytes;
  3744. nbytes = sizeof(*sg_list) * n_sg;
  3745. sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr);
  3746. if (sg_list != NULL) {
  3747. uint64_t dma_address = *ret_dma_addr;
  3748. u32 i;
  3749. memset(sg_list, 0, nbytes);
  3750. for (i = 0; i < n_sg - 1; i++) {
  3751. uint64_t ndp_off;
  3752. ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);
  3753. sg_list[i].next_desc_ptr = dma_address + ndp_off;
  3754. }
  3755. sg_list[i].next_desc_ptr = 0LL;
  3756. }
  3757. return sg_list;
  3758. }
  3759. static int skd_cons_disk(struct skd_device *skdev)
  3760. {
  3761. int rc = 0;
  3762. struct gendisk *disk;
  3763. struct request_queue *q;
  3764. unsigned long flags;
  3765. disk = alloc_disk(SKD_MINORS_PER_DEVICE);
  3766. if (!disk) {
  3767. rc = -ENOMEM;
  3768. goto err_out;
  3769. }
  3770. skdev->disk = disk;
  3771. sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno);
  3772. disk->major = skdev->major;
  3773. disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE;
  3774. disk->fops = &skd_blockdev_ops;
  3775. disk->private_data = skdev;
  3776. q = blk_init_queue(skd_request_fn, &skdev->lock);
  3777. if (!q) {
  3778. rc = -ENOMEM;
  3779. goto err_out;
  3780. }
  3781. skdev->queue = q;
  3782. disk->queue = q;
  3783. q->queuedata = skdev;
  3784. blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
  3785. blk_queue_max_segments(q, skdev->sgs_per_request);
  3786. blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
3787. /* set sysfs optimal_io_size to 8K */
  3788. blk_queue_io_opt(q, 8192);
  3789. /* DISCARD Flag initialization. */
  3790. q->limits.discard_granularity = 8192;
  3791. q->limits.discard_alignment = 0;
  3792. q->limits.max_discard_sectors = UINT_MAX >> 9;
  3793. q->limits.discard_zeroes_data = 1;
  3794. queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
  3795. queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
  3796. spin_lock_irqsave(&skdev->lock, flags);
  3797. pr_debug("%s:%s:%d stopping %s queue\n",
  3798. skdev->name, __func__, __LINE__, skdev->name);
  3799. blk_stop_queue(skdev->queue);
  3800. spin_unlock_irqrestore(&skdev->lock, flags);
  3801. err_out:
  3802. return rc;
  3803. }
  3804. /*
  3805. *****************************************************************************
  3806. * DESTRUCT (FREE)
  3807. *****************************************************************************
  3808. */
  3809. static void skd_free_skcomp(struct skd_device *skdev);
  3810. static void skd_free_skmsg(struct skd_device *skdev);
  3811. static void skd_free_skreq(struct skd_device *skdev);
  3812. static void skd_free_skspcl(struct skd_device *skdev);
  3813. static void skd_free_sksb(struct skd_device *skdev);
  3814. static void skd_free_sg_list(struct skd_device *skdev,
  3815. struct fit_sg_descriptor *sg_list,
  3816. u32 n_sg, dma_addr_t dma_addr);
  3817. static void skd_free_disk(struct skd_device *skdev);
  3818. static void skd_destruct(struct skd_device *skdev)
  3819. {
  3820. if (skdev == NULL)
  3821. return;
  3822. pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
  3823. skd_free_disk(skdev);
  3824. pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
  3825. skd_free_sksb(skdev);
  3826. pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
  3827. skd_free_skspcl(skdev);
  3828. pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
  3829. skd_free_skreq(skdev);
  3830. pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
  3831. skd_free_skmsg(skdev);
  3832. pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
  3833. skd_free_skcomp(skdev);
  3834. pr_debug("%s:%s:%d skdev\n", skdev->name, __func__, __LINE__);
  3835. kfree(skdev);
  3836. }
  3837. static void skd_free_skcomp(struct skd_device *skdev)
  3838. {
  3839. if (skdev->skcomp_table != NULL) {
  3840. u32 nbytes;
  3841. nbytes = sizeof(skdev->skcomp_table[0]) *
  3842. SKD_N_COMPLETION_ENTRY;
  3843. pci_free_consistent(skdev->pdev, nbytes,
  3844. skdev->skcomp_table, skdev->cq_dma_address);
  3845. }
  3846. skdev->skcomp_table = NULL;
  3847. skdev->cq_dma_address = 0;
  3848. }
  3849. static void skd_free_skmsg(struct skd_device *skdev)
  3850. {
  3851. u32 i;
  3852. if (skdev->skmsg_table == NULL)
  3853. return;
  3854. for (i = 0; i < skdev->num_fitmsg_context; i++) {
  3855. struct skd_fitmsg_context *skmsg;
  3856. skmsg = &skdev->skmsg_table[i];
  3857. if (skmsg->msg_buf != NULL) {
  3858. skmsg->msg_buf += skmsg->offset;
  3859. skmsg->mb_dma_address += skmsg->offset;
  3860. pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES,
  3861. skmsg->msg_buf,
  3862. skmsg->mb_dma_address);
  3863. }
  3864. skmsg->msg_buf = NULL;
  3865. skmsg->mb_dma_address = 0;
  3866. }
  3867. kfree(skdev->skmsg_table);
  3868. skdev->skmsg_table = NULL;
  3869. }
  3870. static void skd_free_skreq(struct skd_device *skdev)
  3871. {
  3872. u32 i;
  3873. if (skdev->skreq_table == NULL)
  3874. return;
  3875. for (i = 0; i < skdev->num_req_context; i++) {
  3876. struct skd_request_context *skreq;
  3877. skreq = &skdev->skreq_table[i];
  3878. skd_free_sg_list(skdev, skreq->sksg_list,
  3879. skdev->sgs_per_request,
  3880. skreq->sksg_dma_address);
  3881. skreq->sksg_list = NULL;
  3882. skreq->sksg_dma_address = 0;
  3883. kfree(skreq->sg);
  3884. }
  3885. kfree(skdev->skreq_table);
  3886. skdev->skreq_table = NULL;
  3887. }
  3888. static void skd_free_skspcl(struct skd_device *skdev)
  3889. {
  3890. u32 i;
  3891. u32 nbytes;
  3892. if (skdev->skspcl_table == NULL)
  3893. return;
  3894. for (i = 0; i < skdev->n_special; i++) {
  3895. struct skd_special_context *skspcl;
  3896. skspcl = &skdev->skspcl_table[i];
  3897. if (skspcl->msg_buf != NULL) {
  3898. nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
  3899. pci_free_consistent(skdev->pdev, nbytes,
  3900. skspcl->msg_buf,
  3901. skspcl->mb_dma_address);
  3902. }
  3903. skspcl->msg_buf = NULL;
  3904. skspcl->mb_dma_address = 0;
  3905. skd_free_sg_list(skdev, skspcl->req.sksg_list,
  3906. SKD_N_SG_PER_SPECIAL,
  3907. skspcl->req.sksg_dma_address);
  3908. skspcl->req.sksg_list = NULL;
  3909. skspcl->req.sksg_dma_address = 0;
  3910. kfree(skspcl->req.sg);
  3911. }
  3912. kfree(skdev->skspcl_table);
  3913. skdev->skspcl_table = NULL;
  3914. }
  3915. static void skd_free_sksb(struct skd_device *skdev)
  3916. {
  3917. struct skd_special_context *skspcl;
  3918. u32 nbytes;
  3919. skspcl = &skdev->internal_skspcl;
  3920. if (skspcl->data_buf != NULL) {
  3921. nbytes = SKD_N_INTERNAL_BYTES;
  3922. pci_free_consistent(skdev->pdev, nbytes,
  3923. skspcl->data_buf, skspcl->db_dma_address);
  3924. }
  3925. skspcl->data_buf = NULL;
  3926. skspcl->db_dma_address = 0;
  3927. if (skspcl->msg_buf != NULL) {
  3928. nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
  3929. pci_free_consistent(skdev->pdev, nbytes,
  3930. skspcl->msg_buf, skspcl->mb_dma_address);
  3931. }
  3932. skspcl->msg_buf = NULL;
  3933. skspcl->mb_dma_address = 0;
  3934. skd_free_sg_list(skdev, skspcl->req.sksg_list, 1,
  3935. skspcl->req.sksg_dma_address);
  3936. skspcl->req.sksg_list = NULL;
  3937. skspcl->req.sksg_dma_address = 0;
  3938. }
  3939. static void skd_free_sg_list(struct skd_device *skdev,
  3940. struct fit_sg_descriptor *sg_list,
  3941. u32 n_sg, dma_addr_t dma_addr)
  3942. {
  3943. if (sg_list != NULL) {
  3944. u32 nbytes;
  3945. nbytes = sizeof(*sg_list) * n_sg;
  3946. pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr);
  3947. }
  3948. }
  3949. static void skd_free_disk(struct skd_device *skdev)
  3950. {
  3951. struct gendisk *disk = skdev->disk;
  3952. if (disk != NULL) {
  3953. struct request_queue *q = disk->queue;
  3954. if (disk->flags & GENHD_FL_UP)
  3955. del_gendisk(disk);
  3956. if (q)
  3957. blk_cleanup_queue(q);
  3958. put_disk(disk);
  3959. }
  3960. skdev->disk = NULL;
  3961. }
  3962. /*
  3963. *****************************************************************************
  3964. * BLOCK DEVICE (BDEV) GLUE
  3965. *****************************************************************************
  3966. */
  3967. static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
  3968. {
  3969. struct skd_device *skdev;
  3970. u64 capacity;
  3971. skdev = bdev->bd_disk->private_data;
  3972. pr_debug("%s:%s:%d %s: CMD[%s] getgeo device\n",
  3973. skdev->name, __func__, __LINE__,
  3974. bdev->bd_disk->disk_name, current->comm);
  3975. if (skdev->read_cap_is_valid) {
  3976. capacity = get_capacity(skdev->disk);
  3977. geo->heads = 64;
  3978. geo->sectors = 255;
  3979. geo->cylinders = (capacity) / (255 * 64);
  3980. return 0;
  3981. }
  3982. return -EIO;
  3983. }
  3984. static int skd_bdev_attach(struct skd_device *skdev)
  3985. {
  3986. pr_debug("%s:%s:%d add_disk\n", skdev->name, __func__, __LINE__);
  3987. add_disk(skdev->disk);
  3988. return 0;
  3989. }
  3990. static const struct block_device_operations skd_blockdev_ops = {
  3991. .owner = THIS_MODULE,
  3992. .ioctl = skd_bdev_ioctl,
  3993. .getgeo = skd_bdev_getgeo,
  3994. };
  3995. /*
  3996. *****************************************************************************
  3997. * PCIe DRIVER GLUE
  3998. *****************************************************************************
  3999. */
  4000. static DEFINE_PCI_DEVICE_TABLE(skd_pci_tbl) = {
  4001. { PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120,
  4002. PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
  4003. { 0 } /* terminate list */
  4004. };
  4005. MODULE_DEVICE_TABLE(pci, skd_pci_tbl);
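/*
 * Build a human-readable PCIe link description. Offset 0x12 into the
 * PCIe capability is the Link Status register (PCI_EXP_LNKSTA): the
 * low 4 bits encode link speed (1 = 2.5 GT/s, 2 = 5.0 GT/s) and bits
 * 9:4 the negotiated link width.
 */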
  4006. static char *skd_pci_info(struct skd_device *skdev, char *str)
  4007. {
  4008. int pcie_reg;
  4009. strcpy(str, "PCIe (");
  4010. pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP);
  4011. if (pcie_reg) {
  4012. char lwstr[6];
  4013. uint16_t pcie_lstat, lspeed, lwidth;
  4014. pcie_reg += 0x12;
  4015. pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat);
  4016. lspeed = pcie_lstat & (0xF);
  4017. lwidth = (pcie_lstat & 0x3F0) >> 4;
  4018. if (lspeed == 1)
  4019. strcat(str, "2.5GT/s ");
  4020. else if (lspeed == 2)
  4021. strcat(str, "5.0GT/s ");
  4022. else
  4023. strcat(str, "<unknown> ");
  4024. snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth);
  4025. strcat(str, lwstr);
  4026. }
  4027. return str;
  4028. }
static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int i;
	int rc = 0;
	char pci_str[32];
	struct skd_device *skdev;

	pr_info("STEC s1120 Driver(%s) version %s-b%s\n",
		DRV_NAME, DRV_VERSION, DRV_BUILD_ID);
	pr_info("(skd?:??:[%s]): vendor=%04X device=%04x\n",
		pci_name(pdev), pdev->vendor, pdev->device);

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;
	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out;
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!rc) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
			pr_err("(%s): consistent DMA mask error %d\n",
			       pci_name(pdev), rc);
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			pr_err("(%s): DMA mask error %d\n",
			       pci_name(pdev), rc);
			goto err_out_regions;
		}
	}

	if (!skd_major) {
		rc = register_blkdev(0, DRV_NAME);
		if (rc < 0)
			goto err_out_regions;
		BUG_ON(!rc);
		skd_major = rc;
	}

	skdev = skd_construct(pdev);
	if (skdev == NULL) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	skd_pci_info(skdev, pci_str);
	pr_info("(%s): %s 64bit\n", skd_name(skdev), pci_str);

	pci_set_master(pdev);
	rc = pci_enable_pcie_error_reporting(pdev);
	if (rc) {
		pr_err("(%s): bad enable of PCIe error reporting rc=%d\n",
		       skd_name(skdev), rc);
		skdev->pcie_error_reporting_is_enabled = 0;
	} else
		skdev->pcie_error_reporting_is_enabled = 1;

	pci_set_drvdata(pdev, skdev);

	skdev->disk->driverfs_dev = &pdev->dev;

	for (i = 0; i < SKD_MAX_BARS; i++) {
		skdev->mem_phys[i] = pci_resource_start(pdev, i);
		skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
		skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
					    skdev->mem_size[i]);
		if (!skdev->mem_map[i]) {
			pr_err("(%s): Unable to map adapter memory!\n",
			       skd_name(skdev));
			rc = -ENODEV;
			goto err_out_iounmap;
		}
		pr_debug("%s:%s:%d mem_map=%p, phys=%016llx, size=%d\n",
			 skdev->name, __func__, __LINE__,
			 skdev->mem_map[i],
			 (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
	}

	rc = skd_acquire_irq(skdev);
	if (rc) {
		pr_err("(%s): interrupt resource error %d\n",
		       skd_name(skdev), rc);
		goto err_out_iounmap;
	}

	rc = skd_start_timer(skdev);
	if (rc)
		goto err_out_timer;

	init_waitqueue_head(&skdev->waitq);

	skd_start_device(skdev);

	rc = wait_event_interruptible_timeout(skdev->waitq,
					      (skdev->gendisk_on),
					      (SKD_START_WAIT_SECONDS * HZ));
	if (skdev->gendisk_on > 0) {
		/* device came on-line after reset */
		skd_bdev_attach(skdev);
		rc = 0;
	} else {
		/* we timed out; something is wrong with the device,
		 * so don't add the disk structure */
		pr_err("(%s): error: waiting for s1120 timed out %d!\n",
		       skd_name(skdev), rc);
		/* if no other error was recorded, report the timeout as ENXIO */
		if (!rc)
			rc = -ENXIO;
		goto err_out_timer;
	}

#ifdef SKD_VMK_POLL_HANDLER
	if (skdev->irq_type == SKD_IRQ_MSIX) {
		/* MSIX completion handler is being used for coredump */
		vmklnx_scsi_register_poll_handler(skdev->scsi_host,
						  skdev->msix_entries[5].vector,
						  skd_comp_q, skdev);
	} else {
		vmklnx_scsi_register_poll_handler(skdev->scsi_host,
						  skdev->pdev->irq, skd_isr,
						  skdev);
	}
#endif  /* SKD_VMK_POLL_HANDLER */

	return rc;

err_out_timer:
	skd_stop_device(skdev);
	skd_release_irq(skdev);

err_out_iounmap:
	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

	skd_destruct(skdev);

err_out_regions:
	pci_release_regions(pdev);

err_out:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return rc;
}
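
/*
 * Device teardown on hot-remove or driver unbind: stop the device, release
 * the interrupt, unmap the BARs, disable PCIe error reporting, destroy the
 * skd_device, and release/disable the PCI resources.
 */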
static void skd_pci_remove(struct pci_dev *pdev)
{
	int i;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		pr_err("%s: no device data for PCI\n", pci_name(pdev));
		return;
	}
	skd_stop_device(skdev);
	skd_release_irq(skdev);

	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap((u32 *)skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

	skd_destruct(skdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return;
}
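
/*
 * Legacy PM suspend hook: quiesce the device and free its interrupt and
 * BAR mappings, then save PCI state and enter the requested power state.
 * The skd_device itself is preserved for skd_pci_resume().
 */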
static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int i;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		pr_err("%s: no device data for PCI\n", pci_name(pdev));
		return -EIO;
	}

	skd_stop_device(skdev);

	skd_release_irq(skdev);

	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap((u32 *)skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
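
/*
 * Legacy PM resume hook: restore power and PCI state, then repeat the
 * relevant parts of probe (regions, DMA mask, error reporting, BAR
 * mappings, interrupt, timer) and restart the device.
 */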
static int skd_pci_resume(struct pci_dev *pdev)
{
	int i;
	int rc = 0;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		pr_err("%s: no device data for PCI\n", pci_name(pdev));
		return -1;
	}

	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;
	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out;
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!rc) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
			pr_err("(%s): consistent DMA mask error %d\n",
			       pci_name(pdev), rc);
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			pr_err("(%s): DMA mask error %d\n",
			       pci_name(pdev), rc);
			goto err_out_regions;
		}
	}

	pci_set_master(pdev);
	rc = pci_enable_pcie_error_reporting(pdev);
	if (rc) {
		pr_err("(%s): bad enable of PCIe error reporting rc=%d\n",
		       skdev->name, rc);
		skdev->pcie_error_reporting_is_enabled = 0;
	} else
		skdev->pcie_error_reporting_is_enabled = 1;

	for (i = 0; i < SKD_MAX_BARS; i++) {
		skdev->mem_phys[i] = pci_resource_start(pdev, i);
		skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
		skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
					    skdev->mem_size[i]);
		if (!skdev->mem_map[i]) {
			pr_err("(%s): Unable to map adapter memory!\n",
			       skd_name(skdev));
			rc = -ENODEV;
			goto err_out_iounmap;
		}
		pr_debug("%s:%s:%d mem_map=%p, phys=%016llx, size=%d\n",
			 skdev->name, __func__, __LINE__,
			 skdev->mem_map[i],
			 (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
	}

	rc = skd_acquire_irq(skdev);
	if (rc) {
		pr_err("(%s): interrupt resource error %d\n",
		       pci_name(pdev), rc);
		goto err_out_iounmap;
	}

	rc = skd_start_timer(skdev);
	if (rc)
		goto err_out_timer;

	init_waitqueue_head(&skdev->waitq);

	skd_start_device(skdev);

	return rc;

err_out_timer:
	skd_stop_device(skdev);
	skd_release_irq(skdev);

err_out_iounmap:
	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

err_out_regions:
	pci_release_regions(pdev);

err_out:
	pci_disable_device(pdev);
	return rc;
}
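
/*
 * Shutdown hook: only stop the device so that no DMA or interrupts remain
 * active across the reboot/power-off; no resources are released here.
 */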
static void skd_pci_shutdown(struct pci_dev *pdev)
{
	struct skd_device *skdev;

	pr_err("skd_pci_shutdown called\n");

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		pr_err("%s: no device data for PCI\n", pci_name(pdev));
		return;
	}

	pr_err("%s: calling stop\n", skd_name(skdev));
	skd_stop_device(skdev);
}

static struct pci_driver skd_driver = {
	.name = DRV_NAME,
	.id_table = skd_pci_tbl,
	.probe = skd_pci_probe,
	.remove = skd_pci_remove,
	.suspend = skd_pci_suspend,
	.resume = skd_pci_resume,
	.shutdown = skd_pci_shutdown,
};

/*
 *****************************************************************************
 * LOGGING SUPPORT
 *****************************************************************************
 */
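
/*
 * Build the identifier used as a log prefix: "<name>:<serial>:[<pci addr>]"
 * once the INQUIRY serial number is known, "<name>:??:[<pci addr>]" before
 * that. The string lives in skdev->id_str and is rebuilt on every call.
 */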
static const char *skd_name(struct skd_device *skdev)
{
	memset(skdev->id_str, 0, sizeof(skdev->id_str));

	if (skdev->inquiry_is_valid)
		snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:%s:[%s]",
			 skdev->name, skdev->inq_serial_num,
			 pci_name(skdev->pdev));
	else
		snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:??:[%s]",
			 skdev->name, pci_name(skdev->pdev));

	return skdev->id_str;
}

const char *skd_drive_state_to_str(int state)
{
	switch (state) {
	case FIT_SR_DRIVE_OFFLINE:
		return "OFFLINE";
	case FIT_SR_DRIVE_INIT:
		return "INIT";
	case FIT_SR_DRIVE_ONLINE:
		return "ONLINE";
	case FIT_SR_DRIVE_BUSY:
		return "BUSY";
	case FIT_SR_DRIVE_FAULT:
		return "FAULT";
	case FIT_SR_DRIVE_DEGRADED:
		return "DEGRADED";
	case FIT_SR_PCIE_LINK_DOWN:
		return "LINK_DOWN";
	case FIT_SR_DRIVE_SOFT_RESET:
		return "SOFT_RESET";
	case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
		return "NEED_FW";
	case FIT_SR_DRIVE_INIT_FAULT:
		return "INIT_FAULT";
	case FIT_SR_DRIVE_BUSY_SANITIZE:
		return "BUSY_SANITIZE";
	case FIT_SR_DRIVE_BUSY_ERASE:
		return "BUSY_ERASE";
	case FIT_SR_DRIVE_FW_BOOTING:
		return "FW_BOOTING";
	default:
		return "???";
	}
}

const char *skd_skdev_state_to_str(enum skd_drvr_state state)
{
	switch (state) {
	case SKD_DRVR_STATE_LOAD:
		return "LOAD";
	case SKD_DRVR_STATE_IDLE:
		return "IDLE";
	case SKD_DRVR_STATE_BUSY:
		return "BUSY";
	case SKD_DRVR_STATE_STARTING:
		return "STARTING";
	case SKD_DRVR_STATE_ONLINE:
		return "ONLINE";
	case SKD_DRVR_STATE_PAUSING:
		return "PAUSING";
	case SKD_DRVR_STATE_PAUSED:
		return "PAUSED";
	case SKD_DRVR_STATE_DRAINING_TIMEOUT:
		return "DRAINING_TIMEOUT";
	case SKD_DRVR_STATE_RESTARTING:
		return "RESTARTING";
	case SKD_DRVR_STATE_RESUMING:
		return "RESUMING";
	case SKD_DRVR_STATE_STOPPING:
		return "STOPPING";
	case SKD_DRVR_STATE_SYNCING:
		return "SYNCING";
	case SKD_DRVR_STATE_FAULT:
		return "FAULT";
	case SKD_DRVR_STATE_DISAPPEARED:
		return "DISAPPEARED";
	case SKD_DRVR_STATE_BUSY_ERASE:
		return "BUSY_ERASE";
	case SKD_DRVR_STATE_BUSY_SANITIZE:
		return "BUSY_SANITIZE";
	case SKD_DRVR_STATE_BUSY_IMMINENT:
		return "BUSY_IMMINENT";
	case SKD_DRVR_STATE_WAIT_BOOT:
		return "WAIT_BOOT";
	default:
		return "???";
	}
}

const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
{
	switch (state) {
	case SKD_MSG_STATE_IDLE:
		return "IDLE";
	case SKD_MSG_STATE_BUSY:
		return "BUSY";
	default:
		return "???";
	}
}

const char *skd_skreq_state_to_str(enum skd_req_state state)
{
	switch (state) {
	case SKD_REQ_STATE_IDLE:
		return "IDLE";
	case SKD_REQ_STATE_SETUP:
		return "SETUP";
	case SKD_REQ_STATE_BUSY:
		return "BUSY";
	case SKD_REQ_STATE_COMPLETED:
		return "COMPLETED";
	case SKD_REQ_STATE_TIMEOUT:
		return "TIMEOUT";
	case SKD_REQ_STATE_ABORTED:
		return "ABORTED";
	default:
		return "???";
	}
}
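
/*
 * Debug-level dumpers for the three main object types (device, FIT message,
 * request). Each prints the object's state, identifiers, and queue-depth or
 * transfer details using the *_state_to_str() helpers above.
 */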
static void skd_log_skdev(struct skd_device *skdev, const char *event)
{
	pr_debug("%s:%s:%d (%s) skdev=%p event='%s'\n",
		 skdev->name, __func__, __LINE__, skdev->name, skdev, event);
	pr_debug("%s:%s:%d drive_state=%s(%d) driver_state=%s(%d)\n",
		 skdev->name, __func__, __LINE__,
		 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
		 skd_skdev_state_to_str(skdev->state), skdev->state);
	pr_debug("%s:%s:%d busy=%d limit=%d dev=%d lowat=%d\n",
		 skdev->name, __func__, __LINE__,
		 skdev->in_flight, skdev->cur_max_queue_depth,
		 skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
	pr_debug("%s:%s:%d timestamp=0x%x cycle=%d cycle_ix=%d\n",
		 skdev->name, __func__, __LINE__,
		 skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix);
}

static void skd_log_skmsg(struct skd_device *skdev,
			  struct skd_fitmsg_context *skmsg, const char *event)
{
	pr_debug("%s:%s:%d (%s) skmsg=%p event='%s'\n",
		 skdev->name, __func__, __LINE__, skdev->name, skmsg, event);
	pr_debug("%s:%s:%d state=%s(%d) id=0x%04x length=%d\n",
		 skdev->name, __func__, __LINE__,
		 skd_skmsg_state_to_str(skmsg->state), skmsg->state,
		 skmsg->id, skmsg->length);
}

static void skd_log_skreq(struct skd_device *skdev,
			  struct skd_request_context *skreq, const char *event)
{
	pr_debug("%s:%s:%d (%s) skreq=%p event='%s'\n",
		 skdev->name, __func__, __LINE__, skdev->name, skreq, event);
	pr_debug("%s:%s:%d state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
		 skdev->name, __func__, __LINE__,
		 skd_skreq_state_to_str(skreq->state), skreq->state,
		 skreq->id, skreq->fitmsg_id);
	pr_debug("%s:%s:%d timo=0x%x sg_dir=%d n_sg=%d\n",
		 skdev->name, __func__, __LINE__,
		 skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg);

	if (skreq->req != NULL) {
		struct request *req = skreq->req;
		u32 lba = (u32)blk_rq_pos(req);
		u32 count = blk_rq_sectors(req);

		pr_debug("%s:%s:%d "
			 "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
			 skdev->name, __func__, __LINE__,
			 req, lba, lba, count, count,
			 (int)rq_data_dir(req));
	} else
		pr_debug("%s:%s:%d req=NULL\n",
			 skdev->name, __func__, __LINE__);
}

/*
 *****************************************************************************
 * MODULE GLUE
 *****************************************************************************
 */
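
/*
 * Module init: validate every module parameter and clamp any out-of-range
 * value back to its default before registering the PCI driver. The block
 * major itself is registered lazily in skd_pci_probe().
 */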
static int __init skd_init(void)
{
	pr_info(PFX " v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID);

	switch (skd_isr_type) {
	case SKD_IRQ_LEGACY:
	case SKD_IRQ_MSI:
	case SKD_IRQ_MSIX:
		break;
	default:
		pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n",
		       skd_isr_type, SKD_IRQ_DEFAULT);
		skd_isr_type = SKD_IRQ_DEFAULT;
	}

	if (skd_max_queue_depth < 1 ||
	    skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
		pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n",
		       skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
		skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
	}

	if (skd_max_req_per_msg < 1 || skd_max_req_per_msg > 14) {
		pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n",
		       skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
		skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
	}

	if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
		pr_err(PFX "skd_sgs_per_request %d invalid, re-set to %d\n",
		       skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
		skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
	}

	if (skd_dbg_level < 0 || skd_dbg_level > 2) {
		pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n",
		       skd_dbg_level, 0);
		skd_dbg_level = 0;
	}

	if (skd_isr_comp_limit < 0) {
		pr_err(PFX "skd_isr_comp_limit %d invalid, set to %d\n",
		       skd_isr_comp_limit, 0);
		skd_isr_comp_limit = 0;
	}

	if (skd_max_pass_thru < 1 || skd_max_pass_thru > 50) {
		pr_err(PFX "skd_max_pass_thru %d invalid, re-set to %d\n",
		       skd_max_pass_thru, SKD_N_SPECIAL_CONTEXT);
		skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
	}

	return pci_register_driver(&skd_driver);
}

static void __exit skd_exit(void)
{
	pr_info(PFX " v%s-b%s unloading\n", DRV_VERSION, DRV_BUILD_ID);

	pci_unregister_driver(&skd_driver);

	if (skd_major)
		unregister_blkdev(skd_major, DRV_NAME);
}

module_init(skd_init);
module_exit(skd_exit);