/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice. This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-config.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
 *                Virtualized Server Adapter.
 * Copyright(c) 2002-2010 Exar Corp.
 ******************************************************************************/
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/slab.h>

#include "vxge-traffic.h"
#include "vxge-config.h"
#include "vxge-main.h"
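
/* Reads one stats counter at @offset via __vxge_hw_vpath_stats_access()
 * into the function-local val64, returning early from the enclosing
 * function if the access fails. */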
#define VXGE_HW_VPATH_STATS_PIO_READ(offset) {                         \
        status = __vxge_hw_vpath_stats_access(vpath,                   \
                                              VXGE_HW_STATS_OP_READ,   \
                                              offset,                  \
                                              &val64);                 \
        if (status != VXGE_HW_OK)                                      \
                return status;                                         \
}

static void
vxge_hw_vpath_set_zero_rx_frm_len(struct vxge_hw_vpath_reg __iomem *vp_reg)
{
        u64 val64;

        val64 = readq(&vp_reg->rxmac_vcfg0);
        val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
        writeq(val64, &vp_reg->rxmac_vcfg0);
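        /* Read the register back so the posted write reaches the device
         * before we return; the value itself is intentionally unused. */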
        val64 = readq(&vp_reg->rxmac_vcfg0);
}

/*
 * vxge_hw_vpath_wait_receive_idle - Wait for Rx to become idle
 */
int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id)
{
        struct vxge_hw_vpath_reg __iomem *vp_reg;
        struct __vxge_hw_virtualpath *vpath;
        u64 val64, rxd_count, rxd_spat;
        int count = 0, total_count = 0;

        vpath = &hldev->virtual_paths[vp_id];
        vp_reg = vpath->vp_reg;

        vxge_hw_vpath_set_zero_rx_frm_len(vp_reg);

        /* Check that the ring controller for this vpath has enough free RxDs
         * to send frames to the host. This is done by reading the
         * PRC_RXD_DOORBELL_VPn register and comparing the read value to the
         * RXD_SPAT value for the vpath.
         */
        val64 = readq(&vp_reg->prc_cfg6);
        rxd_spat = VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val64) + 1;
        /* Use a factor of 2 when comparing rxd_count against rxd_spat for some
         * leg room.
         */
        rxd_spat *= 2;

        do {
                mdelay(1);

                rxd_count = readq(&vp_reg->prc_rxd_doorbell);

                /* Check that the ring controller for this vpath does
                 * not have any frame in its pipeline.
                 */
                val64 = readq(&vp_reg->frm_in_progress_cnt);
                if ((rxd_count <= rxd_spat) || (val64 > 0))
                        count = 0;
                else
                        count++;
                total_count++;
        } while ((count < VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT) &&
                 (total_count < VXGE_HW_MAX_POLLING_COUNT));

        if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
                printk(KERN_ALERT "%s: Still Receiving traffic. Abort wait\n",
                       __func__);

        return total_count;
}

/* vxge_hw_device_wait_receive_idle - This function waits until all frames
 * stored in the frame buffer for each vpath assigned to the given
 * function (hldev) have been sent to the host.
 */
void vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev)
{
        int i, total_count = 0;

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
                if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
                        continue;

                total_count += vxge_hw_vpath_wait_receive_idle(hldev, i);
                if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
                        break;
        }
}
/*
 * __vxge_hw_device_register_poll
 * Polls the given register until the bits selected by @mask read clear,
 * or until max_millis milliseconds have elapsed.
 */
static enum vxge_hw_status
__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
{
        u64 val64;
        u32 i = 0;
        enum vxge_hw_status ret = VXGE_HW_FAIL;
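        /* Poll in two phases: ten quick 100us spins first, then back off
         * to 1ms steps for up to max_millis milliseconds. */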
        udelay(10);

        do {
                val64 = readq(reg);
                if (!(val64 & mask))
                        return VXGE_HW_OK;
                udelay(100);
        } while (++i <= 9);

        i = 0;
        do {
                val64 = readq(reg);
                if (!(val64 & mask))
                        return VXGE_HW_OK;
                mdelay(1);
        } while (++i <= max_millis);

        return ret;
}

static inline enum vxge_hw_status
__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
                          u64 mask, u32 max_millis)
{
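        /* Issue the 64-bit PIO write as two 32-bit halves separated by
         * write barriers, then poll until the device clears the mask bits. */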
        __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
        wmb();
        __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
        wmb();

        return __vxge_hw_device_register_poll(addr, mask, max_millis);
}

static enum vxge_hw_status
vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action,
                     u32 fw_memo, u32 offset, u64 *data0, u64 *data1,
                     u64 *steer_ctrl)
{
        struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
        enum vxge_hw_status status;
        u64 val64;
        u32 retry = 0, max_retry = 3;
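        /* The vpath lock is held only while the vpath is open. On a closed
         * vpath it is dropped immediately and, since sleeping is then
         * permitted, far more polling retries are allowed below. */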
        spin_lock(&vpath->lock);
        if (!vpath->vp_open) {
                spin_unlock(&vpath->lock);
                max_retry = 100;
        }

        writeq(*data0, &vp_reg->rts_access_steer_data0);
        writeq(*data1, &vp_reg->rts_access_steer_data1);
        wmb();

        val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
                VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(fw_memo) |
                VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset) |
                VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
                *steer_ctrl;

        status = __vxge_hw_pio_mem_write64(val64,
                        &vp_reg->rts_access_steer_ctrl,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
                        VXGE_HW_DEF_DEVICE_POLL_MILLIS);
        /* __vxge_hw_device_register_poll() can udelay for a significant
         * amount of time, blocking other processes from the CPU. If it
         * delays for ~5 secs, an NMI error can occur. A way around this is
         * to give up the processor via msleep, but sleeping is not allowed
         * while the vpath lock is held. So we only sleep between retries
         * when the vpath is closed (lock already dropped); otherwise we
         * busy-wait until the firmware operation completes or times out.
         */
        while ((status != VXGE_HW_OK) && retry++ < max_retry) {
                if (!vpath->vp_open)
                        msleep(20);
                status = __vxge_hw_device_register_poll(
                                &vp_reg->rts_access_steer_ctrl,
                                VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
                                VXGE_HW_DEF_DEVICE_POLL_MILLIS);
        }

        if (status != VXGE_HW_OK)
                goto out;

        val64 = readq(&vp_reg->rts_access_steer_ctrl);
        if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
                *data0 = readq(&vp_reg->rts_access_steer_data0);
                *data1 = readq(&vp_reg->rts_access_steer_data1);
                *steer_ctrl = val64;
        } else
                status = VXGE_HW_FAIL;

out:
        if (vpath->vp_open)
                spin_unlock(&vpath->lock);
        return status;
}
enum vxge_hw_status
vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
                             u32 *minor, u32 *build)
{
        u64 data0 = 0, data1 = 0, steer_ctrl = 0;
        struct __vxge_hw_virtualpath *vpath;
        enum vxge_hw_status status;

        vpath = &hldev->virtual_paths[hldev->first_vp_id];

        status = vxge_hw_vpath_fw_api(vpath,
                                      VXGE_HW_FW_UPGRADE_ACTION,
                                      VXGE_HW_FW_UPGRADE_MEMO,
                                      VXGE_HW_FW_UPGRADE_OFFSET_READ,
                                      &data0, &data1, &steer_ctrl);
        if (status != VXGE_HW_OK)
                return status;

        *major = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
        *minor = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
        *build = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);

        return status;
}

enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev)
{
        u64 data0 = 0, data1 = 0, steer_ctrl = 0;
        struct __vxge_hw_virtualpath *vpath;
        enum vxge_hw_status status;
        u32 ret;

        vpath = &hldev->virtual_paths[hldev->first_vp_id];

        status = vxge_hw_vpath_fw_api(vpath,
                                      VXGE_HW_FW_UPGRADE_ACTION,
                                      VXGE_HW_FW_UPGRADE_MEMO,
                                      VXGE_HW_FW_UPGRADE_OFFSET_COMMIT,
                                      &data0, &data1, &steer_ctrl);
        if (status != VXGE_HW_OK) {
                vxge_debug_init(VXGE_ERR, "%s: FW upgrade failed", __func__);
                goto exit;
        }

        ret = VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(steer_ctrl) & 0x7F;
        if (ret != 1) {
                vxge_debug_init(VXGE_ERR, "%s: FW commit failed with error %d",
                                __func__, ret);
                status = VXGE_HW_FAIL;
        }

exit:
        return status;
}
enum vxge_hw_status
vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *fwdata, int size)
{
        u64 data0 = 0, data1 = 0, steer_ctrl = 0;
        struct __vxge_hw_virtualpath *vpath;
        enum vxge_hw_status status;
        int ret_code, sec_code;

        vpath = &hldev->virtual_paths[hldev->first_vp_id];

        /* send upgrade start command */
        status = vxge_hw_vpath_fw_api(vpath,
                                      VXGE_HW_FW_UPGRADE_ACTION,
                                      VXGE_HW_FW_UPGRADE_MEMO,
                                      VXGE_HW_FW_UPGRADE_OFFSET_START,
                                      &data0, &data1, &steer_ctrl);
        if (status != VXGE_HW_OK) {
                vxge_debug_init(VXGE_ERR, "%s: Upgrade start cmd failed",
                                __func__);
                return status;
        }

        /* Transfer the fw image to the adapter 16 bytes at a time */
        for (; size > 0; size -= VXGE_HW_FW_UPGRADE_BLK_SIZE) {
                steer_ctrl = 0;

                /* The next 128 bits of fwdata to be loaded onto the adapter */
                data0 = *((u64 *)fwdata);
                data1 = *((u64 *)fwdata + 1);
                status = vxge_hw_vpath_fw_api(vpath,
                                              VXGE_HW_FW_UPGRADE_ACTION,
                                              VXGE_HW_FW_UPGRADE_MEMO,
                                              VXGE_HW_FW_UPGRADE_OFFSET_SEND,
                                              &data0, &data1, &steer_ctrl);
                if (status != VXGE_HW_OK) {
                        vxge_debug_init(VXGE_ERR, "%s: Upgrade send failed",
                                        __func__);
                        goto out;
                }

                ret_code = VXGE_HW_UPGRADE_GET_RET_ERR_CODE(data0);
                switch (ret_code) {
                case VXGE_HW_FW_UPGRADE_OK:
                        /* All OK, send next 16 bytes. */
                        break;
                case VXGE_FW_UPGRADE_BYTES2SKIP:
                        /* skip bytes in the stream */
                        fwdata += (data0 >> 8) & 0xFFFFFFFF;
                        break;
                case VXGE_HW_FW_UPGRADE_DONE:
                        goto out;
                case VXGE_HW_FW_UPGRADE_ERR:
                        sec_code = VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(data0);
                        switch (sec_code) {
                        case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1:
                        case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7:
                                printk(KERN_ERR
                                       "corrupted data from .ncf file\n");
                                break;
                        case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3:
                        case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4:
                        case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5:
                        case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6:
                        case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8:
                                printk(KERN_ERR "invalid .ncf file\n");
                                break;
                        case VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW:
                                printk(KERN_ERR "buffer overflow\n");
                                break;
                        case VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH:
                                printk(KERN_ERR "failed to flash the image\n");
                                break;
                        case VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN:
                                printk(KERN_ERR
                                       "generic error. Unknown error type\n");
                                break;
                        default:
                                printk(KERN_ERR "Unknown error of type %d\n",
                                       sec_code);
                                break;
                        }
                        status = VXGE_HW_FAIL;
                        goto out;
                default:
                        printk(KERN_ERR "Unknown FW error: %d\n", ret_code);
                        status = VXGE_HW_FAIL;
                        goto out;
                }
                /* point to next 16 bytes */
                fwdata += VXGE_HW_FW_UPGRADE_BLK_SIZE;
        }
out:
        return status;
}
enum vxge_hw_status
vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
                                struct eprom_image *img)
{
        u64 data0 = 0, data1 = 0, steer_ctrl = 0;
        struct __vxge_hw_virtualpath *vpath;
        enum vxge_hw_status status;
        int i;

        vpath = &hldev->virtual_paths[hldev->first_vp_id];

        for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
                data0 = VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(i);
                data1 = steer_ctrl = 0;

                status = vxge_hw_vpath_fw_api(vpath,
                        VXGE_HW_FW_API_GET_EPROM_REV,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
                        0, &data0, &data1, &steer_ctrl);
                if (status != VXGE_HW_OK)
                        break;

                img[i].is_valid = VXGE_HW_GET_EPROM_IMAGE_VALID(data0);
                img[i].index = VXGE_HW_GET_EPROM_IMAGE_INDEX(data0);
                img[i].type = VXGE_HW_GET_EPROM_IMAGE_TYPE(data0);
                img[i].version = VXGE_HW_GET_EPROM_IMAGE_REV(data0);
        }

        return status;
}
/*
 * __vxge_hw_channel_free - Free memory allocated for channel
 * This function deallocates memory from the channel and various arrays
 * in the channel
 */
static void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
{
        kfree(channel->work_arr);
        kfree(channel->free_arr);
        kfree(channel->reserve_arr);
        kfree(channel->orig_arr);
        kfree(channel);
}

/*
 * __vxge_hw_channel_initialize - Initialize a channel
 * This function initializes a channel by properly setting the
 * various references
 */
static enum vxge_hw_status
__vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
{
        u32 i;
        struct __vxge_hw_virtualpath *vpath;

        vpath = channel->vph->vpath;

        if ((channel->reserve_arr != NULL) && (channel->orig_arr != NULL)) {
                for (i = 0; i < channel->length; i++)
                        channel->orig_arr[i] = channel->reserve_arr[i];
        }

        switch (channel->type) {
        case VXGE_HW_CHANNEL_TYPE_FIFO:
                vpath->fifoh = (struct __vxge_hw_fifo *)channel;
                channel->stats = &((struct __vxge_hw_fifo *)
                                channel)->stats->common_stats;
                break;
        case VXGE_HW_CHANNEL_TYPE_RING:
                vpath->ringh = (struct __vxge_hw_ring *)channel;
                channel->stats = &((struct __vxge_hw_ring *)
                                channel)->stats->common_stats;
                break;
        default:
                break;
        }

        return VXGE_HW_OK;
}
/*
 * __vxge_hw_channel_reset - Resets a channel
 * This function resets a channel by properly setting the various references
 */
static enum vxge_hw_status
__vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
{
        u32 i;
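        /* Restore the reserve array from the snapshot taken in
         * __vxge_hw_channel_initialize() and clear the free/work arrays. */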
        for (i = 0; i < channel->length; i++) {
                if (channel->reserve_arr != NULL)
                        channel->reserve_arr[i] = channel->orig_arr[i];
                if (channel->free_arr != NULL)
                        channel->free_arr[i] = NULL;
                if (channel->work_arr != NULL)
                        channel->work_arr[i] = NULL;
        }
        channel->free_ptr = channel->length;
        channel->reserve_ptr = channel->length;
        channel->reserve_top = 0;
        channel->post_index = 0;
        channel->compl_index = 0;

        return VXGE_HW_OK;
}
/*
 * __vxge_hw_device_pci_e_init
 * Initialize certain PCI/PCI-X configuration registers
 * with recommended values. Save config space for future hw resets.
 */
static void __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
{
        u16 cmd = 0;

        /* Set the Parity Error Response and SERR bits in the PCI command
         * register. */
        pci_read_config_word(hldev->pdev, PCI_COMMAND, &cmd);
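        /* 0x140 == PCI_COMMAND_SERR (0x100) | PCI_COMMAND_PARITY (0x40) */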
        cmd |= 0x140;
        pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd);

        pci_save_state(hldev->pdev);
}
/* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
 * in progress
 * This routine polls the vpath_rst_in_prog register until every
 * vpath-reset-in-progress bit reads zero, or the poll times out.
 */
static enum vxge_hw_status
__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
{
        enum vxge_hw_status status;

        status = __vxge_hw_device_register_poll(vpath_rst_in_prog,
                        VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(0x1ffff),
                        VXGE_HW_DEF_DEVICE_POLL_MILLIS);
        return status;
}
/*
 * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
 * Set the swapper bits appropriately for the legacy section.
 */
static enum vxge_hw_status
__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
{
        u64 val64;
        enum vxge_hw_status status = VXGE_HW_OK;

        val64 = readq(&legacy_reg->toc_swapper_fb);
        wmb();

        switch (val64) {
        case VXGE_HW_SWAPPER_INITIAL_VALUE:
                return status;
        case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
                writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
                       &legacy_reg->pifm_rd_swap_en);
                writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
                       &legacy_reg->pifm_rd_flip_en);
                writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
                       &legacy_reg->pifm_wr_swap_en);
                writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
                       &legacy_reg->pifm_wr_flip_en);
                break;
        case VXGE_HW_SWAPPER_BYTE_SWAPPED:
                writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
                       &legacy_reg->pifm_rd_swap_en);
                writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
                       &legacy_reg->pifm_wr_swap_en);
                break;
        case VXGE_HW_SWAPPER_BIT_FLIPPED:
                writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
                       &legacy_reg->pifm_rd_flip_en);
                writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
                       &legacy_reg->pifm_wr_flip_en);
                break;
        }

        wmb();

        val64 = readq(&legacy_reg->toc_swapper_fb);
        if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
                status = VXGE_HW_ERR_SWAPPER_CTRL;

        return status;
}
/*
 * __vxge_hw_device_toc_get
 * This routine sets the swapper and reads the toc pointer and returns the
 * memory mapped address of the toc
 */
static struct vxge_hw_toc_reg __iomem *
__vxge_hw_device_toc_get(void __iomem *bar0)
{
        u64 val64;
        struct vxge_hw_toc_reg __iomem *toc = NULL;
        enum vxge_hw_status status;
        struct vxge_hw_legacy_reg __iomem *legacy_reg =
                (struct vxge_hw_legacy_reg __iomem *)bar0;

        status = __vxge_hw_legacy_swapper_set(legacy_reg);
        if (status != VXGE_HW_OK)
                goto exit;

        val64 = readq(&legacy_reg->toc_first_pointer);
        toc = bar0 + val64;
exit:
        return toc;
}
/*
 * __vxge_hw_device_reg_addr_get
 * This routine sets the swapper, reads the toc pointer and initializes the
 * register location pointers in the device object. It then waits until the
 * vpath reset-in-progress bits clear, i.e. until the hardware has finished
 * initializing its registers.
 */
static enum vxge_hw_status
__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
{
        u64 val64;
        u32 i;
        enum vxge_hw_status status = VXGE_HW_OK;

        hldev->legacy_reg = hldev->bar0;

        hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0);
        if (hldev->toc_reg == NULL) {
                status = VXGE_HW_FAIL;
                goto exit;
        }

        val64 = readq(&hldev->toc_reg->toc_common_pointer);
        hldev->common_reg = hldev->bar0 + val64;

        val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer);
        hldev->mrpcim_reg = hldev->bar0 + val64;

        for (i = 0; i < VXGE_HW_TITAN_SRPCIM_REG_SPACES; i++) {
                val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]);
                hldev->srpcim_reg[i] = hldev->bar0 + val64;
        }

        for (i = 0; i < VXGE_HW_TITAN_VPMGMT_REG_SPACES; i++) {
                val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]);
                hldev->vpmgmt_reg[i] = hldev->bar0 + val64;
        }

        for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) {
                val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]);
                hldev->vpath_reg[i] = hldev->bar0 + val64;
        }

        val64 = readq(&hldev->toc_reg->toc_kdfc);
        switch (VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val64)) {
        case 0:
                hldev->kdfc = hldev->bar0 +
                        VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64);
                break;
        default:
                break;
        }

        status = __vxge_hw_device_vpath_reset_in_prog_check(
                        (u64 __iomem *)&hldev->common_reg->vpath_rst_in_prog);
exit:
        return status;
}
/*
 * __vxge_hw_device_access_rights_get: Get Access Rights of the driver
 * This routine returns the Access Rights of the driver
 */
static u32
__vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
{
        u32 access_rights = VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH;

        switch (host_type) {
        case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
                if (func_id == 0) {
                        access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
                                         VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
                }
                break;
        case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
                access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
                                 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
                break;
        case VXGE_HW_NO_MR_SR_VH0_FUNCTION0:
                access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
                                 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
                break;
        case VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION:
        case VXGE_HW_SR_VH_VIRTUAL_FUNCTION:
        case VXGE_HW_MR_SR_VH0_INVALID_CONFIG:
                break;
        case VXGE_HW_SR_VH_FUNCTION0:
        case VXGE_HW_VH_NORMAL_FUNCTION:
                access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
                break;
        }

        return access_rights;
}
/*
 * __vxge_hw_device_is_privilaged
 * This routine checks if the device function is privileged or not
 */
enum vxge_hw_status
__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id)
{
        if (__vxge_hw_device_access_rights_get(host_type,
                                               func_id) &
            VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)
                return VXGE_HW_OK;
        else
                return VXGE_HW_ERR_PRIVILAGED_OPEARATION;
}
/*
 * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
 * Returns the function number of the vpath.
 */
static u32
__vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
{
        u64 val64;

        val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);

        return
         (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
}

/*
 * __vxge_hw_device_host_info_get
 * This routine returns the host type assignments
 */
static void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
{
        u64 val64;
        u32 i;

        val64 = readq(&hldev->common_reg->host_type_assignments);
        hldev->host_type =
           (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);

        hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
                if (!(hldev->vpath_assignments & vxge_mBIT(i)))
                        continue;

                hldev->func_id =
                        __vxge_hw_vpath_func_id_get(hldev->vpmgmt_reg[i]);

                hldev->access_rights = __vxge_hw_device_access_rights_get(
                        hldev->host_type, hldev->func_id);

                hldev->virtual_paths[i].vp_open = VXGE_HW_VP_NOT_OPEN;
                hldev->virtual_paths[i].vp_reg = hldev->vpath_reg[i];

                hldev->first_vp_id = i;
                break;
        }
}
/*
 * __vxge_hw_verify_pci_e_info - Validate the pci-e link parameters such as
 * link width and signalling rate.
 */
static enum vxge_hw_status
__vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
{
        int exp_cap;
        u16 lnk;

        /* Get the negotiated link width and speed from PCI config space */
        exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
        pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);
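        /* Current Link Speed field: an encoding of 1 means 2.5 GT/s, the
         * only signalling rate this check accepts. */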
        if ((lnk & PCI_EXP_LNKSTA_CLS) != 1)
                return VXGE_HW_ERR_INVALID_PCI_INFO;

        switch ((lnk & PCI_EXP_LNKSTA_NLW) >> 4) {
        case PCIE_LNK_WIDTH_RESRV:
        case PCIE_LNK_X1:
        case PCIE_LNK_X2:
        case PCIE_LNK_X4:
        case PCIE_LNK_X8:
                break;
        default:
                return VXGE_HW_ERR_INVALID_PCI_INFO;
        }

        return VXGE_HW_OK;
}
/*
 * __vxge_hw_device_initialize
 * Initialize Titan-V hardware.
 */
static enum vxge_hw_status
__vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
{
        enum vxge_hw_status status = VXGE_HW_OK;

        if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev->host_type,
                                hldev->func_id)) {
                /* Validate the pci-e link width and speed */
                status = __vxge_hw_verify_pci_e_info(hldev);
                if (status != VXGE_HW_OK)
                        goto exit;
        }
exit:
        return status;
}
/*
 * __vxge_hw_vpath_fw_ver_get - Get the fw version
 * Returns FW Version
 */
static enum vxge_hw_status
__vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath,
                           struct vxge_hw_device_hw_info *hw_info)
{
        struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
        struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
        struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
        struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
        u64 data0 = 0, data1 = 0, steer_ctrl = 0;
        enum vxge_hw_status status;

        status = vxge_hw_vpath_fw_api(vpath,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
                        0, &data0, &data1, &steer_ctrl);
        if (status != VXGE_HW_OK)
                goto exit;

        fw_date->day =
                (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(data0);
        fw_date->month =
                (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(data0);
        fw_date->year =
                (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(data0);

        snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
                 fw_date->month, fw_date->day, fw_date->year);

        fw_version->major =
                (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
        fw_version->minor =
                (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
        fw_version->build =
                (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);

        snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
                 fw_version->major, fw_version->minor, fw_version->build);

        flash_date->day =
                (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data1);
        flash_date->month =
                (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data1);
        flash_date->year =
                (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data1);

        snprintf(flash_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
                 flash_date->month, flash_date->day, flash_date->year);

        flash_version->major =
                (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data1);
        flash_version->minor =
                (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data1);
        flash_version->build =
                (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data1);

        snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
                 flash_version->major, flash_version->minor,
                 flash_version->build);
exit:
        return status;
}
/*
 * __vxge_hw_vpath_card_info_get - Get the serial numbers,
 * part number and product description.
 */
static enum vxge_hw_status
__vxge_hw_vpath_card_info_get(struct __vxge_hw_virtualpath *vpath,
                              struct vxge_hw_device_hw_info *hw_info)
{
        enum vxge_hw_status status;
        u64 data0, data1 = 0, steer_ctrl = 0;
        u8 *serial_number = hw_info->serial_number;
        u8 *part_number = hw_info->part_number;
        u8 *product_desc = hw_info->product_desc;
        u32 i, j = 0;

        data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER;
        status = vxge_hw_vpath_fw_api(vpath,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
                        0, &data0, &data1, &steer_ctrl);
        if (status != VXGE_HW_OK)
                return status;

        ((u64 *)serial_number)[0] = be64_to_cpu(data0);
        ((u64 *)serial_number)[1] = be64_to_cpu(data1);

        data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER;
        data1 = steer_ctrl = 0;
        status = vxge_hw_vpath_fw_api(vpath,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
                        0, &data0, &data1, &steer_ctrl);
        if (status != VXGE_HW_OK)
                return status;

        ((u64 *)part_number)[0] = be64_to_cpu(data0);
        ((u64 *)part_number)[1] = be64_to_cpu(data1);

        for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
             i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
                data0 = i;
                data1 = steer_ctrl = 0;
                status = vxge_hw_vpath_fw_api(vpath,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
                        0, &data0, &data1, &steer_ctrl);
                if (status != VXGE_HW_OK)
                        return status;

                ((u64 *)product_desc)[j++] = be64_to_cpu(data0);
                ((u64 *)product_desc)[j++] = be64_to_cpu(data1);
        }

        return status;
}
/*
 * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
 * Returns pci function mode
 */
static enum vxge_hw_status
__vxge_hw_vpath_pci_func_mode_get(struct __vxge_hw_virtualpath *vpath,
                                  struct vxge_hw_device_hw_info *hw_info)
{
        u64 data0, data1 = 0, steer_ctrl = 0;
        enum vxge_hw_status status;

        data0 = 0;

        status = vxge_hw_vpath_fw_api(vpath,
                        VXGE_HW_FW_API_GET_FUNC_MODE,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
                        0, &data0, &data1, &steer_ctrl);
        if (status != VXGE_HW_OK)
                return status;

        hw_info->function_mode = VXGE_HW_GET_FUNC_MODE_VAL(data0);
        return status;
}
/*
 * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
 * from MAC address table.
 */
static enum vxge_hw_status
__vxge_hw_vpath_addr_get(struct __vxge_hw_virtualpath *vpath,
                         u8 *macaddr, u8 *macaddr_mask)
{
        u64 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
            data0 = 0, data1 = 0, steer_ctrl = 0;
        enum vxge_hw_status status;
        int i;

        do {
                status = vxge_hw_vpath_fw_api(vpath, action,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
                        0, &data0, &data1, &steer_ctrl);
                if (status != VXGE_HW_OK)
                        goto exit;

                data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data0);
                data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
                        data1);
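                /* Unpack the six MAC octets (and their mask) from the low
                 * bytes of data0/data1, last octet first. */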
                for (i = ETH_ALEN; i > 0; i--) {
                        macaddr[i - 1] = (u8) (data0 & 0xFF);
                        data0 >>= 8;

                        macaddr_mask[i - 1] = (u8) (data1 & 0xFF);
                        data1 >>= 8;
                }

                action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY;
                data0 = 0, data1 = 0, steer_ctrl = 0;
        } while (!is_valid_ether_addr(macaddr));
exit:
        return status;
}
/**
 * vxge_hw_device_hw_info_get - Get the hw information
 * Returns the vpath mask that has the bits set for each vpath allocated
 * for the driver, FW version information, and the first mac address for
 * each vpath
 */
enum vxge_hw_status __devinit
vxge_hw_device_hw_info_get(void __iomem *bar0,
                           struct vxge_hw_device_hw_info *hw_info)
{
        u32 i;
        u64 val64;
        struct vxge_hw_toc_reg __iomem *toc;
        struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
        struct vxge_hw_common_reg __iomem *common_reg;
        struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
        enum vxge_hw_status status;
        struct __vxge_hw_virtualpath vpath;

        memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));

        toc = __vxge_hw_device_toc_get(bar0);
        if (toc == NULL) {
                status = VXGE_HW_ERR_CRITICAL;
                goto exit;
        }

        val64 = readq(&toc->toc_common_pointer);
        common_reg = bar0 + val64;

        status = __vxge_hw_device_vpath_reset_in_prog_check(
                (u64 __iomem *)&common_reg->vpath_rst_in_prog);
        if (status != VXGE_HW_OK)
                goto exit;

        hw_info->vpath_mask = readq(&common_reg->vpath_assignments);

        val64 = readq(&common_reg->host_type_assignments);
        hw_info->host_type =
           (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
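        /* Device-wide info (PCI function mode, FW version, card info) is
         * read once, through the first vpath assigned to this function. */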
        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
                if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
                        continue;

                val64 = readq(&toc->toc_vpmgmt_pointer[i]);
                vpmgmt_reg = bar0 + val64;

                hw_info->func_id = __vxge_hw_vpath_func_id_get(vpmgmt_reg);
                if (__vxge_hw_device_access_rights_get(hw_info->host_type,
                                                       hw_info->func_id) &
                    VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {
                        val64 = readq(&toc->toc_mrpcim_pointer);
                        mrpcim_reg = bar0 + val64;

                        writeq(0, &mrpcim_reg->xgmac_gen_fw_memo_mask);
                        wmb();
                }

                val64 = readq(&toc->toc_vpath_pointer[i]);

                spin_lock_init(&vpath.lock);
                vpath.vp_reg = bar0 + val64;
                vpath.vp_open = VXGE_HW_VP_NOT_OPEN;

                status = __vxge_hw_vpath_pci_func_mode_get(&vpath, hw_info);
                if (status != VXGE_HW_OK)
                        goto exit;

                status = __vxge_hw_vpath_fw_ver_get(&vpath, hw_info);
                if (status != VXGE_HW_OK)
                        goto exit;

                status = __vxge_hw_vpath_card_info_get(&vpath, hw_info);
                if (status != VXGE_HW_OK)
                        goto exit;

                break;
        }

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
                if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
                        continue;

                val64 = readq(&toc->toc_vpath_pointer[i]);
                vpath.vp_reg = bar0 + val64;
                vpath.vp_open = VXGE_HW_VP_NOT_OPEN;

                status = __vxge_hw_vpath_addr_get(&vpath,
                                                  hw_info->mac_addrs[i],
                                                  hw_info->mac_addr_masks[i]);
                if (status != VXGE_HW_OK)
                        goto exit;
        }
exit:
        return status;
}
/*
 * __vxge_hw_blockpool_destroy - Deallocates the block pool
 */
static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
{
        struct __vxge_hw_device *hldev;
        struct list_head *p, *n;

        if (blockpool == NULL)
                return;

        hldev = blockpool->hldev;

        list_for_each_safe(p, n, &blockpool->free_block_list) {
                pci_unmap_single(hldev->pdev,
                        ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
                        ((struct __vxge_hw_blockpool_entry *)p)->length,
                        PCI_DMA_BIDIRECTIONAL);

                vxge_os_dma_free(hldev->pdev,
                        ((struct __vxge_hw_blockpool_entry *)p)->memblock,
                        &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);

                list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
                kfree(p);
                blockpool->pool_size--;
        }

        list_for_each_safe(p, n, &blockpool->free_entry_list) {
                list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
                kfree((void *)p);
        }
}
/*
 * __vxge_hw_blockpool_create - Create block pool
 */
static enum vxge_hw_status
__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
                           struct __vxge_hw_blockpool *blockpool,
                           u32 pool_size,
                           u32 pool_max)
{
        u32 i;
        struct __vxge_hw_blockpool_entry *entry = NULL;
        void *memblock;
        dma_addr_t dma_addr;
        struct pci_dev *dma_handle;
        struct pci_dev *acc_handle;
        enum vxge_hw_status status = VXGE_HW_OK;

        if (blockpool == NULL) {
                status = VXGE_HW_FAIL;
                goto blockpool_create_exit;
        }

        blockpool->hldev = hldev;
        blockpool->block_size = VXGE_HW_BLOCK_SIZE;
        blockpool->pool_size = 0;
        blockpool->pool_max = pool_max;
        blockpool->req_out = 0;

        INIT_LIST_HEAD(&blockpool->free_block_list);
        INIT_LIST_HEAD(&blockpool->free_entry_list);
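        /* First pass: pre-allocate pool_size + pool_max entry descriptors.
         * The second pass below backs pool_size of them with DMA-mapped
         * memory blocks. */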
  977. for (i = 0; i < pool_size + pool_max; i++) {
  978. entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
  979. GFP_KERNEL);
  980. if (entry == NULL) {
  981. __vxge_hw_blockpool_destroy(blockpool);
  982. status = VXGE_HW_ERR_OUT_OF_MEMORY;
  983. goto blockpool_create_exit;
  984. }
  985. list_add(&entry->item, &blockpool->free_entry_list);
  986. }
  987. for (i = 0; i < pool_size; i++) {
  988. memblock = vxge_os_dma_malloc(
  989. hldev->pdev,
  990. VXGE_HW_BLOCK_SIZE,
  991. &dma_handle,
  992. &acc_handle);
  993. if (memblock == NULL) {
  994. __vxge_hw_blockpool_destroy(blockpool);
  995. status = VXGE_HW_ERR_OUT_OF_MEMORY;
  996. goto blockpool_create_exit;
  997. }
  998. dma_addr = pci_map_single(hldev->pdev, memblock,
  999. VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL);
  1000. if (unlikely(pci_dma_mapping_error(hldev->pdev,
  1001. dma_addr))) {
  1002. vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
  1003. __vxge_hw_blockpool_destroy(blockpool);
  1004. status = VXGE_HW_ERR_OUT_OF_MEMORY;
  1005. goto blockpool_create_exit;
  1006. }
  1007. if (!list_empty(&blockpool->free_entry_list))
  1008. entry = (struct __vxge_hw_blockpool_entry *)
  1009. list_first_entry(&blockpool->free_entry_list,
  1010. struct __vxge_hw_blockpool_entry,
  1011. item);
  1012. if (entry == NULL)
  1013. entry =
  1014. kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
  1015. GFP_KERNEL);
  1016. if (entry != NULL) {
  1017. list_del(&entry->item);
  1018. entry->length = VXGE_HW_BLOCK_SIZE;
  1019. entry->memblock = memblock;
  1020. entry->dma_addr = dma_addr;
  1021. entry->acc_handle = acc_handle;
  1022. entry->dma_handle = dma_handle;
  1023. list_add(&entry->item,
  1024. &blockpool->free_block_list);
  1025. blockpool->pool_size++;
  1026. } else {
  1027. __vxge_hw_blockpool_destroy(blockpool);
  1028. status = VXGE_HW_ERR_OUT_OF_MEMORY;
  1029. goto blockpool_create_exit;
  1030. }
  1031. }
  1032. blockpool_create_exit:
  1033. return status;
  1034. }
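/*
 * Editorial usage sketch, not part of the driver: creation and teardown of a
 * block pool are always paired; "hldev" here stands for an already
 * initialized HW device and the sizes are arbitrary.
 */
#if 0
	struct __vxge_hw_blockpool pool;

	if (__vxge_hw_blockpool_create(hldev, &pool, 16, 64) != VXGE_HW_OK)
		return VXGE_HW_ERR_OUT_OF_MEMORY;
	/* ... allocate/free blocks against &pool ... */
	__vxge_hw_blockpool_destroy(&pool);
#endif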
/*
 * __vxge_hw_device_fifo_config_check - Check fifo configuration.
 * Check that the fifo block count lies within the supported range.
 */
static enum vxge_hw_status
__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
{
	if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
	    (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS))
		return VXGE_HW_BADCFG_FIFO_BLOCKS;

	return VXGE_HW_OK;
}

/*
 * __vxge_hw_device_vpath_config_check - Check vpath configuration.
 * Validate the bandwidth, fifo, MTU and VLAN-strip settings for one vpath.
 */
static enum vxge_hw_status
__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
{
	enum vxge_hw_status status;

	if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) ||
	    (vp_config->min_bandwidth > VXGE_HW_VPATH_BANDWIDTH_MAX))
		return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH;

	status = __vxge_hw_device_fifo_config_check(&vp_config->fifo);
	if (status != VXGE_HW_OK)
		return status;

	if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) &&
	    ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) ||
	     (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU)))
		return VXGE_HW_BADCFG_VPATH_MTU;

	if ((vp_config->rpa_strip_vlan_tag !=
		VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) &&
	    (vp_config->rpa_strip_vlan_tag !=
		VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) &&
	    (vp_config->rpa_strip_vlan_tag !=
		VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE))
		return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG;

	return VXGE_HW_OK;
}

/*
 * __vxge_hw_device_config_check - Check device configuration.
 * Validate the interrupt mode, the RTS MAC setting and every vpath config.
 */
static enum vxge_hw_status
__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
{
	u32 i;
	enum vxge_hw_status status;

	if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
	    (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
	    (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
	    (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF))
		return VXGE_HW_BADCFG_INTR_MODE;

	if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) &&
	    (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE))
		return VXGE_HW_BADCFG_RTS_MAC_EN;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		status = __vxge_hw_device_vpath_config_check(
				&new_config->vp_config[i]);
		if (status != VXGE_HW_OK)
			return status;
	}

	return VXGE_HW_OK;
}
/*
 * vxge_hw_device_initialize - Initialize Titan device.
 * Validates @device_config, allocates the HW device object, maps the
 * register spaces, sizes and creates the DMA block pool, and runs the
 * one-time hardware initialization.  On success the new handle is
 * returned through @devh (an OUT parameter); all other arguments are IN.
 */
enum vxge_hw_status __devinit
vxge_hw_device_initialize(
	struct __vxge_hw_device **devh,
	struct vxge_hw_device_attr *attr,
	struct vxge_hw_device_config *device_config)
{
	u32 i;
	u32 nblocks = 0;
	struct __vxge_hw_device *hldev = NULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	status = __vxge_hw_device_config_check(device_config);
	if (status != VXGE_HW_OK)
		goto exit;

	hldev = vzalloc(sizeof(struct __vxge_hw_device));
	if (hldev == NULL) {
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	hldev->magic = VXGE_HW_DEVICE_MAGIC;

	vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL);

	/* apply config */
	memcpy(&hldev->config, device_config,
		sizeof(struct vxge_hw_device_config));

	hldev->bar0 = attr->bar0;
	hldev->pdev = attr->pdev;

	hldev->uld_callbacks.link_up = attr->uld_callbacks.link_up;
	hldev->uld_callbacks.link_down = attr->uld_callbacks.link_down;
	hldev->uld_callbacks.crit_err = attr->uld_callbacks.crit_err;

	__vxge_hw_device_pci_e_init(hldev);

	status = __vxge_hw_device_reg_addr_get(hldev);
	if (status != VXGE_HW_OK) {
		vfree(hldev);
		goto exit;
	}

	__vxge_hw_device_host_info_get(hldev);

	/* Incrementing for stats blocks */
	nblocks++;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!(hldev->vpath_assignments & vxge_mBIT(i)))
			continue;

		if (device_config->vp_config[i].ring.enable ==
			VXGE_HW_RING_ENABLE)
			nblocks += device_config->vp_config[i].ring.ring_blocks;

		if (device_config->vp_config[i].fifo.enable ==
			VXGE_HW_FIFO_ENABLE)
			nblocks += device_config->vp_config[i].fifo.fifo_blocks;

		nblocks++;
	}

	if (__vxge_hw_blockpool_create(hldev,
		&hldev->block_pool,
		device_config->dma_blockpool_initial + nblocks,
		device_config->dma_blockpool_max + nblocks) != VXGE_HW_OK) {

		vxge_hw_device_terminate(hldev);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	status = __vxge_hw_device_initialize(hldev);
	if (status != VXGE_HW_OK) {
		vxge_hw_device_terminate(hldev);
		goto exit;
	}

	*devh = hldev;
exit:
	return status;
}
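/*
 * Editorial usage sketch, not part of the driver: a minimal bring-up and
 * teardown sequence, assuming the caller has already filled in a
 * struct vxge_hw_device_attr named "attr" (bar0, pdev, uld_callbacks).
 */
#if 0
static enum vxge_hw_status example_bring_up(struct vxge_hw_device_attr *attr)
{
	struct vxge_hw_device_config config;
	struct __vxge_hw_device *devh;
	enum vxge_hw_status status;

	vxge_hw_device_config_default_get(&config);

	status = vxge_hw_device_initialize(&devh, attr, &config);
	if (status != VXGE_HW_OK)
		return status;

	/* ... open vpaths, run traffic ... */

	vxge_hw_device_terminate(devh);
	return VXGE_HW_OK;
}
#endif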
/*
 * vxge_hw_device_terminate - Terminate Titan device.
 * Terminate HW device.
 */
void
vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
{
	vxge_assert(hldev->magic == VXGE_HW_DEVICE_MAGIC);

	hldev->magic = VXGE_HW_DEVICE_DEAD;
	__vxge_hw_blockpool_destroy(&hldev->block_pool);
	vfree(hldev);
}
/*
 * __vxge_hw_vpath_stats_access - Get the statistics from the given location
 * and offset and perform an operation
 */
static enum vxge_hw_status
__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
			     u32 operation, u32 offset, u64 *stat)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto vpath_stats_access_exit;
	}

	vp_reg = vpath->vp_reg;

	val64 = VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) |
		VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE |
		VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset);

	status = __vxge_hw_pio_mem_write64(val64,
				&vp_reg->xmac_stats_access_cmd,
				VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
				vpath->hldev->config.device_poll_millis);

	if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
		*stat = readq(&vp_reg->xmac_stats_access_data);
	else
		*stat = 0;

vpath_stats_access_exit:
	return status;
}
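/*
 * Editorial usage sketch, not part of the driver: reading a single 64-bit
 * statistics word from an open vpath; "vpath" is hypothetical and the offset
 * is a 64-bit-word index, as in the callers below.
 */
#if 0
	u64 stat;
	enum vxge_hw_status status;

	status = __vxge_hw_vpath_stats_access(vpath, VXGE_HW_STATS_OP_READ,
					VXGE_HW_STATS_VPATH_TX_OFFSET, &stat);
#endif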
/*
 * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
 */
static enum vxge_hw_status
__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
			struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
{
	u64 *val64;
	int i;
	u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
	enum vxge_hw_status status = VXGE_HW_OK;

	val64 = (u64 *)vpath_tx_stats;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	/* unlike the RX variant below, the TX offset is apparently already a
	 * 64-bit-word index: it is passed as-is and advanced by one word per
	 * statistic */
	for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
		status = __vxge_hw_vpath_stats_access(vpath,
					VXGE_HW_STATS_OP_READ,
					offset, val64);
		if (status != VXGE_HW_OK)
			goto exit;
		offset++;
		val64++;
	}
exit:
	return status;
}
/*
 * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
 */
static enum vxge_hw_status
__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
			struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
{
	u64 *val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	int i;
	u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;

	val64 = (u64 *)vpath_rx_stats;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	/* the RX offset is kept in bytes; the stats-access command takes a
	 * 64-bit-word index, hence the ">> 3" and the "+= 8" stride */
	for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
		status = __vxge_hw_vpath_stats_access(vpath,
					VXGE_HW_STATS_OP_READ,
					offset >> 3, val64);
		if (status != VXGE_HW_OK)
			goto exit;

		offset += 8;
		val64++;
	}
exit:
	return status;
}
/*
 * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
 */
static enum vxge_hw_status
__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
			  struct vxge_hw_vpath_stats_hw_info *hw_stats)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}
	vp_reg = vpath->vp_reg;

	val64 = readq(&vp_reg->vpath_debug_stats0);
	hw_stats->ini_num_mwr_sent =
		(u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);

	val64 = readq(&vp_reg->vpath_debug_stats1);
	hw_stats->ini_num_mrd_sent =
		(u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);

	val64 = readq(&vp_reg->vpath_debug_stats2);
	hw_stats->ini_num_cpl_rcvd =
		(u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);

	val64 = readq(&vp_reg->vpath_debug_stats3);
	hw_stats->ini_num_mwr_byte_sent =
		VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);

	val64 = readq(&vp_reg->vpath_debug_stats4);
	hw_stats->ini_num_cpl_byte_rcvd =
		VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);

	val64 = readq(&vp_reg->vpath_debug_stats5);
	hw_stats->wrcrdtarb_xoff =
		(u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);

	val64 = readq(&vp_reg->vpath_debug_stats6);
	hw_stats->rdcrdtarb_xoff =
		(u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);

	/* generic counters 0/1 share the count01 register and counters 2/3
	 * share the count23 register, so each register is read once */
	val64 = readq(&vp_reg->vpath_genstats_count01);
	hw_stats->vpath_genstats_count0 =
	(u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
		val64);
	hw_stats->vpath_genstats_count1 =
	(u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
		val64);

	val64 = readq(&vp_reg->vpath_genstats_count23);
	hw_stats->vpath_genstats_count2 =
	(u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
		val64);
	hw_stats->vpath_genstats_count3 =
	(u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
		val64);

	val64 = readq(&vp_reg->vpath_genstats_count4);
	hw_stats->vpath_genstats_count4 =
	(u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
		val64);

	val64 = readq(&vp_reg->vpath_genstats_count5);
	hw_stats->vpath_genstats_count5 =
	(u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(
		val64);

	status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
	if (status != VXGE_HW_OK)
		goto exit;

	status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
	if (status != VXGE_HW_OK)
		goto exit;

	VXGE_HW_VPATH_STATS_PIO_READ(
		VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);

	hw_stats->prog_event_vnum0 =
			(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64);
	hw_stats->prog_event_vnum1 =
			(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64);

	VXGE_HW_VPATH_STATS_PIO_READ(
		VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET);

	hw_stats->prog_event_vnum2 =
			(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64);
	hw_stats->prog_event_vnum3 =
			(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64);

	val64 = readq(&vp_reg->rx_multi_cast_stats);
	hw_stats->rx_multi_cast_frame_discard =
		(u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64);

	val64 = readq(&vp_reg->rx_frm_transferred);
	hw_stats->rx_frm_transferred =
		(u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64);

	val64 = readq(&vp_reg->rxd_returned);
	hw_stats->rxd_returned =
		(u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64);

	val64 = readq(&vp_reg->dbg_stats_rx_mpa);
	hw_stats->rx_mpa_len_fail_frms =
		(u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64);
	hw_stats->rx_mpa_mrk_fail_frms =
		(u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64);
	hw_stats->rx_mpa_crc_fail_frms =
		(u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64);

	val64 = readq(&vp_reg->dbg_stats_rx_fau);
	hw_stats->rx_permitted_frms =
		(u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64);
	hw_stats->rx_vp_reset_discarded_frms =
	(u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64);
	hw_stats->rx_wol_frms =
		(u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64);

	val64 = readq(&vp_reg->tx_vp_reset_discarded_frms);
	hw_stats->tx_vp_reset_discarded_frms =
	(u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(
		val64);
exit:
	return status;
}
/*
 * vxge_hw_device_stats_get - Get the device hw statistics.
 * Returns the vpath h/w stats for the device.
 */
enum vxge_hw_status
vxge_hw_device_stats_get(struct __vxge_hw_device *hldev,
			 struct vxge_hw_device_stats_hw_info *hw_stats)
{
	u32 i;
	enum vxge_hw_status status = VXGE_HW_OK;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
		    (hldev->virtual_paths[i].vp_open == VXGE_HW_VP_NOT_OPEN))
			continue;

		memcpy(hldev->virtual_paths[i].hw_stats_sav,
		       hldev->virtual_paths[i].hw_stats,
		       sizeof(struct vxge_hw_vpath_stats_hw_info));

		status = __vxge_hw_vpath_stats_get(
			&hldev->virtual_paths[i],
			hldev->virtual_paths[i].hw_stats);
	}

	memcpy(hw_stats, &hldev->stats.hw_dev_info_stats,
	       sizeof(struct vxge_hw_device_stats_hw_info));

	return status;
}
/*
 * vxge_hw_driver_stats_get - Get the device sw statistics.
 * Returns the vpath s/w stats for the device.
 */
enum vxge_hw_status vxge_hw_driver_stats_get(
			struct __vxge_hw_device *hldev,
			struct vxge_hw_device_stats_sw_info *sw_stats)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	memcpy(sw_stats, &hldev->stats.sw_dev_info_stats,
		sizeof(struct vxge_hw_device_stats_sw_info));

	return status;
}
/*
 * vxge_hw_mrpcim_stats_access - Get the statistics from the given location
 * and offset and perform an operation
 */
enum vxge_hw_status
vxge_hw_mrpcim_stats_access(struct __vxge_hw_device *hldev,
			    u32 operation, u32 location, u32 offset, u64 *stat)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	status = __vxge_hw_device_is_privilaged(hldev->host_type,
						hldev->func_id);
	if (status != VXGE_HW_OK)
		goto exit;

	val64 = VXGE_HW_XMAC_STATS_SYS_CMD_OP(operation) |
		VXGE_HW_XMAC_STATS_SYS_CMD_STROBE |
		VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(location) |
		VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(offset);

	status = __vxge_hw_pio_mem_write64(val64,
				&hldev->mrpcim_reg->xmac_stats_sys_cmd,
				VXGE_HW_XMAC_STATS_SYS_CMD_STROBE,
				hldev->config.device_poll_millis);

	if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
		*stat = readq(&hldev->mrpcim_reg->xmac_stats_sys_data);
	else
		*stat = 0;
exit:
	return status;
}
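/*
 * Editorial usage sketch, not part of the driver: reading the first 64-bit
 * word of the aggregate-port-0 statistics block through the public accessor;
 * "hldev" is hypothetical.  As in the callers below, the offset argument is
 * a 64-bit-word index, hence the ">> 3" on the byte offset.
 */
#if 0
	u64 stat;
	enum vxge_hw_status status;

	status = vxge_hw_mrpcim_stats_access(hldev, VXGE_HW_STATS_OP_READ,
					VXGE_HW_STATS_LOC_AGGR,
					VXGE_HW_STATS_AGGRn_OFFSET >> 3,
					&stat);
#endif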
/*
 * vxge_hw_device_xmac_aggr_stats_get - Get the Statistics on aggregate port
 * Reads the whole vxge_hw_xmac_aggr_stats block for one aggregate port.
 */
static enum vxge_hw_status
vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
				   struct vxge_hw_xmac_aggr_stats *aggr_stats)
{
	u64 *val64;
	int i;
	u32 offset = VXGE_HW_STATS_AGGRn_OFFSET;
	enum vxge_hw_status status = VXGE_HW_OK;

	val64 = (u64 *)aggr_stats;

	status = __vxge_hw_device_is_privilaged(hldev->host_type,
						hldev->func_id);
	if (status != VXGE_HW_OK)
		goto exit;

	for (i = 0; i < sizeof(struct vxge_hw_xmac_aggr_stats) / 8; i++) {
		/* 104 bytes is the stride between consecutive ports'
		 * aggregate stats blocks; the command takes a 64-bit-word
		 * index, hence the ">> 3" */
		status = vxge_hw_mrpcim_stats_access(hldev,
					VXGE_HW_STATS_OP_READ,
					VXGE_HW_STATS_LOC_AGGR,
					((offset + (104 * port)) >> 3), val64);
		if (status != VXGE_HW_OK)
			goto exit;

		offset += 8;
		val64++;
	}
exit:
	return status;
}
/*
 * vxge_hw_device_xmac_port_stats_get - Get the Statistics on a port
 * Reads the whole vxge_hw_xmac_port_stats block for one MAC port.
 */
static enum vxge_hw_status
vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
				   struct vxge_hw_xmac_port_stats *port_stats)
{
	u64 *val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	int i;
	u32 offset = 0x0;

	val64 = (u64 *)port_stats;

	status = __vxge_hw_device_is_privilaged(hldev->host_type,
						hldev->func_id);
	if (status != VXGE_HW_OK)
		goto exit;

	for (i = 0; i < sizeof(struct vxge_hw_xmac_port_stats) / 8; i++) {
		/* 608 bytes is the stride between consecutive ports' stats
		 * blocks; the command takes a 64-bit-word index */
		status = vxge_hw_mrpcim_stats_access(hldev,
					VXGE_HW_STATS_OP_READ,
					VXGE_HW_STATS_LOC_AGGR,
					((offset + (608 * port)) >> 3), val64);
		if (status != VXGE_HW_OK)
			goto exit;

		offset += 8;
		val64++;
	}
exit:
	return status;
}
/*
 * vxge_hw_device_xmac_stats_get - Get the XMAC Statistics
 * Collects aggregate-port, MAC-port and per-vpath XMAC statistics.
 */
enum vxge_hw_status
vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev,
			      struct vxge_hw_xmac_stats *xmac_stats)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	u32 i;

	status = vxge_hw_device_xmac_aggr_stats_get(hldev,
					0, &xmac_stats->aggr_stats[0]);
	if (status != VXGE_HW_OK)
		goto exit;

	status = vxge_hw_device_xmac_aggr_stats_get(hldev,
					1, &xmac_stats->aggr_stats[1]);
	if (status != VXGE_HW_OK)
		goto exit;

	for (i = 0; i <= VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
		status = vxge_hw_device_xmac_port_stats_get(hldev,
					i, &xmac_stats->port_stats[i]);
		if (status != VXGE_HW_OK)
			goto exit;
	}

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
			continue;

		status = __vxge_hw_vpath_xmac_tx_stats_get(
					&hldev->virtual_paths[i],
					&xmac_stats->vpath_tx_stats[i]);
		if (status != VXGE_HW_OK)
			goto exit;

		status = __vxge_hw_vpath_xmac_rx_stats_get(
					&hldev->virtual_paths[i],
					&xmac_stats->vpath_rx_stats[i]);
		if (status != VXGE_HW_OK)
			goto exit;
	}
exit:
	return status;
}
/*
 * vxge_hw_device_debug_set - Set the debug module, level and timestamp
 * This routine is used to dynamically change the debug output
 */
void vxge_hw_device_debug_set(struct __vxge_hw_device *hldev,
			      enum vxge_debug_level level, u32 mask)
{
	if (hldev == NULL)
		return;

#if defined(VXGE_DEBUG_TRACE_MASK) || \
	defined(VXGE_DEBUG_ERR_MASK)
	hldev->debug_module_mask = mask;
	hldev->debug_level = level;
#endif

#if defined(VXGE_DEBUG_ERR_MASK)
	hldev->level_err = level & VXGE_ERR;
#endif

#if defined(VXGE_DEBUG_TRACE_MASK)
	hldev->level_trace = level & VXGE_TRACE;
#endif
}
/*
 * vxge_hw_device_error_level_get - Get the error level
 * This routine returns the current error level set
 */
u32 vxge_hw_device_error_level_get(struct __vxge_hw_device *hldev)
{
#if defined(VXGE_DEBUG_ERR_MASK)
	if (hldev == NULL)
		return VXGE_ERR;
	else
		return hldev->level_err;
#else
	return 0;
#endif
}

/*
 * vxge_hw_device_trace_level_get - Get the trace level
 * This routine returns the current trace level set
 */
u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev)
{
#if defined(VXGE_DEBUG_TRACE_MASK)
	if (hldev == NULL)
		return VXGE_TRACE;
	else
		return hldev->level_trace;
#else
	return 0;
#endif
}
/*
 * vxge_hw_device_getpause_data - Pause frame generation and reception.
 * Returns the Pause frame generation and reception capability of the NIC.
 */
enum vxge_hw_status vxge_hw_device_getpause_data(struct __vxge_hw_device *hldev,
						 u32 port, u32 *tx, u32 *rx)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
		status = VXGE_HW_ERR_INVALID_DEVICE;
		goto exit;
	}

	if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
		status = VXGE_HW_ERR_INVALID_PORT;
		goto exit;
	}

	if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
		status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
		goto exit;
	}

	val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
	if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN)
		*tx = 1;
	if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN)
		*rx = 1;
exit:
	return status;
}
/*
 * vxge_hw_device_setpause_data - Set/reset pause frame generation.
 * It can be used to set or reset Pause frame generation or reception
 * support of the NIC.
 */
enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
						 u32 port, u32 tx, u32 rx)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
		status = VXGE_HW_ERR_INVALID_DEVICE;
		goto exit;
	}

	if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
		status = VXGE_HW_ERR_INVALID_PORT;
		goto exit;
	}

	status = __vxge_hw_device_is_privilaged(hldev->host_type,
						hldev->func_id);
	if (status != VXGE_HW_OK)
		goto exit;

	val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
	if (tx)
		val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
	else
		val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
	if (rx)
		val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
	else
		val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;

	writeq(val64, &hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
exit:
	return status;
}
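/*
 * Editorial usage sketch, not part of the driver: enabling TX pause and
 * disabling RX pause on MAC port 0 of a hypothetical "hldev".  The call
 * fails for an unprivileged function, a bad port or a dead device.
 */
#if 0
	enum vxge_hw_status status;

	status = vxge_hw_device_setpause_data(hldev, 0, 1, 0);
#endif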
u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *hldev)
{
	int link_width, exp_cap;
	u16 lnk;

	exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
	pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);
	link_width = (lnk & VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH) >> 4;
	return link_width;
}
/*
 * __vxge_hw_ring_block_memblock_idx - Return the memblock index
 * This function returns the index of memory block
 */
static inline u32
__vxge_hw_ring_block_memblock_idx(u8 *block)
{
	return (u32)*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET));
}

/*
 * __vxge_hw_ring_block_memblock_idx_set - Sets the memblock index
 * This function sets index to a memory block
 */
static inline void
__vxge_hw_ring_block_memblock_idx_set(u8 *block, u32 memblock_idx)
{
	*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET)) = memblock_idx;
}

/*
 * __vxge_hw_ring_block_next_pointer_set - Sets the next block pointer
 * in RxD block
 */
static inline void
__vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next)
{
	*((u64 *)(block + VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next;
}

/*
 * __vxge_hw_ring_first_block_address_get - Returns the dma address of the
 * first block
 * Returns the dma address of the first RxD block
 */
static u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
{
	struct vxge_hw_mempool_dma *dma_object;

	dma_object = ring->mempool->memblocks_dma_arr;
	vxge_assert(dma_object != NULL);

	return dma_object->addr;
}

/*
 * __vxge_hw_ring_item_dma_addr - Return the dma address of an item
 * This function returns the dma address of a given item
 */
static dma_addr_t __vxge_hw_ring_item_dma_addr(struct vxge_hw_mempool *mempoolh,
					       void *item)
{
	u32 memblock_idx;
	void *memblock;
	struct vxge_hw_mempool_dma *memblock_dma_object;
	ptrdiff_t dma_item_offset;

	/* get owner memblock index */
	memblock_idx = __vxge_hw_ring_block_memblock_idx(item);

	/* get owner memblock by memblock index */
	memblock = mempoolh->memblocks_arr[memblock_idx];

	/* get memblock DMA object by memblock index */
	memblock_dma_object = mempoolh->memblocks_dma_arr + memblock_idx;

	/* calculate offset in the memblock of this item */
	dma_item_offset = (u8 *)item - (u8 *)memblock;

	return memblock_dma_object->addr + dma_item_offset;
}
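/*
 * Editorial worked example, not part of the driver: if an RxD block lives at
 * byte offset 0x1000 inside memblock 2, the function above returns
 * memblocks_dma_arr[2].addr + 0x1000; the item's bus address is derived
 * purely from its owning memblock's DMA mapping plus the CPU-side offset.
 */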
/*
 * __vxge_hw_ring_rxdblock_link - Link the RxD blocks
 * This function links one RxD block to the next by writing the latter's
 * DMA address into the former's next-block pointer
 */
static void __vxge_hw_ring_rxdblock_link(struct vxge_hw_mempool *mempoolh,
					 struct __vxge_hw_ring *ring, u32 from,
					 u32 to)
{
	u8 *to_item, *from_item;
	dma_addr_t to_dma;

	/* get "from" RxD block */
	from_item = mempoolh->items_arr[from];
	vxge_assert(from_item);

	/* get "to" RxD block */
	to_item = mempoolh->items_arr[to];
	vxge_assert(to_item);

	/* get the DMA address of the beginning of the "to" RxD block */
	to_dma = __vxge_hw_ring_item_dma_addr(mempoolh, to_item);

	/* set the "from" block's next pointer to the "to" block's DMA
	 * start address */
	__vxge_hw_ring_block_next_pointer_set(from_item, to_dma);
}
/*
 * __vxge_hw_ring_mempool_item_alloc - Allocate List blocks for RxD
 * block callback
 * This function is callback passed to __vxge_hw_mempool_create to create
 * memory pool for RxD block
 */
static void
__vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh,
				  u32 memblock_index,
				  struct vxge_hw_mempool_dma *dma_object,
				  u32 index, u32 is_last)
{
	u32 i;
	void *item = mempoolh->items_arr[index];
	struct __vxge_hw_ring *ring =
		(struct __vxge_hw_ring *)mempoolh->userdata;

	/* format rxds array */
	for (i = 0; i < ring->rxds_per_block; i++) {
		void *rxdblock_priv;
		void *uld_priv;
		struct vxge_hw_ring_rxd_1 *rxdp;

		u32 reserve_index = ring->channel.reserve_ptr -
				(index * ring->rxds_per_block + i + 1);
		u32 memblock_item_idx;

		ring->channel.reserve_arr[reserve_index] = ((u8 *)item) +
						i * ring->rxd_size;

		/* Note: memblock_item_idx is index of the item within
		 * the memblock. For instance, in case of three RxD-blocks
		 * per memblock this value can be 0, 1 or 2. */
		rxdblock_priv = __vxge_hw_mempool_item_priv(mempoolh,
					memblock_index, item,
					&memblock_item_idx);

		rxdp = ring->channel.reserve_arr[reserve_index];

		uld_priv = ((u8 *)rxdblock_priv + ring->rxd_priv_size * i);

		/* pre-format Host_Control */
		rxdp->host_control = (u64)(size_t)uld_priv;
	}

	__vxge_hw_ring_block_memblock_idx_set(item, memblock_index);

	if (is_last) {
		/* link last one with first one */
		__vxge_hw_ring_rxdblock_link(mempoolh, ring, index, 0);
	}

	if (index > 0) {
		/* link this RxD block with previous one */
		__vxge_hw_ring_rxdblock_link(mempoolh, ring, index - 1, index);
	}
}
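/*
 * Editorial worked example, not part of the driver: the reserve_index math
 * above fills the reserve array from the top down.  With, say, a 512-entry
 * reserve array (reserve_ptr == 512) and 128 RxDs per block, block 0 places
 * its RxDs at indices 511..384, block 1 at 383..256, and so on, so RxD 0 of
 * block 0 lands at reserve_arr[reserve_ptr - 1].
 */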
/*
 * vxge_hw_ring_replenish - Initial replenish of RxDs
 * This function replenishes the RxDs from reserve array to work array
 */
enum vxge_hw_status
vxge_hw_ring_replenish(struct __vxge_hw_ring *ring)
{
	void *rxd;
	struct __vxge_hw_channel *channel;
	enum vxge_hw_status status = VXGE_HW_OK;

	channel = &ring->channel;

	while (vxge_hw_channel_dtr_count(channel) > 0) {
		status = vxge_hw_ring_rxd_reserve(ring, &rxd);
		vxge_assert(status == VXGE_HW_OK);

		if (ring->rxd_init) {
			status = ring->rxd_init(rxd, channel->userdata);
			if (status != VXGE_HW_OK) {
				vxge_hw_ring_rxd_free(ring, rxd);
				goto exit;
			}
		}

		vxge_hw_ring_rxd_post(ring, rxd);
	}
	status = VXGE_HW_OK;
exit:
	return status;
}
/*
 * __vxge_hw_channel_allocate - Allocate memory for channel
 * This function allocates required memory for the channel and various arrays
 * in the channel
 */
static struct __vxge_hw_channel *
__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
			   enum __vxge_hw_channel_type type,
			   u32 length, u32 per_dtr_space,
			   void *userdata)
{
	struct __vxge_hw_channel *channel;
	struct __vxge_hw_device *hldev;
	int size = 0;
	u32 vp_id;

	hldev = vph->vpath->hldev;
	vp_id = vph->vpath->vp_id;

	switch (type) {
	case VXGE_HW_CHANNEL_TYPE_FIFO:
		size = sizeof(struct __vxge_hw_fifo);
		break;
	case VXGE_HW_CHANNEL_TYPE_RING:
		size = sizeof(struct __vxge_hw_ring);
		break;
	default:
		break;
	}

	channel = kzalloc(size, GFP_KERNEL);
	if (channel == NULL)
		goto exit0;
	INIT_LIST_HEAD(&channel->item);

	channel->common_reg = hldev->common_reg;
	channel->first_vp_id = hldev->first_vp_id;
	channel->type = type;
	channel->devh = hldev;
	channel->vph = vph;
	channel->userdata = userdata;
	channel->per_dtr_space = per_dtr_space;
	channel->length = length;
	channel->vp_id = vp_id;

	channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
	if (channel->work_arr == NULL)
		goto exit1;

	channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
	if (channel->free_arr == NULL)
		goto exit1;
	channel->free_ptr = length;

	channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
	if (channel->reserve_arr == NULL)
		goto exit1;
	channel->reserve_ptr = length;
	channel->reserve_top = 0;

	channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
	if (channel->orig_arr == NULL)
		goto exit1;

	return channel;
exit1:
	__vxge_hw_channel_free(channel);
exit0:
	return NULL;
}
/*
 * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
 * Adds a block to block pool
 */
static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
					void *block_addr,
					u32 length,
					struct pci_dev *dma_h,
					struct pci_dev *acc_handle)
{
	struct __vxge_hw_blockpool *blockpool;
	struct __vxge_hw_blockpool_entry *entry = NULL;
	dma_addr_t dma_addr;

	blockpool = &devh->block_pool;

	if (block_addr == NULL) {
		blockpool->req_out--;
		return;
	}

	dma_addr = pci_map_single(devh->pdev, block_addr, length,
				PCI_DMA_BIDIRECTIONAL);
	if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
		vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
		blockpool->req_out--;
		return;
	}

	if (!list_empty(&blockpool->free_entry_list))
		entry = list_first_entry(&blockpool->free_entry_list,
				struct __vxge_hw_blockpool_entry,
				item);

	if (entry == NULL)
		entry = vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
	else
		list_del(&entry->item);

	if (entry != NULL) {
		entry->length = length;
		entry->memblock = block_addr;
		entry->dma_addr = dma_addr;
		entry->acc_handle = acc_handle;
		entry->dma_handle = dma_h;
		list_add(&entry->item, &blockpool->free_block_list);
		blockpool->pool_size++;
	}

	blockpool->req_out--;
}
static inline void
vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh, unsigned long size)
{
	gfp_t flags;
	void *vaddr;

	if (in_interrupt())
		flags = GFP_ATOMIC | GFP_DMA;
	else
		flags = GFP_KERNEL | GFP_DMA;

	vaddr = kmalloc(size, flags);

	vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
}
/*
 * __vxge_hw_blockpool_blocks_add - Request additional blocks
 */
static
void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
{
	u32 nreq = 0, i;

	if ((blockpool->pool_size + blockpool->req_out) <
		VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) {
		nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
		blockpool->req_out += nreq;
	}

	for (i = 0; i < nreq; i++)
		vxge_os_dma_malloc_async(
			((struct __vxge_hw_device *)blockpool->hldev)->pdev,
			blockpool->hldev, VXGE_HW_BLOCK_SIZE);
}
/*
 * __vxge_hw_blockpool_malloc - Allocate a memory block from pool
 * Allocates a block of memory of given size, either from block pool
 * or by calling vxge_os_dma_malloc()
 */
static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
					struct vxge_hw_mempool_dma *dma_object)
{
	struct __vxge_hw_blockpool_entry *entry = NULL;
	struct __vxge_hw_blockpool *blockpool;
	void *memblock = NULL;

	blockpool = &devh->block_pool;

	if (size != blockpool->block_size) {
		/* odd-sized requests bypass the pool */
		memblock = vxge_os_dma_malloc(devh->pdev, size,
					&dma_object->handle,
					&dma_object->acc_handle);
		if (memblock == NULL)
			goto exit;

		dma_object->addr = pci_map_single(devh->pdev, memblock, size,
					PCI_DMA_BIDIRECTIONAL);
		if (unlikely(pci_dma_mapping_error(devh->pdev,
				dma_object->addr))) {
			vxge_os_dma_free(devh->pdev, memblock,
					&dma_object->acc_handle);
			/* do not return the just-freed block */
			memblock = NULL;
			goto exit;
		}
	} else {
		if (!list_empty(&blockpool->free_block_list))
			entry = list_first_entry(&blockpool->free_block_list,
					struct __vxge_hw_blockpool_entry,
					item);

		if (entry != NULL) {
			list_del(&entry->item);
			dma_object->addr = entry->dma_addr;
			dma_object->handle = entry->dma_handle;
			dma_object->acc_handle = entry->acc_handle;
			memblock = entry->memblock;

			list_add(&entry->item,
				&blockpool->free_entry_list);
			blockpool->pool_size--;
		}

		if (memblock != NULL)
			__vxge_hw_blockpool_blocks_add(blockpool);
	}
exit:
	return memblock;
}
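/*
 * Editorial usage sketch, not part of the driver: a pool-sized allocation and
 * its release; "devh" is hypothetical.  Requests of exactly
 * VXGE_HW_BLOCK_SIZE come from the pool, any other size is a direct DMA
 * allocation.
 */
#if 0
	struct vxge_hw_mempool_dma dma;
	void *blk;

	blk = __vxge_hw_blockpool_malloc(devh, VXGE_HW_BLOCK_SIZE, &dma);
	if (blk != NULL)
		__vxge_hw_blockpool_free(devh, blk, VXGE_HW_BLOCK_SIZE, &dma);
#endif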
/*
 * __vxge_hw_blockpool_blocks_remove - Free additional blocks
 */
static void
__vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &blockpool->free_block_list) {
		if (blockpool->pool_size < blockpool->pool_max)
			break;

		pci_unmap_single(
			((struct __vxge_hw_device *)blockpool->hldev)->pdev,
			((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
			((struct __vxge_hw_blockpool_entry *)p)->length,
			PCI_DMA_BIDIRECTIONAL);

		vxge_os_dma_free(
			((struct __vxge_hw_device *)blockpool->hldev)->pdev,
			((struct __vxge_hw_blockpool_entry *)p)->memblock,
			&((struct __vxge_hw_blockpool_entry *)p)->acc_handle);

		list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);

		list_add(p, &blockpool->free_entry_list);

		blockpool->pool_size--;
	}
}
/*
 * __vxge_hw_blockpool_free - Frees the memory allocated with
 * __vxge_hw_blockpool_malloc
 */
static void __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
				     void *memblock, u32 size,
				     struct vxge_hw_mempool_dma *dma_object)
{
	struct __vxge_hw_blockpool_entry *entry = NULL;
	struct __vxge_hw_blockpool *blockpool;
	enum vxge_hw_status status = VXGE_HW_OK;

	blockpool = &devh->block_pool;

	if (size != blockpool->block_size) {
		pci_unmap_single(devh->pdev, dma_object->addr, size,
			PCI_DMA_BIDIRECTIONAL);
		vxge_os_dma_free(devh->pdev, memblock,
			&dma_object->acc_handle);
	} else {
		if (!list_empty(&blockpool->free_entry_list))
			entry = list_first_entry(&blockpool->free_entry_list,
					struct __vxge_hw_blockpool_entry,
					item);

		if (entry == NULL)
			entry = vmalloc(sizeof(
					struct __vxge_hw_blockpool_entry));
		else
			list_del(&entry->item);

		if (entry != NULL) {
			entry->length = size;
			entry->memblock = memblock;
			entry->dma_addr = dma_object->addr;
			entry->acc_handle = dma_object->acc_handle;
			entry->dma_handle = dma_object->handle;
			list_add(&entry->item,
					&blockpool->free_block_list);
			blockpool->pool_size++;
			status = VXGE_HW_OK;
		} else
			status = VXGE_HW_ERR_OUT_OF_MEMORY;

		if (status == VXGE_HW_OK)
			__vxge_hw_blockpool_blocks_remove(blockpool);
	}
}
/*
 * __vxge_hw_mempool_destroy
 */
static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
{
	u32 i;
	struct __vxge_hw_device *devh = mempool->devh;

	for (i = 0; i < mempool->memblocks_allocated; i++) {
		struct vxge_hw_mempool_dma *dma_object;

		vxge_assert(mempool->memblocks_arr[i]);
		vxge_assert(mempool->memblocks_dma_arr + i);

		dma_object = mempool->memblocks_dma_arr + i;

		vfree(mempool->memblocks_priv_arr[i]);

		__vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i],
				mempool->memblock_size, dma_object);
	}

	vfree(mempool->items_arr);
	vfree(mempool->memblocks_dma_arr);
	vfree(mempool->memblocks_priv_arr);
	vfree(mempool->memblocks_arr);
	vfree(mempool);
}
/*
 * __vxge_hw_mempool_grow
 * Grow the mempool by %num_allocate additional memblocks.
 */
static enum vxge_hw_status
__vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
		       u32 *num_allocated)
{
	u32 i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0;
	u32 n_items = mempool->items_per_memblock;
	u32 start_block_idx = mempool->memblocks_allocated;
	u32 end_block_idx = mempool->memblocks_allocated + num_allocate;
	enum vxge_hw_status status = VXGE_HW_OK;

	*num_allocated = 0;

	if (end_block_idx > mempool->memblocks_max) {
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	for (i = start_block_idx; i < end_block_idx; i++) {
		u32 j;
		u32 is_last = ((end_block_idx - 1) == i);
		struct vxge_hw_mempool_dma *dma_object =
			mempool->memblocks_dma_arr + i;
		void *the_memblock;

		/* allocate memblock's private part. Each DMA memblock
		 * has a space allocated for item's private usage upon
		 * mempool's user request. Each time mempool grows, it will
		 * allocate new memblock and its private part at once.
		 * This helps to minimize memory usage a lot. */
		mempool->memblocks_priv_arr[i] =
			vzalloc(mempool->items_priv_size * n_items);
		if (mempool->memblocks_priv_arr[i] == NULL) {
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto exit;
		}

		/* allocate DMA-capable memblock */
		mempool->memblocks_arr[i] =
			__vxge_hw_blockpool_malloc(mempool->devh,
				mempool->memblock_size, dma_object);
		if (mempool->memblocks_arr[i] == NULL) {
			vfree(mempool->memblocks_priv_arr[i]);
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto exit;
		}

		(*num_allocated)++;
		mempool->memblocks_allocated++;

		memset(mempool->memblocks_arr[i], 0, mempool->memblock_size);

		the_memblock = mempool->memblocks_arr[i];

		/* fill the items hash array */
		for (j = 0; j < n_items; j++) {
			u32 index = i * n_items + j;

			if (first_time && index >= mempool->items_initial)
				break;

			mempool->items_arr[index] =
				((char *)the_memblock + j*mempool->item_size);

			/* let the caller do extra work on each item */
			if (mempool->item_func_alloc != NULL)
				mempool->item_func_alloc(mempool, i,
					dma_object, index, is_last);

			mempool->items_current = index + 1;
		}

		if (first_time && mempool->items_current ==
					mempool->items_initial)
			break;
	}
exit:
	return status;
}
/*
 * __vxge_hw_mempool_create
 * This function will create memory pool object. Pool may grow but will
 * never shrink. Pool consists of number of dynamically allocated blocks
 * with size enough to hold %items_initial number of items. Memory is
 * DMA-able but client must map/unmap before interoperating with the device.
 */
static struct vxge_hw_mempool *
__vxge_hw_mempool_create(struct __vxge_hw_device *devh,
			 u32 memblock_size,
			 u32 item_size,
			 u32 items_priv_size,
			 u32 items_initial,
			 u32 items_max,
			 struct vxge_hw_mempool_cbs *mp_callback,
			 void *userdata)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	u32 memblocks_to_allocate;
	struct vxge_hw_mempool *mempool = NULL;
	u32 allocated;

	if (memblock_size < item_size) {
		status = VXGE_HW_FAIL;
		goto exit;
	}

	mempool = vzalloc(sizeof(struct vxge_hw_mempool));
	if (mempool == NULL) {
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	mempool->devh = devh;
	mempool->memblock_size = memblock_size;
	mempool->items_max = items_max;
	mempool->items_initial = items_initial;
	mempool->item_size = item_size;
	mempool->items_priv_size = items_priv_size;
	mempool->item_func_alloc = mp_callback->item_func_alloc;
	mempool->userdata = userdata;

	mempool->memblocks_allocated = 0;

	mempool->items_per_memblock = memblock_size / item_size;

	mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) /
					mempool->items_per_memblock;

	/* allocate array of memblocks */
	mempool->memblocks_arr =
		vzalloc(sizeof(void *) * mempool->memblocks_max);
	if (mempool->memblocks_arr == NULL) {
		__vxge_hw_mempool_destroy(mempool);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		mempool = NULL;
		goto exit;
	}

	/* allocate array of private parts of items per memblocks */
	mempool->memblocks_priv_arr =
		vzalloc(sizeof(void *) * mempool->memblocks_max);
	if (mempool->memblocks_priv_arr == NULL) {
		__vxge_hw_mempool_destroy(mempool);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		mempool = NULL;
		goto exit;
	}

	/* allocate array of memblocks DMA objects */
	mempool->memblocks_dma_arr =
		vzalloc(sizeof(struct vxge_hw_mempool_dma) *
			mempool->memblocks_max);
	if (mempool->memblocks_dma_arr == NULL) {
		__vxge_hw_mempool_destroy(mempool);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		mempool = NULL;
		goto exit;
	}

	/* allocate hash array of items */
	mempool->items_arr = vzalloc(sizeof(void *) * mempool->items_max);
	if (mempool->items_arr == NULL) {
		__vxge_hw_mempool_destroy(mempool);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		mempool = NULL;
		goto exit;
	}

	/* calculate initial number of memblocks */
	memblocks_to_allocate = (mempool->items_initial +
				 mempool->items_per_memblock - 1) /
				 mempool->items_per_memblock;

	/* pre-allocate the mempool */
	status = __vxge_hw_mempool_grow(mempool, memblocks_to_allocate,
					&allocated);
	if (status != VXGE_HW_OK) {
		__vxge_hw_mempool_destroy(mempool);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		mempool = NULL;
		goto exit;
	}
exit:
	return mempool;
}
/*
 * __vxge_hw_ring_abort - Terminate the posted RxDs
 * This function completes every outstanding RxD of the ring, invokes the
 * rxd_term callback on each and returns it to the free pool
 */
static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
{
	void *rxdh;
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	for (;;) {
		vxge_hw_channel_dtr_try_complete(channel, &rxdh);
		if (rxdh == NULL)
			break;

		vxge_hw_channel_dtr_complete(channel);

		if (ring->rxd_term)
			ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED,
				channel->userdata);

		vxge_hw_channel_dtr_free(channel, rxdh);
	}

	return VXGE_HW_OK;
}
/*
 * __vxge_hw_ring_reset - Resets the ring
 * This function resets the ring during vpath reset operation
 */
static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	__vxge_hw_ring_abort(ring);

	status = __vxge_hw_channel_reset(channel);
	if (status != VXGE_HW_OK)
		goto exit;

	if (ring->rxd_init) {
		status = vxge_hw_ring_replenish(ring);
		if (status != VXGE_HW_OK)
			goto exit;
	}
exit:
	return status;
}
/*
 * __vxge_hw_ring_delete - Removes the ring
 * This function frees up the memory pool and removes the ring
 */
static enum vxge_hw_status
__vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
{
	struct __vxge_hw_ring *ring = vp->vpath->ringh;

	__vxge_hw_ring_abort(ring);

	if (ring->mempool)
		__vxge_hw_mempool_destroy(ring->mempool);

	vp->vpath->ringh = NULL;
	__vxge_hw_channel_free(&ring->channel);

	return VXGE_HW_OK;
}
/*
 * __vxge_hw_ring_create - Create a Ring
 * This function creates Ring and initializes it.
 */
static enum vxge_hw_status
__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
		      struct vxge_hw_ring_attr *attr)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_ring *ring;
	u32 ring_length;
	struct vxge_hw_ring_config *config;
	struct __vxge_hw_device *hldev;
	u32 vp_id;
	struct vxge_hw_mempool_cbs ring_mp_callback;

	if ((vp == NULL) || (attr == NULL)) {
		status = VXGE_HW_FAIL;
		goto exit;
	}

	hldev = vp->vpath->hldev;
	vp_id = vp->vpath->vp_id;

	config = &hldev->config.vp_config[vp_id].ring;

	ring_length = config->ring_blocks *
			vxge_hw_ring_rxds_per_block_get(config->buffer_mode);

	ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp,
						VXGE_HW_CHANNEL_TYPE_RING,
						ring_length,
						attr->per_rxd_space,
						attr->userdata);
	if (ring == NULL) {
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	vp->vpath->ringh = ring;
	ring->vp_id = vp_id;
	ring->vp_reg = vp->vpath->vp_reg;
	ring->common_reg = hldev->common_reg;
	ring->stats = &vp->vpath->sw_stats->ring_stats;
	ring->config = config;
	ring->callback = attr->callback;
	ring->rxd_init = attr->rxd_init;
	ring->rxd_term = attr->rxd_term;
	ring->buffer_mode = config->buffer_mode;
	ring->tim_rti_cfg1_saved = vp->vpath->tim_rti_cfg1_saved;
	ring->tim_rti_cfg3_saved = vp->vpath->tim_rti_cfg3_saved;
	ring->rxds_limit = config->rxds_limit;

	ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
	ring->rxd_priv_size =
		sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space;
	ring->per_rxd_space = attr->per_rxd_space;

	/* round the per-RxD private size up to a cache-line multiple */
	ring->rxd_priv_size =
		((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) /
		VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;

	/* how many RxDs can fit into one block. Depends on configured
	 * buffer_mode. */
	ring->rxds_per_block =
		vxge_hw_ring_rxds_per_block_get(config->buffer_mode);

	/* calculate actual RxD block private size */
	ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
	ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc;
	ring->mempool = __vxge_hw_mempool_create(hldev,
				VXGE_HW_BLOCK_SIZE,
				VXGE_HW_BLOCK_SIZE,
				ring->rxdblock_priv_size,
				ring->config->ring_blocks,
				ring->config->ring_blocks,
				&ring_mp_callback,
				ring);
	if (ring->mempool == NULL) {
		__vxge_hw_ring_delete(vp);
		return VXGE_HW_ERR_OUT_OF_MEMORY;
	}

	status = __vxge_hw_channel_initialize(&ring->channel);
	if (status != VXGE_HW_OK) {
		__vxge_hw_ring_delete(vp);
		goto exit;
	}

	/* Note:
	 * Specifying rxd_init callback means two things:
	 * 1) rxds need to be initialized by driver at channel-open time;
	 * 2) rxds need to be posted at channel-open time
	 *    (that's what the initial_replenish() below does)
	 * Currently we don't have a case when the 1) is done without the 2).
	 */
	if (ring->rxd_init) {
		status = vxge_hw_ring_replenish(ring);
		if (status != VXGE_HW_OK) {
			__vxge_hw_ring_delete(vp);
			goto exit;
		}
	}

	/* initial replenish will increment the counter in its post() routine,
	 * we have to reset it */
	ring->stats->common_stats.usage_cnt = 0;
exit:
	return status;
}
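/*
 * Editorial usage sketch, not part of the driver: filling the ring attributes
 * consumed above.  The my_* callbacks, "my_priv" and "vp" are hypothetical;
 * the field names match those read by __vxge_hw_ring_create().
 */
#if 0
	struct vxge_hw_ring_attr attr = {
		.callback	= my_rx_callback,
		.rxd_init	= my_rxd_init,
		.rxd_term	= my_rxd_term,
		.userdata	= my_priv,
		.per_rxd_space	= 0,
	};
	enum vxge_hw_status status;

	status = __vxge_hw_ring_create(vp, &attr);
#endif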
/*
 * vxge_hw_device_config_default_get - Initialize device config with defaults.
 * Initialize Titan device config with default values.
 */
enum vxge_hw_status __devinit
vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
{
	u32 i;

	device_config->dma_blockpool_initial =
					VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
	device_config->dma_blockpool_max = VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
	device_config->intr_mode = VXGE_HW_INTR_MODE_DEF;
	device_config->rth_en = VXGE_HW_RTH_DEFAULT;
	device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_DEFAULT;
	device_config->device_poll_millis = VXGE_HW_DEF_DEVICE_POLL_MILLIS;
	device_config->rts_mac_en = VXGE_HW_RTS_MAC_DEFAULT;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		device_config->vp_config[i].vp_id = i;

		device_config->vp_config[i].min_bandwidth =
					VXGE_HW_VPATH_BANDWIDTH_DEFAULT;

		device_config->vp_config[i].ring.enable = VXGE_HW_RING_DEFAULT;
		device_config->vp_config[i].ring.ring_blocks =
					VXGE_HW_DEF_RING_BLOCKS;
		device_config->vp_config[i].ring.buffer_mode =
					VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT;
		device_config->vp_config[i].ring.scatter_mode =
				VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT;
		device_config->vp_config[i].ring.rxds_limit =
					VXGE_HW_DEF_RING_RXDS_LIMIT;

		device_config->vp_config[i].fifo.enable = VXGE_HW_FIFO_ENABLE;
		device_config->vp_config[i].fifo.fifo_blocks =
					VXGE_HW_MIN_FIFO_BLOCKS;
		device_config->vp_config[i].fifo.max_frags =
					VXGE_HW_MAX_FIFO_FRAGS;
		device_config->vp_config[i].fifo.memblock_size =
					VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE;
		device_config->vp_config[i].fifo.alignment_size =
					VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE;
		device_config->vp_config[i].fifo.intr =
					VXGE_HW_FIFO_QUEUE_INTR_DEFAULT;
		device_config->vp_config[i].fifo.no_snoop_bits =
					VXGE_HW_FIFO_NO_SNOOP_DEFAULT;

		device_config->vp_config[i].tti.intr_enable =
					VXGE_HW_TIM_INTR_DEFAULT;
		device_config->vp_config[i].tti.btimer_val =
					VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].tti.timer_ac_en =
					VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].tti.timer_ci_en =
					VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].tti.timer_ri_en =
					VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].tti.rtimer_val =
					VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].tti.util_sel =
					VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].tti.ltimer_val =
					VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].tti.urange_a =
					VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].tti.uec_a =
					VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].tti.urange_b =
					VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].tti.uec_b =
					VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].tti.urange_c =
					VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].tti.uec_c =
					VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].tti.uec_d =
					VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.intr_enable =
					VXGE_HW_TIM_INTR_DEFAULT;
		device_config->vp_config[i].rti.btimer_val =
					VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].rti.timer_ac_en =
					VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].rti.timer_ci_en =
					VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].rti.timer_ri_en =
					VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].rti.rtimer_val =
					VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].rti.util_sel =
					VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].rti.ltimer_val =
					VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].rti.urange_a =
					VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].rti.uec_a =
					VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].rti.urange_b =
					VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].rti.uec_b =
					VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].rti.urange_c =
					VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].rti.uec_c =
					VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].rti.uec_d =
					VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].mtu =
				VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU;
		device_config->vp_config[i].rpa_strip_vlan_tag =
			VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT;
	}

	return VXGE_HW_OK;
}
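/*
 * Typical usage (sketch; how the caller hands the config on to device
 * initialization is up to the driver):
 *
 *	struct vxge_hw_device_config config;
 *
 *	vxge_hw_device_config_default_get(&config);
 *	config.intr_mode = VXGE_HW_INTR_MODE_MSIX;
 *
 * (the intr_mode override above is just an example)
 *
 * Fields left at VXGE_HW_USE_FLASH_DEFAULT tell the HW layer to keep the
 * corresponding flash-programmed values.
 */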
/*
 * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
 * Set the swapper bits appropriately for the vpath.
 */
static enum vxge_hw_status
__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
{
#ifndef __BIG_ENDIAN
	u64 val64;

	val64 = readq(&vpath_reg->vpath_general_cfg1);
	wmb();
	val64 |= VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN;
	writeq(val64, &vpath_reg->vpath_general_cfg1);
	wmb();
#endif
	return VXGE_HW_OK;
}
/*
 * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
 * Set the swapper bits appropriately for the kdfc.
 */
static enum vxge_hw_status
__vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg,
			   struct vxge_hw_vpath_reg __iomem *vpath_reg)
{
	u64 val64;

	val64 = readq(&legacy_reg->pifm_wr_swap_en);

	if (val64 == VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE) {
		val64 = readq(&vpath_reg->kdfcctl_cfg0);
		wmb();

		val64 |= VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0 |
			 VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1 |
			 VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2;

		writeq(val64, &vpath_reg->kdfcctl_cfg0);
		wmb();
	}

	return VXGE_HW_OK;
}
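/*
 * The per-FIFO byte-swap enables are set only when the legacy PIFM write
 * swapper is active, so that KDFC doorbell writes are byte-swapped
 * consistently with every other PIO write on little-endian hosts.
 */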
/*
 * vxge_hw_mgmt_reg_read - Read Titan register.
 */
enum vxge_hw_status
vxge_hw_mgmt_reg_read(struct __vxge_hw_device *hldev,
		      enum vxge_hw_mgmt_reg_type type,
		      u32 index, u32 offset, u64 *value)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
		status = VXGE_HW_ERR_INVALID_DEVICE;
		goto exit;
	}

	switch (type) {
	case vxge_hw_mgmt_reg_type_legacy:
		if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		*value = readq((void __iomem *)hldev->legacy_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_toc:
		if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		*value = readq((void __iomem *)hldev->toc_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_common:
		if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		*value = readq((void __iomem *)hldev->common_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_mrpcim:
		if (!(hldev->access_rights &
			VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
			status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
			break;
		}
		if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		*value = readq((void __iomem *)hldev->mrpcim_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_srpcim:
		if (!(hldev->access_rights &
			VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
			status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
			break;
		}
		if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
			status = VXGE_HW_ERR_INVALID_INDEX;
			break;
		}
		if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		*value = readq((void __iomem *)hldev->srpcim_reg[index] +
				offset);
		break;
	case vxge_hw_mgmt_reg_type_vpmgmt:
		if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
		    (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
			status = VXGE_HW_ERR_INVALID_INDEX;
			break;
		}
		if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		*value = readq((void __iomem *)hldev->vpmgmt_reg[index] +
				offset);
		break;
	case vxge_hw_mgmt_reg_type_vpath:
		if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||
		    (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
			status = VXGE_HW_ERR_INVALID_INDEX;
			break;
		}
		if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		*value = readq((void __iomem *)hldev->vpath_reg[index] +
				offset);
		break;
	default:
		status = VXGE_HW_ERR_INVALID_TYPE;
		break;
	}

exit:
	return status;
}
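/*
 * Example (sketch): reading the first quadword of the common register
 * space through the management API:
 *
 *	u64 val;
 *	enum vxge_hw_status rc;
 *
 *	rc = vxge_hw_mgmt_reg_read(hldev, vxge_hw_mgmt_reg_type_common,
 *				   0, 0, &val);
 *
 * The index argument matters only for the srpcim/vpmgmt/vpath spaces,
 * which are replicated per function or per path.
 */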
/*
 * vxge_hw_vpath_strip_fcs_check - Check for FCS strip.
 */
enum vxge_hw_status
vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
{
	struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
	enum vxge_hw_status status = VXGE_HW_OK;
	int i = 0, j = 0;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!((vpath_mask) & vxge_mBIT(i)))
			continue;
		vpmgmt_reg = hldev->vpmgmt_reg[i];
		for (j = 0; j < VXGE_HW_MAC_MAX_MAC_PORT_ID; j++) {
			if (readq(&vpmgmt_reg->rxmac_cfg0_port_vpmgmt_clone[j])
			& VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS)
				return VXGE_HW_FAIL;
		}
	}
	return status;
}
/*
 * vxge_hw_mgmt_reg_write - Write Titan register.
 */
enum vxge_hw_status
vxge_hw_mgmt_reg_write(struct __vxge_hw_device *hldev,
		       enum vxge_hw_mgmt_reg_type type,
		       u32 index, u32 offset, u64 value)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
		status = VXGE_HW_ERR_INVALID_DEVICE;
		goto exit;
	}

	switch (type) {
	case vxge_hw_mgmt_reg_type_legacy:
		if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		writeq(value, (void __iomem *)hldev->legacy_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_toc:
		if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		writeq(value, (void __iomem *)hldev->toc_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_common:
		if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		writeq(value, (void __iomem *)hldev->common_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_mrpcim:
		if (!(hldev->access_rights &
			VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
			status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
			break;
		}
		if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		writeq(value, (void __iomem *)hldev->mrpcim_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_srpcim:
		if (!(hldev->access_rights &
			VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
			status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
			break;
		}
		if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
			status = VXGE_HW_ERR_INVALID_INDEX;
			break;
		}
		if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		writeq(value, (void __iomem *)hldev->srpcim_reg[index] +
			offset);
		break;
	case vxge_hw_mgmt_reg_type_vpmgmt:
		if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
		    (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
			status = VXGE_HW_ERR_INVALID_INDEX;
			break;
		}
		if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		writeq(value, (void __iomem *)hldev->vpmgmt_reg[index] +
			offset);
		break;
	case vxge_hw_mgmt_reg_type_vpath:
		if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||
		    (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
			status = VXGE_HW_ERR_INVALID_INDEX;
			break;
		}
		if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		writeq(value, (void __iomem *)hldev->vpath_reg[index] +
			offset);
		break;
	default:
		status = VXGE_HW_ERR_INVALID_TYPE;
		break;
	}

exit:
	return status;
}
/*
 * __vxge_hw_fifo_abort - Returns the outstanding TxDs
 * This function terminates the TxDs of the fifo
 */
static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
{
	void *txdlh;

	for (;;) {
		vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);

		if (txdlh == NULL)
			break;

		vxge_hw_channel_dtr_complete(&fifo->channel);

		if (fifo->txdl_term) {
			fifo->txdl_term(txdlh,
					VXGE_HW_TXDL_STATE_POSTED,
					fifo->channel.userdata);
		}

		vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
	}

	return VXGE_HW_OK;
}
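/*
 * vxge_hw_channel_dtr_try_complete() returns NULL once no posted
 * descriptors remain on the channel, which is what terminates the drain
 * loop above; each posted TxDL is handed to the driver's txdl_term
 * callback before being returned to the free pool.
 */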
/*
 * __vxge_hw_fifo_reset - Resets the fifo
 * This function resets the fifo during vpath reset operation
 */
static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	__vxge_hw_fifo_abort(fifo);
	status = __vxge_hw_channel_reset(&fifo->channel);

	return status;
}
/*
 * __vxge_hw_fifo_delete - Removes the FIFO
 * This function frees up the memory pool and removes the FIFO
 */
static enum vxge_hw_status
__vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
{
	struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;

	__vxge_hw_fifo_abort(fifo);

	if (fifo->mempool)
		__vxge_hw_mempool_destroy(fifo->mempool);

	vp->vpath->fifoh = NULL;

	__vxge_hw_channel_free(&fifo->channel);

	return VXGE_HW_OK;
}
/*
 * __vxge_hw_fifo_mempool_item_alloc - Allocate list blocks for the TxD list
 * This function is the callback passed to __vxge_hw_mempool_create to
 * create the memory pool for the TxD list.
 */
static void
__vxge_hw_fifo_mempool_item_alloc(
	struct vxge_hw_mempool *mempoolh,
	u32 memblock_index, struct vxge_hw_mempool_dma *dma_object,
	u32 index, u32 is_last)
{
	u32 memblock_item_idx;
	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
	struct vxge_hw_fifo_txd *txdp =
		(struct vxge_hw_fifo_txd *)mempoolh->items_arr[index];
	struct __vxge_hw_fifo *fifo =
			(struct __vxge_hw_fifo *)mempoolh->userdata;
	void *memblock = mempoolh->memblocks_arr[memblock_index];

	vxge_assert(txdp);

	txdp->host_control = (u64) (size_t)
	__vxge_hw_mempool_item_priv(mempoolh, memblock_index, txdp,
					&memblock_item_idx);

	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);

	vxge_assert(txdl_priv);

	fifo->channel.reserve_arr[fifo->channel.reserve_ptr - 1 - index] = txdp;

	/* pre-format HW's TxDL's private */
	txdl_priv->dma_offset = (char *)txdp - (char *)memblock;
	txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset;
	txdl_priv->dma_handle = dma_object->handle;
	txdl_priv->memblock = memblock;
	txdl_priv->first_txdp = txdp;
	txdl_priv->next_txdl_priv = NULL;
	txdl_priv->alloc_frags = 0;
}
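/*
 * host_control stores the per-TxDL private-area pointer inside the TxD
 * itself, so the completion path can recover the driver-private state
 * from nothing but the descriptor address.
 */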
/*
 * __vxge_hw_fifo_create - Create a FIFO
 * This function creates a FIFO and initializes it.
 */
static enum vxge_hw_status
__vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
		      struct vxge_hw_fifo_attr *attr)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_fifo *fifo;
	struct vxge_hw_fifo_config *config;
	u32 txdl_size, txdl_per_memblock;
	struct vxge_hw_mempool_cbs fifo_mp_callback;
	struct __vxge_hw_virtualpath *vpath;

	if ((vp == NULL) || (attr == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}
	vpath = vp->vpath;
	config = &vpath->hldev->config.vp_config[vpath->vp_id].fifo;

	txdl_size = config->max_frags * sizeof(struct vxge_hw_fifo_txd);

	txdl_per_memblock = config->memblock_size / txdl_size;

	fifo = (struct __vxge_hw_fifo *)__vxge_hw_channel_allocate(vp,
					VXGE_HW_CHANNEL_TYPE_FIFO,
					config->fifo_blocks * txdl_per_memblock,
					attr->per_txdl_space, attr->userdata);
	if (fifo == NULL) {
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	vpath->fifoh = fifo;
	fifo->nofl_db = vpath->nofl_db;

	fifo->vp_id = vpath->vp_id;
	fifo->vp_reg = vpath->vp_reg;
	fifo->stats = &vpath->sw_stats->fifo_stats;

	fifo->config = config;

	/* apply "interrupts per txdl" attribute */
	fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ;
	fifo->tim_tti_cfg1_saved = vpath->tim_tti_cfg1_saved;
	fifo->tim_tti_cfg3_saved = vpath->tim_tti_cfg3_saved;

	if (fifo->config->intr)
		fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;

	fifo->no_snoop_bits = config->no_snoop_bits;

	/*
	 * FIFO memory management strategy:
	 *
	 * A TxDL is split into three independent parts:
	 *	- set of TxD's
	 *	- TxD HW private part
	 *	- driver private part
	 *
	 * Adaptive memory allocation is used, i.e. memory is allocated on
	 * demand with a size that fits into one memory block.
	 * One memory block may contain more than one TxDL.
	 *
	 * During "reserve" operations more memory can be allocated on
	 * demand, for example due to a FIFO full condition.
	 *
	 * The pool of memory blocks never shrinks except in the
	 * __vxge_hw_fifo_close routine, which essentially stops the
	 * channel and frees the resources.
	 */

	/* TxDL common private size == TxDL private + driver private */
	fifo->priv_size =
		sizeof(struct __vxge_hw_fifo_txdl_priv) + attr->per_txdl_space;
	fifo->priv_size = ((fifo->priv_size + VXGE_CACHE_LINE_SIZE - 1) /
			VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;

	fifo->per_txdl_space = attr->per_txdl_space;

	/* recompute txdl size to be cacheline aligned */
	fifo->txdl_size = txdl_size;
	fifo->txdl_per_memblock = txdl_per_memblock;

	fifo->txdl_term = attr->txdl_term;
	fifo->callback = attr->callback;

	if (fifo->txdl_per_memblock == 0) {
		__vxge_hw_fifo_delete(vp);
		status = VXGE_HW_ERR_INVALID_BLOCK_SIZE;
		goto exit;
	}

	fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;

	fifo->mempool =
		__vxge_hw_mempool_create(vpath->hldev,
			fifo->config->memblock_size,
			fifo->txdl_size,
			fifo->priv_size,
			(fifo->config->fifo_blocks * fifo->txdl_per_memblock),
			(fifo->config->fifo_blocks * fifo->txdl_per_memblock),
			&fifo_mp_callback,
			fifo);
	if (fifo->mempool == NULL) {
		__vxge_hw_fifo_delete(vp);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	status = __vxge_hw_channel_initialize(&fifo->channel);
	if (status != VXGE_HW_OK) {
		__vxge_hw_fifo_delete(vp);
		goto exit;
	}

	vxge_assert(fifo->channel.reserve_ptr);
exit:
	return status;
}
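/*
 * Note that items_initial equals items_max in the mempool call above, so
 * in this driver the TxDL pool is in practice fully preallocated, even
 * though the mempool layer itself supports the on-demand growth described
 * in the strategy comment.
 */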
/*
 * __vxge_hw_vpath_pci_read - Read the content of given address
 *                            in pci config space.
 * Read from the vpath pci config space.
 */
static enum vxge_hw_status
__vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
			 u32 phy_func_0, u32 offset, u32 *val)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;

	val64 = VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset);

	if (phy_func_0)
		val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0;

	writeq(val64, &vp_reg->pci_config_access_cfg1);
	wmb();
	writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ,
			&vp_reg->pci_config_access_cfg2);
	wmb();

	status = __vxge_hw_device_register_poll(
			&vp_reg->pci_config_access_cfg2,
			VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS);

	if (status != VXGE_HW_OK)
		goto exit;

	val64 = readq(&vp_reg->pci_config_access_status);

	if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) {
		status = VXGE_HW_FAIL;
		*val = 0;
	} else
		*val = (u32)vxge_bVALn(val64, 32, 32);
exit:
	return status;
}
/**
 * vxge_hw_device_flick_link_led - Flick (blink) link LED.
 * @hldev: HW device.
 * @on_off: TRUE to turn flickering on, FALSE to turn it off
 *
 * Flicker the link LED.
 */
enum vxge_hw_status
vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev, u64 on_off)
{
	struct __vxge_hw_virtualpath *vpath;
	u64 data0, data1 = 0, steer_ctrl = 0;
	enum vxge_hw_status status;

	if (hldev == NULL) {
		status = VXGE_HW_ERR_INVALID_DEVICE;
		goto exit;
	}

	vpath = &hldev->virtual_paths[hldev->first_vp_id];

	data0 = on_off;
	status = vxge_hw_vpath_fw_api(vpath,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
			0, &data0, &data1, &steer_ctrl);
exit:
	return status;
}
/*
 * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables
 */
enum vxge_hw_status
__vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp,
			      u32 action, u32 rts_table, u32 offset,
			      u64 *data0, u64 *data1)
{
	enum vxge_hw_status status;
	u64 steer_ctrl = 0;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	if ((rts_table ==
	     VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
	    (rts_table ==
	     VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
	    (rts_table ==
	     VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
	    (rts_table ==
	     VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
		steer_ctrl = VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
	}

	status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
				      data0, data1, &steer_ctrl);
	if (status != VXGE_HW_OK)
		goto exit;

	if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) &&
	    (rts_table !=
	     VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
		*data1 = 0;
exit:
	return status;
}
/*
 * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables
 */
enum vxge_hw_status
__vxge_hw_vpath_rts_table_set(struct __vxge_hw_vpath_handle *vp, u32 action,
			      u32 rts_table, u32 offset, u64 steer_data0,
			      u64 steer_data1)
{
	u64 data0, data1 = 0, steer_ctrl = 0;
	enum vxge_hw_status status;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	data0 = steer_data0;

	if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
	    (rts_table ==
	     VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
		data1 = steer_data1;

	status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
				      &data0, &data1, &steer_ctrl);
exit:
	return status;
}
/*
 * vxge_hw_vpath_rts_rth_set - Set/configure RTS hashing.
 */
enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
			struct __vxge_hw_vpath_handle *vp,
			enum vxge_hw_rth_algoritms algorithm,
			struct vxge_hw_rth_hash_types *hash_type,
			u16 bucket_size)
{
	u64 data0, data1;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_get(vp,
		     VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
		     VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
		     0, &data0, &data1);
	if (status != VXGE_HW_OK)
		goto exit;

	data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) |
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3));

	data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN |
	VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(bucket_size) |
	VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(algorithm);

	if (hash_type->hash_type_tcpipv4_en)
		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN;

	if (hash_type->hash_type_ipv4_en)
		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN;

	if (hash_type->hash_type_tcpipv6_en)
		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN;

	if (hash_type->hash_type_ipv6_en)
		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN;

	if (hash_type->hash_type_tcpipv6ex_en)
		data0 |=
		VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN;

	if (hash_type->hash_type_ipv6ex_en)
		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN;

	if (VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(data0))
		data0 &= ~VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
	else
		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;

	status = __vxge_hw_vpath_rts_table_set(vp,
		     VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY,
		     VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
		     0, data0, 0);
exit:
	return status;
}
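/*
 * The ACTIVE_TABLE bit is toggled rather than set absolutely: the
 * adapter keeps two generations of the RTH configuration, and flipping
 * the bit switches lookups over to the freshly programmed one.
 */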
static void
vxge_hw_rts_rth_data0_data1_get(u32 j, u64 *data0, u64 *data1,
				u16 flag, u8 *itable)
{
	/* Each flag value fills exactly one of the four item slots that a
	 * single firmware call can carry (two in data0, two in data1). */
	switch (flag) {
	case 1:
		*data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(j) |
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN |
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(
			itable[j]);
		break;
	case 2:
		*data0 |=
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j) |
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN |
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(
			itable[j]);
		break;
	case 3:
		*data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j) |
			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN |
			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(
			itable[j]);
		break;
	case 4:
		*data1 |=
			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j) |
			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN |
			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA(
			itable[j]);
		break;
	default:
		return;
	}
}
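/*
 * A single RTS firmware call carries two 64-bit payloads, and each
 * payload holds two indirection-table items; the helper above therefore
 * lets the MULTI_IT path below pack up to four bucket entries per call.
 */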
/*
 * vxge_hw_vpath_rts_rth_itable_set - Set/configure indirection table (IT).
 */
enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
			struct __vxge_hw_vpath_handle **vpath_handles,
			u32 vpath_count,
			u8 *mtable,
			u8 *itable,
			u32 itable_size)
{
	u32 i, j, action, rts_table;
	u64 data0;
	u64 data1;
	u32 max_entries;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_vpath_handle *vp = vpath_handles[0];

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	max_entries = (((u32)1) << itable_size);

	if (vp->vpath->hldev->config.rth_it_type
		== VXGE_HW_RTH_IT_TYPE_SOLO_IT) {
		action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
		rts_table =
			VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT;

		for (j = 0; j < max_entries; j++) {

			data1 = 0;

			data0 =
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
				itable[j]);

			status = __vxge_hw_vpath_rts_table_set(vpath_handles[0],
				action, rts_table, j, data0, data1);

			if (status != VXGE_HW_OK)
				goto exit;
		}

		for (j = 0; j < max_entries; j++) {

			data1 = 0;

			data0 =
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN |
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
				itable[j]);

			status = __vxge_hw_vpath_rts_table_set(
				vpath_handles[mtable[itable[j]]], action,
				rts_table, j, data0, data1);

			if (status != VXGE_HW_OK)
				goto exit;
		}
	} else {
		action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
		rts_table =
			VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT;
		for (i = 0; i < vpath_count; i++) {

			for (j = 0; j < max_entries;) {

				data0 = 0;
				data1 = 0;

				while (j < max_entries) {
					if (mtable[itable[j]] != i) {
						j++;
						continue;
					}
					vxge_hw_rts_rth_data0_data1_get(j,
						&data0, &data1, 1, itable);
					j++;
					break;
				}

				while (j < max_entries) {
					if (mtable[itable[j]] != i) {
						j++;
						continue;
					}
					vxge_hw_rts_rth_data0_data1_get(j,
						&data0, &data1, 2, itable);
					j++;
					break;
				}

				while (j < max_entries) {
					if (mtable[itable[j]] != i) {
						j++;
						continue;
					}
					vxge_hw_rts_rth_data0_data1_get(j,
						&data0, &data1, 3, itable);
					j++;
					break;
				}

				while (j < max_entries) {
					if (mtable[itable[j]] != i) {
						j++;
						continue;
					}
					vxge_hw_rts_rth_data0_data1_get(j,
						&data0, &data1, 4, itable);
					j++;
					break;
				}

				if (data0 != 0) {
					status = __vxge_hw_vpath_rts_table_set(
							vpath_handles[i],
							action, rts_table,
							0, data0, data1);

					if (status != VXGE_HW_OK)
						goto exit;
				}
			}
		}
	}
exit:
	return status;
}
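/*
 * In the SOLO_IT case the table is written in two passes: first every
 * bucket is programmed without ENTRY_EN through vpath_handles[0], then
 * each bucket is rewritten with ENTRY_EN through the vpath that owns it
 * according to mtable.
 */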
/**
 * vxge_hw_vpath_check_leak - Check for memory leak
 * @ring: Handle to the ring object used for receive
 *
 * If PRC_RXD_DOORBELL_VPn.NEW_QW_CNT is greater than or equal to
 * PRC_CFG6_VPn.RXD_SPAT then a leak has occurred.
 * Returns: VXGE_HW_FAIL, if leak has occurred.
 *
 */
enum vxge_hw_status
vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ring)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	u64 rxd_new_count, rxd_spat;

	if (ring == NULL)
		return status;

	rxd_new_count = readl(&ring->vp_reg->prc_rxd_doorbell);
	rxd_spat = readq(&ring->vp_reg->prc_cfg6);
	rxd_spat = VXGE_HW_PRC_CFG6_RXD_SPAT(rxd_spat);

	if (rxd_new_count >= rxd_spat)
		status = VXGE_HW_FAIL;

	return status;
}
/*
 * __vxge_hw_vpath_mgmt_read
 * This routine reads the vpath_mgmt registers
 */
static enum vxge_hw_status
__vxge_hw_vpath_mgmt_read(
	struct __vxge_hw_device *hldev,
	struct __vxge_hw_virtualpath *vpath)
{
	u32 i, mtu = 0, max_pyld = 0;
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {

		val64 = readq(&vpath->vpmgmt_reg->
				rxmac_cfg0_port_vpmgmt_clone[i]);
		max_pyld =
			(u32)
			VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN
			(val64);
		if (mtu < max_pyld)
			mtu = max_pyld;
	}

	vpath->max_mtu = mtu + VXGE_HW_MAC_HEADER_MAX_SIZE;

	val64 = readq(&vpath->vpmgmt_reg->xmac_vsport_choices_vp);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (val64 & vxge_mBIT(i))
			vpath->vsport_number = i;
	}

	val64 = readq(&vpath->vpmgmt_reg->xgmac_gen_status_vpmgmt_clone);

	if (val64 & VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK)
		VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_UP);
	else
		VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN);

	return status;
}
/*
 * __vxge_hw_vpath_reset_check - Check if resetting the vpath completed
 * This routine checks the vpath_rst_in_prog register to see if the
 * adapter completed the reset process for the vpath
 */
static enum vxge_hw_status
__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
{
	enum vxge_hw_status status;

	status = __vxge_hw_device_register_poll(
			&vpath->hldev->common_reg->vpath_rst_in_prog,
			VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(
				1 << (16 - vpath->vp_id)),
			vpath->hldev->config.device_poll_millis);

	return status;
}
/*
 * __vxge_hw_vpath_reset
 * This routine resets the vpath on the device
 */
static enum vxge_hw_status
__vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id));

	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
				&hldev->common_reg->cmn_rsthdlr_cfg0);

	return status;
}
/*
 * __vxge_hw_vpath_sw_reset
 * This routine resets the vpath structures
 */
static enum vxge_hw_status
__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_virtualpath *vpath;

	vpath = &hldev->virtual_paths[vp_id];

	if (vpath->ringh) {
		status = __vxge_hw_ring_reset(vpath->ringh);
		if (status != VXGE_HW_OK)
			goto exit;
	}

	if (vpath->fifoh)
		status = __vxge_hw_fifo_reset(vpath->fifoh);
exit:
	return status;
}
/*
 * __vxge_hw_vpath_prc_configure
 * This routine configures the prc registers of the virtual path using the
 * config passed
 */
static void
__vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	struct vxge_hw_vp_config *vp_config;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	vpath = &hldev->virtual_paths[vp_id];
	vp_reg = vpath->vp_reg;
	vp_config = vpath->vp_config;

	if (vp_config->ring.enable == VXGE_HW_RING_DISABLE)
		return;

	val64 = readq(&vp_reg->prc_cfg1);
	val64 |= VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE;
	writeq(val64, &vp_reg->prc_cfg1);

	val64 = readq(&vpath->vp_reg->prc_cfg6);
	val64 |= VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN;
	writeq(val64, &vpath->vp_reg->prc_cfg6);

	val64 = readq(&vp_reg->prc_cfg7);

	if (vpath->vp_config->ring.scatter_mode !=
		VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT) {

		val64 &= ~VXGE_HW_PRC_CFG7_SCATTER_MODE(0x3);

		switch (vpath->vp_config->ring.scatter_mode) {
		case VXGE_HW_RING_SCATTER_MODE_A:
			val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
					VXGE_HW_PRC_CFG7_SCATTER_MODE_A);
			break;
		case VXGE_HW_RING_SCATTER_MODE_B:
			val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
					VXGE_HW_PRC_CFG7_SCATTER_MODE_B);
			break;
		case VXGE_HW_RING_SCATTER_MODE_C:
			val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
					VXGE_HW_PRC_CFG7_SCATTER_MODE_C);
			break;
		}
	}

	writeq(val64, &vp_reg->prc_cfg7);

	writeq(VXGE_HW_PRC_CFG5_RXD0_ADD(
		__vxge_hw_ring_first_block_address_get(
			vpath->ringh) >> 3), &vp_reg->prc_cfg5);

	val64 = readq(&vp_reg->prc_cfg4);
	val64 |= VXGE_HW_PRC_CFG4_IN_SVC;
	val64 &= ~VXGE_HW_PRC_CFG4_RING_MODE(0x3);

	val64 |= VXGE_HW_PRC_CFG4_RING_MODE(
			VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER);

	if (hldev->config.rth_en == VXGE_HW_RTH_DISABLE)
		val64 |= VXGE_HW_PRC_CFG4_RTH_DISABLE;
	else
		val64 &= ~VXGE_HW_PRC_CFG4_RTH_DISABLE;

	writeq(val64, &vp_reg->prc_cfg4);
}
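/*
 * Note that PRC_CFG4 always selects one-buffer ring mode here,
 * independently of the buffer_mode chosen for the RxD block layout.
 */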
/*
 * __vxge_hw_vpath_kdfc_configure
 * This routine configures the kdfc registers of the virtual path using the
 * config passed
 */
static enum vxge_hw_status
__vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
	u64 val64;
	u64 vpath_stride;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_virtualpath *vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	vpath = &hldev->virtual_paths[vp_id];
	vp_reg = vpath->vp_reg;
	status = __vxge_hw_kdfc_swapper_set(hldev->legacy_reg, vp_reg);
	if (status != VXGE_HW_OK)
		goto exit;

	val64 = readq(&vp_reg->kdfc_drbl_triplet_total);

	vpath->max_kdfc_db =
		(u32)VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(
			val64 + 1) / 2;

	if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {

		vpath->max_nofl_db = vpath->max_kdfc_db;

		if (vpath->max_nofl_db <
			((vpath->vp_config->fifo.memblock_size /
			(vpath->vp_config->fifo.max_frags *
			sizeof(struct vxge_hw_fifo_txd))) *
			vpath->vp_config->fifo.fifo_blocks)) {
			return VXGE_HW_BADCFG_FIFO_BLOCKS;
		}

		val64 = VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(
				(vpath->max_nofl_db * 2) - 1);
	}

	writeq(val64, &vp_reg->kdfc_fifo_trpl_partition);

	writeq(VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE,
		&vp_reg->kdfc_fifo_trpl_ctrl);

	val64 = readq(&vp_reg->kdfc_trpl_fifo_0_ctrl);

	val64 &= ~(VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(0x3) |
		   VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0xFF));

	val64 |= VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(
		 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY) |
#ifndef __BIG_ENDIAN
		 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN |
#endif
		 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0);

	writeq(val64, &vp_reg->kdfc_trpl_fifo_0_ctrl);
	writeq((u64)0, &vp_reg->kdfc_trpl_fifo_0_wb_address);
	wmb();

	vpath_stride = readq(&hldev->toc_reg->toc_kdfc_vpath_stride);

	vpath->nofl_db =
		(struct __vxge_hw_non_offload_db_wrapper __iomem *)
		(hldev->kdfc + (vp_id *
		VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(
					vpath_stride)));
exit:
	return status;
}
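/*
 * Only KDFC FIFO 0 is brought into service here, in non-offload-only
 * mode; FIFOs 1 and 2 are left unused apart from the byte-swap enables
 * programmed by __vxge_hw_kdfc_swapper_set().
 */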
/*
 * __vxge_hw_vpath_mac_configure
 * This routine configures the mac of the virtual path using the config passed
 */
static enum vxge_hw_status
__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_virtualpath *vpath;
	struct vxge_hw_vp_config *vp_config;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	vpath = &hldev->virtual_paths[vp_id];
	vp_reg = vpath->vp_reg;
	vp_config = vpath->vp_config;

	writeq(VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(
			vpath->vsport_number), &vp_reg->xmac_vsport_choice);

	if (vp_config->ring.enable == VXGE_HW_RING_ENABLE) {

		val64 = readq(&vp_reg->xmac_rpa_vcfg);

		if (vp_config->rpa_strip_vlan_tag !=
			VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) {
			if (vp_config->rpa_strip_vlan_tag)
				val64 |= VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
			else
				val64 &= ~VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
		}

		writeq(val64, &vp_reg->xmac_rpa_vcfg);
		val64 = readq(&vp_reg->rxmac_vcfg0);

		if (vp_config->mtu !=
				VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) {
			val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
			if ((vp_config->mtu +
				VXGE_HW_MAC_HEADER_MAX_SIZE) < vpath->max_mtu)
				val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
					vp_config->mtu +
					VXGE_HW_MAC_HEADER_MAX_SIZE);
			else
				val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
					vpath->max_mtu);
		}

		writeq(val64, &vp_reg->rxmac_vcfg0);

		val64 = readq(&vp_reg->rxmac_vcfg1);

		val64 &= ~(VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(0x3) |
			VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE);

		if (hldev->config.rth_it_type ==
				VXGE_HW_RTH_IT_TYPE_MULTI_IT) {
			val64 |= VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(
				0x2) |
				VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE;
		}

		writeq(val64, &vp_reg->rxmac_vcfg1);
	}
	return status;
}
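/*
 * The frame length programmed into RXMAC_VCFG0 includes the MAC header;
 * when the configured MTU plus header would exceed the path's max_mtu,
 * the maximum is programmed instead.
 */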
/*
 * __vxge_hw_vpath_tim_configure
 * This routine configures the tim registers of the virtual path using the
 * config passed
 */
static enum vxge_hw_status
__vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_virtualpath *vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg;
	struct vxge_hw_vp_config *config;

	vpath = &hldev->virtual_paths[vp_id];
	vp_reg = vpath->vp_reg;
	config = vpath->vp_config;

	writeq(0, &vp_reg->tim_dest_addr);
	writeq(0, &vp_reg->tim_vpath_map);
	writeq(0, &vp_reg->tim_bitmap);
	writeq(0, &vp_reg->tim_remap);

	if (config->ring.enable == VXGE_HW_RING_ENABLE)
		writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM(
			(vp_id * VXGE_HW_MAX_INTR_PER_VP) +
			VXGE_HW_VPATH_INTR_RX), &vp_reg->tim_ring_assn);

	val64 = readq(&vp_reg->tim_pci_cfg);
	val64 |= VXGE_HW_TIM_PCI_CFG_ADD_PAD;
	writeq(val64, &vp_reg->tim_pci_cfg);

	if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {

		val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);

		if (config->tti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
				0x3ffffff);
			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
				config->tti.btimer_val);
		}

		val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;

		if (config->tti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
			if (config->tti.timer_ac_en)
				val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
			else
				val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
		}

		if (config->tti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
			if (config->tti.timer_ci_en)
				val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
			else
				val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
		}

		if (config->tti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
					config->tti.urange_a);
		}

		if (config->tti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
					config->tti.urange_b);
		}

		if (config->tti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
					config->tti.urange_c);
		}

		writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
		vpath->tim_tti_cfg1_saved = val64;

		val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);

		if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
						config->tti.uec_a);
		}

		if (config->tti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
						config->tti.uec_b);
		}

		if (config->tti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
						config->tti.uec_c);
		}

		if (config->tti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
						config->tti.uec_d);
		}

		writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);

		val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);

		if (config->tti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
			if (config->tti.timer_ri_en)
				val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
			else
				val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
		}

		if (config->tti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
					0x3ffffff);
			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
					config->tti.rtimer_val);
		}

		if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id);
		}

		if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
					0x3ffffff);
			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
					config->tti.ltimer_val);
		}

		writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
		vpath->tim_tti_cfg3_saved = val64;
	}

	if (config->ring.enable == VXGE_HW_RING_ENABLE) {

		val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);

		if (config->rti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
					0x3ffffff);
			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
					config->rti.btimer_val);
		}

		val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;

		if (config->rti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
			if (config->rti.timer_ac_en)
				val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
			else
				val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
		}

		if (config->rti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
			if (config->rti.timer_ci_en)
				val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
			else
				val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
		}

		if (config->rti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
					config->rti.urange_a);
		}

		if (config->rti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
					config->rti.urange_b);
		}

		if (config->rti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
					config->rti.urange_c);
		}

		writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
		vpath->tim_rti_cfg1_saved = val64;

		val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);

		if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
						config->rti.uec_a);
		}

		if (config->rti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
						config->rti.uec_b);
		}

		if (config->rti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
						config->rti.uec_c);
		}

		if (config->rti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
						config->rti.uec_d);
		}

		writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);

		val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);

		if (config->rti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
			if (config->rti.timer_ri_en)
				val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
			else
				val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
		}

		if (config->rti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
					0x3ffffff);
			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
					config->rti.rtimer_val);
		}

		if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id);
		}

		if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
					0x3ffffff);
			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
					config->rti.ltimer_val);
		}

		writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
		vpath->tim_rti_cfg3_saved = val64;
	}

	val64 = 0;
	writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_EINTA]);
	writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_EINTA]);
	writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_EINTA]);
	writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_BMAP]);
	writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]);
	writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]);

	val64 = VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_PRD(150);
	val64 |= VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_DIV(0);
	val64 |= VXGE_HW_TIM_WRKLD_CLC_CNT_RX_TX(3);
	writeq(val64, &vp_reg->tim_wrkld_clc);

	return status;
}
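/*
 * The cfg1/cfg3 values are cached in tim_tti_cfg*_saved and
 * tim_rti_cfg*_saved so that later interrupt-moderation updates can
 * start from the values actually programmed here instead of re-reading
 * the registers.
 */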
/*
 * __vxge_hw_vpath_initialize
 * This routine is the final phase of init which initializes the
 * registers of the vpath using the configuration passed.
 */
static enum vxge_hw_status
__vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
{
	u64 val64;
	u32 val32;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_virtualpath *vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	vpath = &hldev->virtual_paths[vp_id];

	if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
		status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
		goto exit;
	}
	vp_reg = vpath->vp_reg;

	status = __vxge_hw_vpath_swapper_set(vpath->vp_reg);
	if (status != VXGE_HW_OK)
		goto exit;

	status = __vxge_hw_vpath_mac_configure(hldev, vp_id);
	if (status != VXGE_HW_OK)
		goto exit;

	status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id);
	if (status != VXGE_HW_OK)
		goto exit;

	status = __vxge_hw_vpath_tim_configure(hldev, vp_id);
	if (status != VXGE_HW_OK)
		goto exit;

	val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl);

	/* Get MRRS value from device control */
	status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);
	if (status == VXGE_HW_OK) {
		val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
		val64 &=
		    ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(7));
		val64 |=
		    VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val32);

		val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE;
	}

	val64 &= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(7));
	val64 |=
	    VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(
		    VXGE_HW_MAX_PAYLOAD_SIZE_512);

	val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN;
	writeq(val64, &vp_reg->rtdma_rd_optimization_ctrl);

exit:
	return status;
}
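/*
 * Offset 0x78 read above is the PCIe Device Control register of
 * function 0 (assuming the standard capability placement for this
 * device); bits 14:12 encode Max_Read_Request_Size, which is reused
 * directly as the read-DMA fill threshold.
 */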
/*
 * __vxge_hw_vp_terminate - Terminate Virtual Path structure
 * This routine closes all channels it opened and frees up the memory
 */
static void __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
{
	struct __vxge_hw_virtualpath *vpath;

	vpath = &hldev->virtual_paths[vp_id];

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
		goto exit;

	VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
		vpath->hldev->tim_int_mask1, vpath->vp_id);
	hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;

	/* If the whole struct __vxge_hw_virtualpath is zeroed, nothing will
	 * work after the interface is brought down.
	 */
	spin_lock(&vpath->lock);
	vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
	spin_unlock(&vpath->lock);

	vpath->vpmgmt_reg = NULL;
	vpath->nofl_db = NULL;
	vpath->max_mtu = 0;
	vpath->vsport_number = 0;
	vpath->max_kdfc_db = 0;
	vpath->max_nofl_db = 0;
	vpath->ringh = NULL;
	vpath->fifoh = NULL;
	memset(&vpath->vpath_handles, 0, sizeof(struct list_head));
	vpath->stats_block = NULL;
	vpath->hw_stats = NULL;
	vpath->hw_stats_sav = NULL;
	vpath->sw_stats = NULL;

exit:
	return;
}
/*
 * __vxge_hw_vp_initialize - Initialize Virtual Path structure
 * This routine is the initial phase of init which resets the vpath and
 * initializes the software support structures.
 */
static enum vxge_hw_status
__vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
			struct vxge_hw_vp_config *config)
{
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
		status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
		goto exit;
	}

	vpath = &hldev->virtual_paths[vp_id];

	spin_lock_init(&vpath->lock);
	vpath->vp_id = vp_id;
	vpath->vp_open = VXGE_HW_VP_OPEN;
	vpath->hldev = hldev;
	vpath->vp_config = config;
	vpath->vp_reg = hldev->vpath_reg[vp_id];
	vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id];

	__vxge_hw_vpath_reset(hldev, vp_id);

	status = __vxge_hw_vpath_reset_check(vpath);
	if (status != VXGE_HW_OK) {
		memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
		goto exit;
	}

	status = __vxge_hw_vpath_mgmt_read(hldev, vpath);
	if (status != VXGE_HW_OK) {
		memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
		goto exit;
	}

	INIT_LIST_HEAD(&vpath->vpath_handles);

	vpath->sw_stats = &hldev->stats.sw_dev_info_stats.vpath_info[vp_id];

	VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
		hldev->tim_int_mask1, vp_id);

	status = __vxge_hw_vpath_initialize(hldev, vp_id);
	if (status != VXGE_HW_OK)
		__vxge_hw_vp_terminate(hldev, vp_id);
exit:
	return status;
}
/*
 * vxge_hw_vpath_mtu_set - Set MTU.
 * Set a new MTU value. For example, to use jumbo frames:
 * vxge_hw_vpath_mtu_set(my_device, 9600);
 */
enum vxge_hw_status
vxge_hw_vpath_mtu_set(struct __vxge_hw_vpath_handle *vp, u32 new_mtu)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_virtualpath *vpath;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}
	vpath = vp->vpath;

	new_mtu += VXGE_HW_MAC_HEADER_MAX_SIZE;

	/* reject out-of-range values without touching the hardware */
	if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > vpath->max_mtu)) {
		status = VXGE_HW_ERR_INVALID_MTU_SIZE;
		goto exit;
	}

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
	val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_mtu);

	writeq(val64, &vpath->vp_reg->rxmac_vcfg0);

	vpath->vp_config->mtu = new_mtu - VXGE_HW_MAC_HEADER_MAX_SIZE;

exit:
	return status;
}
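/*
 * Callers pass the payload MTU; VXGE_HW_MAC_HEADER_MAX_SIZE is added
 * internally before the range check and register programming, and
 * subtracted again when the value is stored back into vp_config.
 */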
/*
 * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics.
 * Enable the DMA vpath statistics. The function is to be called to
 * re-enable the adapter to update stats into the host memory.
 */
static enum vxge_hw_status
vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_virtualpath *vpath;

	vpath = vp->vpath;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	memcpy(vpath->hw_stats_sav, vpath->hw_stats,
			sizeof(struct vxge_hw_vpath_stats_hw_info));

	status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
exit:
	return status;
}

/*
 * __vxge_hw_blockpool_block_allocate - Allocate a block from the block pool
 * This function allocates a block from the block pool or from the system.
 */
static struct __vxge_hw_blockpool_entry *
__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
{
	struct __vxge_hw_blockpool_entry *entry = NULL;
	struct __vxge_hw_blockpool *blockpool;

	blockpool = &devh->block_pool;

	if (size == blockpool->block_size) {
		if (!list_empty(&blockpool->free_block_list))
			entry = (struct __vxge_hw_blockpool_entry *)
				list_first_entry(&blockpool->free_block_list,
					struct __vxge_hw_blockpool_entry,
					item);

		if (entry != NULL) {
			list_del(&entry->item);
			blockpool->pool_size--;
		}
	}

	if (entry != NULL)
		__vxge_hw_blockpool_blocks_add(blockpool);

	return entry;
}

/*
 * vxge_hw_vpath_open - Open a virtual path on a given adapter
 * This function is used to open access to a virtual path of an
 * adapter for offload and GRO operations. This function returns
 * synchronously.
 */
enum vxge_hw_status
vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
		   struct vxge_hw_vpath_attr *attr,
		   struct __vxge_hw_vpath_handle **vpath_handle)
{
	struct __vxge_hw_virtualpath *vpath;
	struct __vxge_hw_vpath_handle *vp;
	enum vxge_hw_status status;

	vpath = &hldev->virtual_paths[attr->vp_id];

	if (vpath->vp_open == VXGE_HW_VP_OPEN) {
		status = VXGE_HW_ERR_INVALID_STATE;
		goto vpath_open_exit1;
	}

	status = __vxge_hw_vp_initialize(hldev, attr->vp_id,
			&hldev->config.vp_config[attr->vp_id]);
	if (status != VXGE_HW_OK)
		goto vpath_open_exit1;

	vp = vzalloc(sizeof(struct __vxge_hw_vpath_handle));
	if (vp == NULL) {
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto vpath_open_exit2;
	}

	vp->vpath = vpath;

	if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
		status = __vxge_hw_fifo_create(vp, &attr->fifo_attr);
		if (status != VXGE_HW_OK)
			goto vpath_open_exit6;
	}

	if (vpath->vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
		status = __vxge_hw_ring_create(vp, &attr->ring_attr);
		if (status != VXGE_HW_OK)
			goto vpath_open_exit7;

		__vxge_hw_vpath_prc_configure(hldev, attr->vp_id);
	}

	vpath->fifoh->tx_intr_num =
		(attr->vp_id * VXGE_HW_MAX_INTR_PER_VP) +
			VXGE_HW_VPATH_INTR_TX;

	vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev,
				VXGE_HW_BLOCK_SIZE);
	if (vpath->stats_block == NULL) {
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto vpath_open_exit8;
	}

	vpath->hw_stats = vpath->stats_block->memblock;
	memset(vpath->hw_stats, 0,
		sizeof(struct vxge_hw_vpath_stats_hw_info));

	hldev->stats.hw_dev_info_stats.vpath_info[attr->vp_id] =
						vpath->hw_stats;

	vpath->hw_stats_sav =
		&hldev->stats.hw_dev_info_stats.vpath_info_sav[attr->vp_id];
	memset(vpath->hw_stats_sav, 0,
		sizeof(struct vxge_hw_vpath_stats_hw_info));

	writeq(vpath->stats_block->dma_addr, &vpath->vp_reg->stats_cfg);

	status = vxge_hw_vpath_stats_enable(vp);
	if (status != VXGE_HW_OK)
		goto vpath_open_exit8;

	list_add(&vp->item, &vpath->vpath_handles);

	hldev->vpaths_deployed |= vxge_mBIT(vpath->vp_id);

	*vpath_handle = vp;

	attr->fifo_attr.userdata = vpath->fifoh;
	attr->ring_attr.userdata = vpath->ringh;

	return VXGE_HW_OK;

vpath_open_exit8:
	if (vpath->ringh != NULL)
		__vxge_hw_ring_delete(vp);
vpath_open_exit7:
	if (vpath->fifoh != NULL)
		__vxge_hw_fifo_delete(vp);
vpath_open_exit6:
	vfree(vp);
vpath_open_exit2:
	__vxge_hw_vp_terminate(hldev, attr->vp_id);
vpath_open_exit1:
	return status;
}
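
/*
 * Illustrative usage sketch, not driver code: one way a caller might pair
 * vxge_hw_vpath_open() with vxge_hw_vpath_close(). The function name
 * "example_vpath_session" and the zero-filled attribute block are
 * assumptions made for illustration; a real caller (see vxge-main.c) also
 * fills in the fifo/ring callbacks and sizing in struct vxge_hw_vpath_attr.
 */
static enum vxge_hw_status __maybe_unused
example_vpath_session(struct __vxge_hw_device *hldev, u32 vp_id)
{
	struct vxge_hw_vpath_attr attr;
	struct __vxge_hw_vpath_handle *vp = NULL;
	enum vxge_hw_status status;

	memset(&attr, 0, sizeof(attr));
	attr.vp_id = vp_id;

	status = vxge_hw_vpath_open(hldev, &attr, &vp);
	if (status != VXGE_HW_OK)
		return status;

	/* ... post receive buffers, transmit, read stats through vp ... */

	return vxge_hw_vpath_close(vp);
}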

/**
 * vxge_hw_vpath_rx_doorbell_init - Post the initial RxD doorbell for a vpath
 * @vp: Handle got from previous vpath open
 *
 * This function posts the count of qwords of RxD memory available on the
 * vpath to the PRC doorbell and caps the ring's rxds_limit so it does not
 * exceed what the hardware can hold.
 */
void vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
{
	struct __vxge_hw_virtualpath *vpath = vp->vpath;
	struct __vxge_hw_ring *ring = vpath->ringh;
	struct vxgedev *vdev = netdev_priv(vpath->hldev->ndev);
	u64 new_count, val64, val164;

	if (vdev->titan1) {
		new_count = readq(&vpath->vp_reg->rxdmem_size);
		new_count &= 0x1fff;
	} else
		new_count = ring->config->ring_blocks * VXGE_HW_BLOCK_SIZE / 8;

	val164 = VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count);

	writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164),
		&vpath->vp_reg->prc_rxd_doorbell);
	readl(&vpath->vp_reg->prc_rxd_doorbell);

	val164 /= 2;
	val64 = readq(&vpath->vp_reg->prc_cfg6);
	val64 = VXGE_HW_PRC_CFG6_RXD_SPAT(val64);
	val64 &= 0x1ff;

	/*
	 * Each RxD is of 4 qwords
	 */
	new_count -= (val64 + 1);
	val64 = min(val164, new_count) / 4;

	ring->rxds_limit = min(ring->rxds_limit, val64);
	if (ring->rxds_limit < 4)
		ring->rxds_limit = 4;
}
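
/*
 * Worked example of the arithmetic above (illustrative figures, assuming
 * VXGE_HW_BLOCK_SIZE is 4096 bytes and that the SIZE/SPAT macros pass the
 * low bits through unchanged): with ring_blocks = 2 on a non-Titan1 part,
 * new_count = 2 * 4096 / 8 = 1024 qwords are posted to the doorbell.
 * val164 then halves to 512; if the RXD_SPAT field reads back as 255,
 * new_count becomes 1024 - 256 = 768, and since each RxD occupies 4
 * qwords the candidate limit is min(512, 768) / 4 = 128 RxDs, subject to
 * the existing rxds_limit and the floor of 4.
 */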

/*
 * __vxge_hw_blockpool_block_free - Frees a block from the block pool
 * @devh: Hal device
 * @entry: Entry of block to be freed
 *
 * This function frees a block from the block pool.
 */
static void
__vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
			       struct __vxge_hw_blockpool_entry *entry)
{
	struct __vxge_hw_blockpool *blockpool;

	blockpool = &devh->block_pool;

	if (entry->length == blockpool->block_size) {
		list_add(&entry->item, &blockpool->free_block_list);
		blockpool->pool_size++;
	}

	__vxge_hw_blockpool_blocks_remove(blockpool);
}
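
/*
 * Sketch of how the two block-pool helpers above pair up (illustrative
 * only, mirroring what vxge_hw_vpath_open()/vxge_hw_vpath_close() do with
 * the per-vpath stats block):
 *
 *	entry = __vxge_hw_blockpool_block_allocate(devh, VXGE_HW_BLOCK_SIZE);
 *	if (entry != NULL) {
 *		... use entry->memblock and entry->dma_addr ...
 *		__vxge_hw_blockpool_block_free(devh, entry);
 *	}
 *
 * Only requests whose size matches blockpool->block_size are served from
 * (or returned to) the free list; the blocks_add/blocks_remove calls then
 * nudge the pool back toward its target size.
 */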

/*
 * vxge_hw_vpath_close - Close the handle obtained from a previous vpath open
 * This function is used to close access to a virtual path opened
 * earlier.
 */
enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp)
{
	struct __vxge_hw_virtualpath *vpath = NULL;
	struct __vxge_hw_device *devh = NULL;
	u32 vp_id = vp->vpath->vp_id;
	u32 is_empty = TRUE;
	enum vxge_hw_status status = VXGE_HW_OK;

	vpath = vp->vpath;
	devh = vpath->hldev;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto vpath_close_exit;
	}

	list_del(&vp->item);

	if (!list_empty(&vpath->vpath_handles)) {
		list_add(&vp->item, &vpath->vpath_handles);
		is_empty = FALSE;
	}

	if (!is_empty) {
		status = VXGE_HW_FAIL;
		goto vpath_close_exit;
	}

	devh->vpaths_deployed &= ~vxge_mBIT(vp_id);

	if (vpath->ringh != NULL)
		__vxge_hw_ring_delete(vp);

	if (vpath->fifoh != NULL)
		__vxge_hw_fifo_delete(vp);

	if (vpath->stats_block != NULL)
		__vxge_hw_blockpool_block_free(devh, vpath->stats_block);

	vfree(vp);

	__vxge_hw_vp_terminate(devh, vp_id);
vpath_close_exit:
	return status;
}

/*
 * vxge_hw_vpath_reset - Resets vpath
 * This function is used to request a reset of the vpath.
 */
enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_vpath_handle *vp)
{
	enum vxge_hw_status status;
	u32 vp_id;
	struct __vxge_hw_virtualpath *vpath = vp->vpath;

	vp_id = vpath->vp_id;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	status = __vxge_hw_vpath_reset(vpath->hldev, vp_id);
	if (status == VXGE_HW_OK)
		vpath->sw_stats->soft_reset_cnt++;
exit:
	return status;
}

/*
 * vxge_hw_vpath_recover_from_reset - Poll for reset complete and re-initialize.
 * This function polls for vpath reset completion and re-initializes
 * the vpath.
 */
enum vxge_hw_status
vxge_hw_vpath_recover_from_reset(struct __vxge_hw_vpath_handle *vp)
{
	struct __vxge_hw_virtualpath *vpath = NULL;
	enum vxge_hw_status status;
	struct __vxge_hw_device *hldev;
	u32 vp_id;

	vp_id = vp->vpath->vp_id;
	vpath = vp->vpath;
	hldev = vpath->hldev;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	status = __vxge_hw_vpath_reset_check(vpath);
	if (status != VXGE_HW_OK)
		goto exit;

	status = __vxge_hw_vpath_sw_reset(hldev, vp_id);
	if (status != VXGE_HW_OK)
		goto exit;

	status = __vxge_hw_vpath_initialize(hldev, vp_id);
	if (status != VXGE_HW_OK)
		goto exit;

	if (vpath->ringh != NULL)
		__vxge_hw_vpath_prc_configure(hldev, vp_id);

	memset(vpath->hw_stats, 0,
		sizeof(struct vxge_hw_vpath_stats_hw_info));

	memset(vpath->hw_stats_sav, 0,
		sizeof(struct vxge_hw_vpath_stats_hw_info));

	writeq(vpath->stats_block->dma_addr,
		&vpath->vp_reg->stats_cfg);

	status = vxge_hw_vpath_stats_enable(vp);
exit:
	return status;
}
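
/*
 * Illustrative sketch, not driver code: a minimal reset-and-recover
 * sequence, under the assumption that the caller has quiesced traffic
 * first. "example_vpath_reset_recover" is a hypothetical name; the public
 * entry points used here are declared in the HW API header, so
 * vxge_hw_vpath_enable() may be called even though it is defined below.
 */
static enum vxge_hw_status __maybe_unused
example_vpath_reset_recover(struct __vxge_hw_vpath_handle *vp)
{
	enum vxge_hw_status status;

	/* Request the reset ... */
	status = vxge_hw_vpath_reset(vp);
	if (status != VXGE_HW_OK)
		return status;

	/* ... poll for completion and rebuild the vpath state ... */
	status = vxge_hw_vpath_recover_from_reset(vp);
	if (status != VXGE_HW_OK)
		return status;

	/* ... then clear the reset so traffic flows again. */
	vxge_hw_vpath_enable(vp);

	return VXGE_HW_OK;
}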

/*
 * vxge_hw_vpath_enable - Enable vpath.
 * This routine clears the vpath reset, thereby enabling the vpath
 * to start forwarding frames and generating interrupts.
 */
void
vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
{
	struct __vxge_hw_device *hldev;
	u64 val64;

	hldev = vp->vpath->hldev;

	val64 = VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(
		1 << (16 - vp->vpath->vp_id));

	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
		&hldev->common_reg->cmn_rsthdlr_cfg1);
}
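
/*
 * Worked example of the mask computed above: for vp_id = 0 the reset bit
 * is 1 << 16 = 0x10000, and for vp_id = 16 it is 1 << 0 = 0x1; that is,
 * vpath n is cleared through bit (16 - n) of the CLR_VPATH_RESET field.
 */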