/* bnx2x_sp.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2011-2013 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Vladislav Zolotarov
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/crc32c.h>
#include "bnx2x.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sp.h"

#define BNX2X_MAX_EMUL_MULTI 16

/**** Exe Queue interfaces ****/
/**
 * bnx2x_exe_queue_init - init the Exe Queue object
 *
 * @bp: driver handle
 * @o: pointer to the object
 * @exe_len: length
 * @owner: pointer to the owner
 * @validate: validate function pointer
 * @remove: remove function pointer
 * @optimize: optimize function pointer
 * @exec: execute function pointer
 * @get: get function pointer
 */
static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
                                        struct bnx2x_exe_queue_obj *o,
                                        int exe_len,
                                        union bnx2x_qable_obj *owner,
                                        exe_q_validate validate,
                                        exe_q_remove remove,
                                        exe_q_optimize optimize,
                                        exe_q_execute exec,
                                        exe_q_get get)
{
        memset(o, 0, sizeof(*o));

        INIT_LIST_HEAD(&o->exe_queue);
        INIT_LIST_HEAD(&o->pending_comp);

        spin_lock_init(&o->lock);

        o->exe_chunk_len = exe_len;
        o->owner = owner;

        /* Owner specific callbacks */
        o->validate = validate;
        o->remove = remove;
        o->optimize = optimize;
        o->execute = exec;
        o->get = get;

        DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk length of %d\n",
           exe_len);
}
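
/* Illustrative sketch (not part of the driver): a queueable object wires its
 * own callbacks into an execution queue roughly like this. The callback names
 * below are placeholders, not functions defined at this point in the file:
 *
 *        bnx2x_exe_queue_init(bp, &obj->exe_queue, 1, qable_obj,
 *                             my_validate_cb, my_remove_cb,
 *                             my_optimize_cb, my_execute_cb, my_get_cb);
 *
 * exe_len bounds how many command "units" may be folded into a single chunk
 * per bnx2x_exe_queue_step() call.
 */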
static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
                                             struct bnx2x_exeq_elem *elem)
{
        DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
        kfree(elem);
}

static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
{
        struct bnx2x_exeq_elem *elem;
        int cnt = 0;

        spin_lock_bh(&o->lock);

        list_for_each_entry(elem, &o->exe_queue, link)
                cnt++;

        spin_unlock_bh(&o->lock);

        return cnt;
}
/**
 * bnx2x_exe_queue_add - add a new element to the execution queue
 *
 * @bp: driver handle
 * @o: queue
 * @elem: new command to add
 * @restore: true - do not optimize the command
 *
 * If the element is optimized or is illegal, frees it.
 */
static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
                                      struct bnx2x_exe_queue_obj *o,
                                      struct bnx2x_exeq_elem *elem,
                                      bool restore)
{
        int rc;

        spin_lock_bh(&o->lock);

        if (!restore) {
                /* Try to cancel this element's command out against a queued
                 * opposite one (optimization)
                 */
                rc = o->optimize(bp, o->owner, elem);
                if (rc)
                        goto free_and_exit;

                /* Check if this request is ok */
                rc = o->validate(bp, o->owner, elem);
                if (rc) {
                        DP(BNX2X_MSG_SP, "Preamble failed: %d\n", rc);
                        goto free_and_exit;
                }
        }

        /* The command is legal - add it to the execution queue */
        list_add_tail(&elem->link, &o->exe_queue);

        spin_unlock_bh(&o->lock);

        return 0;

free_and_exit:
        bnx2x_exe_queue_free_elem(bp, elem);

        spin_unlock_bh(&o->lock);

        return rc;
}
static inline void __bnx2x_exe_queue_reset_pending(
                                struct bnx2x *bp,
                                struct bnx2x_exe_queue_obj *o)
{
        struct bnx2x_exeq_elem *elem;

        while (!list_empty(&o->pending_comp)) {
                elem = list_first_entry(&o->pending_comp,
                                        struct bnx2x_exeq_elem, link);

                list_del(&elem->link);
                bnx2x_exe_queue_free_elem(bp, elem);
        }
}
/**
 * bnx2x_exe_queue_step - execute one execution chunk atomically
 *
 * @bp: driver handle
 * @o: queue
 * @ramrod_flags: flags
 *
 * (Should be called while holding the exe_queue->lock).
 */
static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
                                       struct bnx2x_exe_queue_obj *o,
                                       unsigned long *ramrod_flags)
{
        struct bnx2x_exeq_elem *elem, spacer;
        int cur_len = 0, rc;

        memset(&spacer, 0, sizeof(spacer));

        /* Next step should not be performed until the current is finished,
         * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
         * properly clear object internals without sending any command to the FW
         * which also implies there won't be any completion to clear the
         * 'pending' list.
         */
        if (!list_empty(&o->pending_comp)) {
                if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
                        DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
                        __bnx2x_exe_queue_reset_pending(bp, o);
                } else {
                        return 1;
                }
        }

        /* Run through the pending commands list and create a next
         * execution chunk.
         */
        while (!list_empty(&o->exe_queue)) {
                elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
                                        link);
                WARN_ON(!elem->cmd_len);

                if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
                        cur_len += elem->cmd_len;
                        /* Prevent from both lists being empty when moving an
                         * element. This will allow the call of
                         * bnx2x_exe_queue_empty() without locking.
                         */
                        list_add_tail(&spacer.link, &o->pending_comp);
                        mb();
                        list_move_tail(&elem->link, &o->pending_comp);
                        list_del(&spacer.link);
                } else
                        break;
        }

        /* Sanity check */
        if (!cur_len)
                return 0;

        rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
        if (rc < 0)
                /* In case of an error return the commands back to the queue
                 * and reset the pending_comp.
                 */
                list_splice_init(&o->pending_comp, &o->exe_queue);
        else if (!rc)
                /* If zero is returned, means there are no outstanding pending
                 * completions and we may dismiss the pending list.
                 */
                __bnx2x_exe_queue_reset_pending(bp, o);

        return rc;
}
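
/* Illustrative sketch (not part of the driver): a typical caller drains the
 * queue one chunk at a time while holding the queue lock, e.g.:
 *
 *        spin_lock_bh(&o->lock);
 *        rc = bnx2x_exe_queue_step(bp, o, &ramrod_flags);
 *        spin_unlock_bh(&o->lock);
 *
 * rc < 0 means an error (the chunk was returned to the exe queue); rc == 0
 * means nothing is left pending; rc > 0 means a completion is still
 * outstanding and the next step must wait for it.
 */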
static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
{
        bool empty = list_empty(&o->exe_queue);

        /* Don't reorder!!! */
        mb();

        return empty && list_empty(&o->pending_comp);
}

static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
        struct bnx2x *bp)
{
        DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
        return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
}
/************************ raw_obj functions ***********************************/
static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
{
        return !!test_bit(o->state, o->pstate);
}

static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
{
        smp_mb__before_clear_bit();
        clear_bit(o->state, o->pstate);
        smp_mb__after_clear_bit();
}

static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
{
        smp_mb__before_clear_bit();
        set_bit(o->state, o->pstate);
        smp_mb__after_clear_bit();
}
/**
 * bnx2x_state_wait - wait until the given bit (state) is cleared
 *
 * @bp: device handle
 * @state: state which is to be cleared
 * @pstate: state buffer
 *
 */
static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
                                   unsigned long *pstate)
{
        /* can take a while if any port is running */
        int cnt = 5000;

        if (CHIP_REV_IS_EMUL(bp))
                cnt *= 20;

        DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);

        might_sleep();
        while (cnt--) {
                if (!test_bit(state, pstate)) {
#ifdef BNX2X_STOP_ON_ERROR
                        DP(BNX2X_MSG_SP, "exit (cnt %d)\n", 5000 - cnt);
#endif
                        return 0;
                }

                usleep_range(1000, 2000);

                if (bp->panic)
                        return -EIO;
        }

        /* timeout! */
        BNX2X_ERR("timeout waiting for state %d\n", state);
#ifdef BNX2X_STOP_ON_ERROR
        bnx2x_panic();
#endif

        return -EBUSY;
}
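
/* Worst-case bound (a rough estimate): 5000 iterations at
 * usleep_range(1000, 2000) gives roughly 5-10 seconds of polling before
 * -EBUSY is returned, and 20x that on emulation (CHIP_REV_IS_EMUL).
 */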
static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
{
        return bnx2x_state_wait(bp, raw->state, raw->pstate);
}
/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
/* credit handling callbacks */
static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
{
        struct bnx2x_credit_pool_obj *mp = o->macs_pool;

        WARN_ON(!mp);

        return mp->get_entry(mp, offset);
}

static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
{
        struct bnx2x_credit_pool_obj *mp = o->macs_pool;

        WARN_ON(!mp);

        return mp->get(mp, 1);
}

static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
{
        struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

        WARN_ON(!vp);

        return vp->get_entry(vp, offset);
}

static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
{
        struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

        WARN_ON(!vp);

        return vp->get(vp, 1);
}

static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
{
        struct bnx2x_credit_pool_obj *mp = o->macs_pool;
        struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

        if (!mp->get(mp, 1))
                return false;

        if (!vp->get(vp, 1)) {
                mp->put(mp, 1);
                return false;
        }

        return true;
}
static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
{
        struct bnx2x_credit_pool_obj *mp = o->macs_pool;

        return mp->put_entry(mp, offset);
}

static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
{
        struct bnx2x_credit_pool_obj *mp = o->macs_pool;

        return mp->put(mp, 1);
}

static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
{
        struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

        return vp->put_entry(vp, offset);
}

static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
{
        struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

        return vp->put(vp, 1);
}

static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
{
        struct bnx2x_credit_pool_obj *mp = o->macs_pool;
        struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

        if (!mp->put(mp, 1))
                return false;

        if (!vp->put(vp, 1)) {
                mp->get(mp, 1);
                return false;
        }

        return true;
}
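
/* Note on the pair helpers above: a VLAN-MAC entry consumes one credit from
 * *each* pool, so both the get and put variants either transfer both credits
 * or roll the first one back and report failure - a caller never ends up
 * holding half a pair. A sketch of how these are consumed (the
 * o->get_credit/o->put_credit indirection is an assumption here, matching
 * the callback style used throughout this object):
 *
 *        if (!o->get_credit(o))
 *                return -EINVAL;        // out of credits - reject the ADD
 *        ...
 *        o->put_credit(o);              // on DEL, or on a failed ADD
 */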
/**
 * __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock
 *
 * @bp: device handle
 * @o: vlan_mac object
 *
 * @details: Non-blocking implementation; should be called under execution
 *           queue lock.
 */
static int __bnx2x_vlan_mac_h_write_trylock(struct bnx2x *bp,
                                            struct bnx2x_vlan_mac_obj *o)
{
        if (o->head_reader) {
                DP(BNX2X_MSG_SP, "vlan_mac_lock writer - There are readers; Busy\n");
                return -EBUSY;
        }

        DP(BNX2X_MSG_SP, "vlan_mac_lock writer - Taken\n");
        return 0;
}
/**
 * __bnx2x_vlan_mac_h_exec_pending - execute step instead of a previous step
 *
 * @bp: device handle
 * @o: vlan_mac object
 *
 * @details Should be called under execution queue lock; notice it might release
 *          and reclaim it during its run.
 */
static void __bnx2x_vlan_mac_h_exec_pending(struct bnx2x *bp,
                                            struct bnx2x_vlan_mac_obj *o)
{
        int rc;
        unsigned long ramrod_flags = o->saved_ramrod_flags;

        DP(BNX2X_MSG_SP, "vlan_mac_lock execute pending command with ramrod flags %lu\n",
           ramrod_flags);
        o->head_exe_request = false;
        o->saved_ramrod_flags = 0;
        rc = bnx2x_exe_queue_step(bp, &o->exe_queue, &ramrod_flags);
        if (rc != 0) {
                BNX2X_ERR("execution of pending commands failed with rc %d\n",
                          rc);
#ifdef BNX2X_STOP_ON_ERROR
                bnx2x_panic();
#endif
        }
}
/**
 * __bnx2x_vlan_mac_h_pend - Pend an execution step which couldn't run
 *
 * @bp: device handle
 * @o: vlan_mac object
 * @ramrod_flags: ramrod flags of missed execution
 *
 * @details Should be called under execution queue lock.
 */
static void __bnx2x_vlan_mac_h_pend(struct bnx2x *bp,
                                    struct bnx2x_vlan_mac_obj *o,
                                    unsigned long ramrod_flags)
{
        o->head_exe_request = true;
        o->saved_ramrod_flags = ramrod_flags;
        DP(BNX2X_MSG_SP, "Placing pending execution with ramrod flags %lu\n",
           ramrod_flags);
}
/**
 * __bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
 *
 * @bp: device handle
 * @o: vlan_mac object
 *
 * @details Should be called under execution queue lock. Notice if a pending
 *          execution exists, it would perform it - possibly releasing and
 *          reclaiming the execution queue lock.
 */
static void __bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
                                            struct bnx2x_vlan_mac_obj *o)
{
        /* It's possible a new pending execution was added since this writer
         * executed. If so, execute again. [Ad infinitum]
         */
        while (o->head_exe_request) {
                DP(BNX2X_MSG_SP, "vlan_mac_lock - writer release encountered a pending request\n");
                __bnx2x_vlan_mac_h_exec_pending(bp, o);
        }
}
/**
 * bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
 *
 * @bp: device handle
 * @o: vlan_mac object
 *
 * @details Notice if a pending execution exists, it would perform it -
 *          possibly releasing and reclaiming the execution queue lock.
 */
void bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
                                   struct bnx2x_vlan_mac_obj *o)
{
        spin_lock_bh(&o->exe_queue.lock);
        __bnx2x_vlan_mac_h_write_unlock(bp, o);
        spin_unlock_bh(&o->exe_queue.lock);
}
/**
 * __bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
 *
 * @bp: device handle
 * @o: vlan_mac object
 *
 * @details Should be called under the execution queue lock. May sleep. May
 *          release and reclaim execution queue lock during its run.
 */
static int __bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
                                        struct bnx2x_vlan_mac_obj *o)
{
        /* If we got here, we're holding lock --> no WRITER exists */
        o->head_reader++;
        DP(BNX2X_MSG_SP, "vlan_mac_lock - locked reader - number %d\n",
           o->head_reader);

        return 0;
}

/**
 * bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
 *
 * @bp: device handle
 * @o: vlan_mac object
 *
 * @details May sleep. Claims and releases execution queue lock during its run.
 */
int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
                               struct bnx2x_vlan_mac_obj *o)
{
        int rc;

        spin_lock_bh(&o->exe_queue.lock);
        rc = __bnx2x_vlan_mac_h_read_lock(bp, o);
        spin_unlock_bh(&o->exe_queue.lock);

        return rc;
}
/**
 * __bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
 *
 * @bp: device handle
 * @o: vlan_mac object
 *
 * @details Should be called under execution queue lock. Notice if a pending
 *          execution exists, it would be performed if this was the last
 *          reader, possibly releasing and reclaiming the execution queue lock.
 */
static void __bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
                                           struct bnx2x_vlan_mac_obj *o)
{
        if (!o->head_reader) {
                BNX2X_ERR("Need to release vlan mac reader lock, but lock isn't taken\n");
#ifdef BNX2X_STOP_ON_ERROR
                bnx2x_panic();
#endif
        } else {
                o->head_reader--;
                DP(BNX2X_MSG_SP, "vlan_mac_lock - decreased readers to %d\n",
                   o->head_reader);
        }

        /* It's possible a new pending execution was added, and that this reader
         * was last - if so we need to execute the command.
         */
        if (!o->head_reader && o->head_exe_request) {
                DP(BNX2X_MSG_SP, "vlan_mac_lock - reader release encountered a pending request\n");

                /* Writer release will do the trick */
                __bnx2x_vlan_mac_h_write_unlock(bp, o);
        }
}

/**
 * bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
 *
 * @bp: device handle
 * @o: vlan_mac object
 *
 * @details Notice if a pending execution exists, it would be performed if this
 *          was the last reader. Claims and releases the execution queue lock
 *          during its run.
 */
void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
                                  struct bnx2x_vlan_mac_obj *o)
{
        spin_lock_bh(&o->exe_queue.lock);
        __bnx2x_vlan_mac_h_read_unlock(bp, o);
        spin_unlock_bh(&o->exe_queue.lock);
}
static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
                                int n, u8 *base, u8 stride, u8 size)
{
        struct bnx2x_vlan_mac_registry_elem *pos;
        u8 *next = base;
        int counter = 0;
        int read_lock;

        DP(BNX2X_MSG_SP, "get_n_elements - taking vlan_mac_lock (reader)\n");
        read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
        if (read_lock != 0)
                BNX2X_ERR("get_n_elements failed to get vlan mac reader lock; Access without lock\n");

        /* traverse list */
        list_for_each_entry(pos, &o->head, link) {
                if (counter < n) {
                        memcpy(next, &pos->u, size);
                        counter++;
                        DP(BNX2X_MSG_SP, "copied element number %d to address %p element was:\n",
                           counter, next);
                        next += stride + size;
                }
        }

        if (read_lock == 0) {
                DP(BNX2X_MSG_SP, "get_n_elements - releasing vlan_mac_lock (reader)\n");
                bnx2x_vlan_mac_h_read_unlock(bp, o);
        }

        return counter * ETH_ALEN;
}
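
/* Illustrative sketch (not part of the driver): collecting up to 8 registered
 * MACs into a flat buffer. With stride 0 and size ETH_ALEN the entries are
 * packed back to back; a non-zero stride leaves a gap after each entry:
 *
 *        u8 macs[8 * ETH_ALEN];
 *        int bytes = bnx2x_get_n_elements(bp, mac_obj, 8, macs, 0, ETH_ALEN);
 *
 * Note the return value is counter * ETH_ALEN regardless of @size, so it is
 * only a meaningful byte count for MAC-sized elements.
 */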
/* check_add() callbacks */
static int bnx2x_check_mac_add(struct bnx2x *bp,
                               struct bnx2x_vlan_mac_obj *o,
                               union bnx2x_classification_ramrod_data *data)
{
        struct bnx2x_vlan_mac_registry_elem *pos;

        DP(BNX2X_MSG_SP, "Checking MAC %pM for ADD command\n", data->mac.mac);

        if (!is_valid_ether_addr(data->mac.mac))
                return -EINVAL;

        /* Check if a requested MAC already exists */
        list_for_each_entry(pos, &o->head, link)
                if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
                    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
                        return -EEXIST;

        return 0;
}

static int bnx2x_check_vlan_add(struct bnx2x *bp,
                                struct bnx2x_vlan_mac_obj *o,
                                union bnx2x_classification_ramrod_data *data)
{
        struct bnx2x_vlan_mac_registry_elem *pos;

        DP(BNX2X_MSG_SP, "Checking VLAN %d for ADD command\n", data->vlan.vlan);

        list_for_each_entry(pos, &o->head, link)
                if (data->vlan.vlan == pos->u.vlan.vlan)
                        return -EEXIST;

        return 0;
}

static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
                                    struct bnx2x_vlan_mac_obj *o,
                                    union bnx2x_classification_ramrod_data *data)
{
        struct bnx2x_vlan_mac_registry_elem *pos;

        DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n",
           data->vlan_mac.mac, data->vlan_mac.vlan);

        list_for_each_entry(pos, &o->head, link)
                if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
                    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
                             ETH_ALEN)) &&
                    (data->vlan_mac.is_inner_mac ==
                     pos->u.vlan_mac.is_inner_mac))
                        return -EEXIST;

        return 0;
}
/* check_del() callbacks */
static struct bnx2x_vlan_mac_registry_elem *
        bnx2x_check_mac_del(struct bnx2x *bp,
                            struct bnx2x_vlan_mac_obj *o,
                            union bnx2x_classification_ramrod_data *data)
{
        struct bnx2x_vlan_mac_registry_elem *pos;

        DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);

        list_for_each_entry(pos, &o->head, link)
                if ((!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) &&
                    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
                        return pos;

        return NULL;
}

static struct bnx2x_vlan_mac_registry_elem *
        bnx2x_check_vlan_del(struct bnx2x *bp,
                             struct bnx2x_vlan_mac_obj *o,
                             union bnx2x_classification_ramrod_data *data)
{
        struct bnx2x_vlan_mac_registry_elem *pos;

        DP(BNX2X_MSG_SP, "Checking VLAN %d for DEL command\n", data->vlan.vlan);

        list_for_each_entry(pos, &o->head, link)
                if (data->vlan.vlan == pos->u.vlan.vlan)
                        return pos;

        return NULL;
}

static struct bnx2x_vlan_mac_registry_elem *
        bnx2x_check_vlan_mac_del(struct bnx2x *bp,
                                 struct bnx2x_vlan_mac_obj *o,
                                 union bnx2x_classification_ramrod_data *data)
{
        struct bnx2x_vlan_mac_registry_elem *pos;

        DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n",
           data->vlan_mac.mac, data->vlan_mac.vlan);

        list_for_each_entry(pos, &o->head, link)
                if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
                    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
                             ETH_ALEN)) &&
                    (data->vlan_mac.is_inner_mac ==
                     pos->u.vlan_mac.is_inner_mac))
                        return pos;

        return NULL;
}
/* check_move() callback */
static bool bnx2x_check_move(struct bnx2x *bp,
                             struct bnx2x_vlan_mac_obj *src_o,
                             struct bnx2x_vlan_mac_obj *dst_o,
                             union bnx2x_classification_ramrod_data *data)
{
        struct bnx2x_vlan_mac_registry_elem *pos;
        int rc;

        /* Check if we can delete the requested configuration from the first
         * object.
         */
        pos = src_o->check_del(bp, src_o, data);

        /* check if configuration can be added */
        rc = dst_o->check_add(bp, dst_o, data);

        /* If this classification can not be added (is already set)
         * or can't be deleted - return an error.
         */
        if (rc || !pos)
                return false;

        return true;
}

static bool bnx2x_check_move_always_err(
        struct bnx2x *bp,
        struct bnx2x_vlan_mac_obj *src_o,
        struct bnx2x_vlan_mac_obj *dst_o,
        union bnx2x_classification_ramrod_data *data)
{
        return false;
}

static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
{
        struct bnx2x_raw_obj *raw = &o->raw;
        u8 rx_tx_flag = 0;

        if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
            (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
                rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;

        if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
            (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
                rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;

        return rx_tx_flag;
}
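
/* The resulting mapping is:
 *
 *        obj_type                  rx_tx_flag
 *        BNX2X_OBJ_TYPE_TX         TX_CMD
 *        BNX2X_OBJ_TYPE_RX         RX_CMD
 *        BNX2X_OBJ_TYPE_RX_TX      TX_CMD | RX_CMD
 */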
void bnx2x_set_mac_in_nig(struct bnx2x *bp,
                          bool add, unsigned char *dev_addr, int index)
{
        u32 wb_data[2];
        u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
                         NIG_REG_LLH0_FUNC_MEM;

        if (!IS_MF_SI(bp) && !IS_MF_AFEX(bp))
                return;

        if (index > BNX2X_LLH_CAM_MAX_PF_LINE)
                return;

        DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
           (add ? "ADD" : "DELETE"), index);

        if (add) {
                /* LLH_FUNC_MEM is a u64 WB register */
                reg_offset += 8*index;

                wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
                              (dev_addr[4] << 8) | dev_addr[5]);
                wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);

                REG_WR_DMAE(bp, reg_offset, wb_data, 2);
        }

        REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
                                  NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
}
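
/* Worked example of the packing above: for dev_addr 00:11:22:33:44:55 the
 * write-back pair becomes
 *
 *        wb_data[0] = 0x22334455;        // bytes 2..5
 *        wb_data[1] = 0x00000011;        // bytes 0..1 in the low 16 bits
 *
 * i.e. the u64 LLH_FUNC_MEM entry holds the MAC with its most significant
 * bytes in the high word.
 */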
/**
 * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
 *
 * @bp: device handle
 * @o: queue for which we want to configure this rule
 * @add: if true the command is an ADD command, DEL otherwise
 * @opcode: CLASSIFY_RULE_OPCODE_XXX
 * @hdr: pointer to a header to setup
 *
 */
static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
        struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
        struct eth_classify_cmd_header *hdr)
{
        struct bnx2x_raw_obj *raw = &o->raw;

        hdr->client_id = raw->cl_id;
        hdr->func_id = raw->func_id;

        /* Rx or/and Tx (internal switching) configuration ? */
        hdr->cmd_general_data |=
                bnx2x_vlan_mac_get_rx_tx_flag(o);

        if (add)
                hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;

        hdr->cmd_general_data |=
                (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
}
/**
 * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
 *
 * @cid: connection id
 * @type: BNX2X_FILTER_XXX_PENDING
 * @hdr: pointer to header to setup
 * @rule_cnt: number of rules configured in this ramrod
 *
 * Currently we always configure one rule, and the echo field is set to
 * contain a CID and an opcode type.
 */
static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
                                struct eth_classify_header *hdr, int rule_cnt)
{
        hdr->echo = cpu_to_le32((cid & BNX2X_SWCID_MASK) |
                                (type << BNX2X_SWCID_SHIFT));
        hdr->rule_cnt = (u8)rule_cnt;
}
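
/* The echo word round-trips through the FW completion untouched, so the
 * completion path can recover both fields (a sketch, mirroring the encoding
 * above):
 *
 *        u32 echo = le32_to_cpu(hdr->echo);
 *        u32 cid  = echo & BNX2X_SWCID_MASK;
 *        int type = echo >> BNX2X_SWCID_SHIFT;
 */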
/* hw_config() callbacks */
static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
                                 struct bnx2x_vlan_mac_obj *o,
                                 struct bnx2x_exeq_elem *elem, int rule_idx,
                                 int cam_offset)
{
        struct bnx2x_raw_obj *raw = &o->raw;
        struct eth_classify_rules_ramrod_data *data =
                (struct eth_classify_rules_ramrod_data *)(raw->rdata);
        int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
        union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
        bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
        unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
        u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;

        /* Set LLH CAM entry: currently only iSCSI and ETH macs are
         * relevant. In addition, current implementation is tuned for a
         * single ETH MAC.
         *
         * When multiple unicast ETH MACs PF configuration in switch
         * independent mode is required (NetQ, multiple netdev MACs,
         * etc.), consider better utilisation of 8 per function MAC
         * entries in the LLH register. There is also
         * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the
         * total number of CAM entries to 16.
         *
         * Currently we won't configure NIG for MACs other than a primary ETH
         * MAC and iSCSI L2 MAC.
         *
         * If this MAC is moving from one Queue to another, no need to change
         * NIG configuration.
         */
        if (cmd != BNX2X_VLAN_MAC_MOVE) {
                if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
                        bnx2x_set_mac_in_nig(bp, add, mac,
                                             BNX2X_LLH_CAM_ISCSI_ETH_LINE);
                else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
                        bnx2x_set_mac_in_nig(bp, add, mac,
                                             BNX2X_LLH_CAM_ETH_LINE);
        }

        /* Reset the ramrod data buffer for the first rule */
        if (rule_idx == 0)
                memset(data, 0, sizeof(*data));

        /* Setup a command header */
        bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
                                      &rule_entry->mac.header);

        DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n",
           (add ? "add" : "delete"), mac, raw->cl_id);

        /* Set a MAC itself */
        bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
                              &rule_entry->mac.mac_mid,
                              &rule_entry->mac.mac_lsb, mac);
        rule_entry->mac.inner_mac =
                cpu_to_le16(elem->cmd_data.vlan_mac.u.mac.is_inner_mac);

        /* MOVE: Add a rule that will add this MAC to the target Queue */
        if (cmd == BNX2X_VLAN_MAC_MOVE) {
                rule_entry++;
                rule_cnt++;

                /* Setup ramrod data */
                bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
                                              elem->cmd_data.vlan_mac.target_obj,
                                              true, CLASSIFY_RULE_OPCODE_MAC,
                                              &rule_entry->mac.header);

                /* Set a MAC itself */
                bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
                                      &rule_entry->mac.mac_mid,
                                      &rule_entry->mac.mac_lsb, mac);
                rule_entry->mac.inner_mac =
                        cpu_to_le16(elem->cmd_data.vlan_mac.
                                    u.mac.is_inner_mac);
        }

        /* Set the ramrod data header */
        /* TODO: take this to the higher level in order to prevent multiple
         * writing
         */
        bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
                                        rule_cnt);
}
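
/* Layout note: a MOVE command therefore occupies two consecutive entries in
 * data->rules[] - rules[rule_idx] carries the DEL-from-source rule (add is
 * false for MOVE) and rules[rule_idx + 1] the ADD-on-target rule - and the
 * rule_cnt reported in the header covers both.
 */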
/**
 * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
 *
 * @bp: device handle
 * @o: queue
 * @type: BNX2X_FILTER_XXX_PENDING
 * @cam_offset: offset in cam memory
 * @hdr: pointer to a header to setup
 *
 * E1/E1H
 */
static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
        struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
        struct mac_configuration_hdr *hdr)
{
        struct bnx2x_raw_obj *r = &o->raw;

        hdr->length = 1;
        hdr->offset = (u8)cam_offset;
        hdr->client_id = cpu_to_le16(0xff);
        hdr->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
                                (type << BNX2X_SWCID_SHIFT));
}
static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
        struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
        u16 vlan_id, struct mac_configuration_entry *cfg_entry)
{
        struct bnx2x_raw_obj *r = &o->raw;
        u32 cl_bit_vec = (1 << r->cl_id);

        cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
        cfg_entry->pf_id = r->func_id;
        cfg_entry->vlan_id = cpu_to_le16(vlan_id);

        if (add) {
                SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
                         T_ETH_MAC_COMMAND_SET);
                SET_FLAG(cfg_entry->flags,
                         MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);

                /* Set a MAC in a ramrod data */
                bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
                                      &cfg_entry->middle_mac_addr,
                                      &cfg_entry->lsb_mac_addr, mac);
        } else
                SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
                         T_ETH_MAC_COMMAND_INVALIDATE);
}

static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
        struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
        u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
{
        struct mac_configuration_entry *cfg_entry = &config->config_table[0];
        struct bnx2x_raw_obj *raw = &o->raw;

        bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
                                         &config->hdr);
        bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
                                         cfg_entry);

        DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n",
           (add ? "setting" : "clearing"),
           mac, raw->cl_id, cam_offset);
}
/**
 * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data
 *
 * @bp: device handle
 * @o: bnx2x_vlan_mac_obj
 * @elem: bnx2x_exeq_elem
 * @rule_idx: rule_idx
 * @cam_offset: cam_offset
 */
static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
                                  struct bnx2x_vlan_mac_obj *o,
                                  struct bnx2x_exeq_elem *elem, int rule_idx,
                                  int cam_offset)
{
        struct bnx2x_raw_obj *raw = &o->raw;
        struct mac_configuration_cmd *config =
                (struct mac_configuration_cmd *)(raw->rdata);
        /* 57710 and 57711 do not support MOVE command,
         * so it's either ADD or DEL
         */
        bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
                true : false;

        /* Reset the ramrod data buffer */
        memset(config, 0, sizeof(*config));

        bnx2x_vlan_mac_set_rdata_e1x(bp, o, raw->state,
                                     cam_offset, add,
                                     elem->cmd_data.vlan_mac.u.mac.mac, 0,
                                     ETH_VLAN_FILTER_ANY_VLAN, config);
}
static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
                                  struct bnx2x_vlan_mac_obj *o,
                                  struct bnx2x_exeq_elem *elem, int rule_idx,
                                  int cam_offset)
{
        struct bnx2x_raw_obj *raw = &o->raw;
        struct eth_classify_rules_ramrod_data *data =
                (struct eth_classify_rules_ramrod_data *)(raw->rdata);
        int rule_cnt = rule_idx + 1;
        union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
        enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
        bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
        u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;

        /* Reset the ramrod data buffer for the first rule */
        if (rule_idx == 0)
                memset(data, 0, sizeof(*data));

        /* Set a rule header */
        bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
                                      &rule_entry->vlan.header);

        DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
           vlan);

        /* Set a VLAN itself */
        rule_entry->vlan.vlan = cpu_to_le16(vlan);

        /* MOVE: Add a rule that will add this VLAN to the target Queue */
        if (cmd == BNX2X_VLAN_MAC_MOVE) {
                rule_entry++;
                rule_cnt++;

                /* Setup ramrod data */
                bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
                                              elem->cmd_data.vlan_mac.target_obj,
                                              true, CLASSIFY_RULE_OPCODE_VLAN,
                                              &rule_entry->vlan.header);

                /* Set a VLAN itself */
                rule_entry->vlan.vlan = cpu_to_le16(vlan);
        }

        /* Set the ramrod data header */
        /* TODO: take this to the higher level in order to prevent multiple
         * writing
         */
        bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
                                        rule_cnt);
}
static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
				      struct bnx2x_vlan_mac_obj *o,
				      struct bnx2x_exeq_elem *elem,
				      int rule_idx, int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
	bool add = (cmd == BNX2X_VLAN_MAC_ADD);
	u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
	u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Set a rule header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
				      &rule_entry->pair.header);

	/* Set VLAN and MAC themselves */
	rule_entry->pair.vlan = cpu_to_le16(vlan);
	bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
			      &rule_entry->pair.mac_mid,
			      &rule_entry->pair.mac_lsb, mac);
	rule_entry->pair.inner_mac =
		cpu_to_le16(elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac);

	/* MOVE: Add a rule that will add this VLAN-MAC pair to the target
	 * Queue
	 */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					elem->cmd_data.vlan_mac.target_obj,
					true, CLASSIFY_RULE_OPCODE_PAIR,
					&rule_entry->pair.header);

		/* Set VLAN and MAC themselves */
		rule_entry->pair.vlan = cpu_to_le16(vlan);
		bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
				      &rule_entry->pair.mac_mid,
				      &rule_entry->pair.mac_lsb, mac);
		rule_entry->pair.inner_mac =
			cpu_to_le16(elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac);
	}

	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
	 * writing
	 */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}

/**
 * bnx2x_set_one_vlan_mac_e1h - fill a single VLAN-MAC rule ramrod data
 *
 * @bp: device handle
 * @o: bnx2x_vlan_mac_obj
 * @elem: bnx2x_exeq_elem
 * @rule_idx: rule_idx
 * @cam_offset: cam_offset
 */
static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
				       struct bnx2x_vlan_mac_obj *o,
				       struct bnx2x_exeq_elem *elem,
				       int rule_idx, int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct mac_configuration_cmd *config =
		(struct mac_configuration_cmd *)(raw->rdata);
	/* 57710 and 57711 do not support MOVE command,
	 * so it's either ADD or DEL
	 */
	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD);

	/* Reset the ramrod data buffer */
	memset(config, 0, sizeof(*config));

	bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
				     cam_offset, add,
				     elem->cmd_data.vlan_mac.u.vlan_mac.mac,
				     elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
				     ETH_VLAN_FILTER_CLASSIFY, config);
}

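/* Step to the list entry following @pos: a thin wrapper around list_entry()
 * using the embedded list_head member named @member.
 */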
#define list_next_entry(pos, member) \
	list_entry((pos)->member.next, typeof(*(pos)), member)

/**
 * bnx2x_vlan_mac_restore - reconfigure the next MAC/VLAN/VLAN-MAC element
 *
 * @bp: device handle
 * @p: command parameters
 * @ppos: pointer to the cookie
 *
 * Reconfigures the next MAC/VLAN/VLAN-MAC element from the previously
 * configured elements list.
 *
 * Of the command parameters, only the RAMROD_COMP_WAIT bit in ramrod_flags
 * is taken into account.
 *
 * The cookie should be given back in the next call to make the function
 * handle the next element. If *ppos is set to NULL the iterator is
 * restarted. If *ppos is NULL on return, the last element has been handled.
 */
static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_ramrod_params *p,
				  struct bnx2x_vlan_mac_registry_elem **ppos)
{
	struct bnx2x_vlan_mac_registry_elem *pos;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;

	/* If list is empty - there is nothing to do here */
	if (list_empty(&o->head)) {
		*ppos = NULL;
		return 0;
	}

	/* make a step... */
	if (*ppos == NULL)
		*ppos = list_first_entry(&o->head,
					 struct bnx2x_vlan_mac_registry_elem,
					 link);
	else
		*ppos = list_next_entry(*ppos, link);

	pos = *ppos;

	/* If it's the last step - return NULL */
	if (list_is_last(&pos->link, &o->head))
		*ppos = NULL;

	/* Prepare a 'user_req' */
	memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));

	/* Set the command */
	p->user_req.cmd = BNX2X_VLAN_MAC_ADD;

	/* Set vlan_mac_flags */
	p->user_req.vlan_mac_flags = pos->vlan_mac_flags;

	/* Set a restore bit */
	__set_bit(RAMROD_RESTORE, &p->ramrod_flags);

	return bnx2x_config_vlan_mac(bp, p);
}

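/* Illustrative usage sketch (assuming @p has been otherwise prepared by the
 * caller): drive the iterator until the cookie comes back NULL.
 *
 *	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
 *	int rc;
 *
 *	do {
 *		rc = bnx2x_vlan_mac_restore(bp, &p, &pos);
 *		if (rc < 0)
 *			return rc;
 *	} while (pos);
 */
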
/* bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
 * pointer to an element with specific criteria and NULL if such an element
 * hasn't been found.
 */
static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
	struct bnx2x_exe_queue_obj *o,
	struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem *pos;
	struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;

	/* Check pending for execution commands */
	list_for_each_entry(pos, &o->exe_queue, link)
		if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
			    sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
	struct bnx2x_exe_queue_obj *o,
	struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem *pos;
	struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;

	/* Check pending for execution commands */
	list_for_each_entry(pos, &o->exe_queue, link)
		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
			    sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
	struct bnx2x_exe_queue_obj *o,
	struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem *pos;
	struct bnx2x_vlan_mac_ramrod_data *data =
		&elem->cmd_data.vlan_mac.u.vlan_mac;

	/* Check pending for execution commands */
	list_for_each_entry(pos, &o->exe_queue, link)
		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
			    sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

/**
 * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
 *
 * @bp: device handle
 * @qo: bnx2x_qable_obj
 * @elem: bnx2x_exeq_elem
 *
 * Checks that the requested configuration can be added. If yes and if
 * requested, consume CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
					      union bnx2x_qable_obj *qo,
					      struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	int rc;

	/* Check the registry */
	rc = o->check_add(bp, o, &elem->cmd_data.vlan_mac.u);
	if (rc) {
		DP(BNX2X_MSG_SP, "ADD command is not allowed considering current registry state.\n");
		return rc;
	}

	/* Check if there is a pending ADD command for this
	 * MAC/VLAN/VLAN-MAC. Return an error if there is.
	 */
	if (exeq->get(exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
		return -EEXIST;
	}

	/* TODO: Check the pending MOVE from other objects where this
	 * object is a destination object.
	 */

	/* Consume the credit, unless asked not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      o->get_credit(o)))
		return -EINVAL;

	return 0;
}

/**
 * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
 *
 * @bp: device handle
 * @qo: queueable object to check
 * @elem: element that needs to be deleted
 *
 * Checks that the requested configuration can be deleted. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
					      union bnx2x_qable_obj *qo,
					      struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_vlan_mac_registry_elem *pos;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	struct bnx2x_exeq_elem query_elem;

	/* If this classification can not be deleted (doesn't exist)
	 * - return -EEXIST.
	 */
	pos = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
	if (!pos) {
		DP(BNX2X_MSG_SP, "DEL command is not allowed considering current registry state\n");
		return -EEXIST;
	}

	/* Check if there are pending DEL or MOVE commands for this
	 * MAC/VLAN/VLAN-MAC. Return an error if so.
	 */
	memcpy(&query_elem, elem, sizeof(query_elem));

	/* Check for MOVE commands */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
	if (exeq->get(exeq, &query_elem)) {
		BNX2X_ERR("There is a pending MOVE command already\n");
		return -EINVAL;
	}

	/* Check for DEL commands */
	if (exeq->get(exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
		return -EEXIST;
	}

	/* Return the credit to the credit pool, unless asked not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      o->put_credit(o))) {
		BNX2X_ERR("Failed to return a credit\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
 *
 * @bp: device handle
 * @qo: queueable object to check (source)
 * @elem: element that needs to be moved
 *
 * Checks that the requested configuration can be moved. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
					       union bnx2x_qable_obj *qo,
					       struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
	struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
	struct bnx2x_exeq_elem query_elem;
	struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
	struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;

	/* Check if we can perform this operation based on the current registry
	 * state.
	 */
	if (!src_o->check_move(bp, src_o, dest_o,
			       &elem->cmd_data.vlan_mac.u)) {
		DP(BNX2X_MSG_SP, "MOVE command is not allowed considering current registry state\n");
		return -EINVAL;
	}

	/* Check if there is an already pending DEL or MOVE command for the
	 * source object or an ADD command for the destination object. Return
	 * an error if so.
	 */
	memcpy(&query_elem, elem, sizeof(query_elem));

	/* Check DEL on source */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
	if (src_exeq->get(src_exeq, &query_elem)) {
		BNX2X_ERR("There is a pending DEL command on the source queue already\n");
		return -EINVAL;
	}

	/* Check MOVE on source */
	if (src_exeq->get(src_exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
		return -EEXIST;
	}

	/* Check ADD on destination */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
	if (dest_exeq->get(dest_exeq, &query_elem)) {
		BNX2X_ERR("There is a pending ADD command on the destination queue already\n");
		return -EINVAL;
	}

	/* Consume the credit, unless asked not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      dest_o->get_credit(dest_o)))
		return -EINVAL;

	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      src_o->put_credit(src_o))) {
		/* return the credit taken from dest... */
		dest_o->put_credit(dest_o);
		return -EINVAL;
	}

	return 0;
}

static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
				   union bnx2x_qable_obj *qo,
				   struct bnx2x_exeq_elem *elem)
{
	switch (elem->cmd_data.vlan_mac.cmd) {
	case BNX2X_VLAN_MAC_ADD:
		return bnx2x_validate_vlan_mac_add(bp, qo, elem);
	case BNX2X_VLAN_MAC_DEL:
		return bnx2x_validate_vlan_mac_del(bp, qo, elem);
	case BNX2X_VLAN_MAC_MOVE:
		return bnx2x_validate_vlan_mac_move(bp, qo, elem);
	default:
		return -EINVAL;
	}
}

static int bnx2x_remove_vlan_mac(struct bnx2x *bp,
				 union bnx2x_qable_obj *qo,
				 struct bnx2x_exeq_elem *elem)
{
	int rc = 0;

	/* If consumption wasn't required, nothing to do */
	if (test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		     &elem->cmd_data.vlan_mac.vlan_mac_flags))
		return 0;

	switch (elem->cmd_data.vlan_mac.cmd) {
	case BNX2X_VLAN_MAC_ADD:
	case BNX2X_VLAN_MAC_MOVE:
		rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
		break;
	case BNX2X_VLAN_MAC_DEL:
		rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
		break;
	default:
		return -EINVAL;
	}

	if (!rc)
		return -EINVAL;

	return 0;
}

/**
 * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
 *
 * @bp: device handle
 * @o: bnx2x_vlan_mac_obj
 */
static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
			       struct bnx2x_vlan_mac_obj *o)
{
	int cnt = 5000, rc;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	struct bnx2x_raw_obj *raw = &o->raw;

	while (cnt--) {
		/* Wait for the current command to complete */
		rc = raw->wait_comp(bp, raw);
		if (rc)
			return rc;

		/* Wait until there are no pending commands */
		if (!bnx2x_exe_queue_empty(exeq))
			usleep_range(1000, 2000);
		else
			return 0;
	}

	return -EBUSY;
}

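/* The "5 seconds" figure above is approximate: 5000 iterations of
 * usleep_range(1000, 2000) bound the passive wait to roughly 5-10 seconds
 * before -EBUSY is returned.
 */
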
static int __bnx2x_vlan_mac_execute_step(struct bnx2x *bp,
					 struct bnx2x_vlan_mac_obj *o,
					 unsigned long *ramrod_flags)
{
	int rc = 0;

	spin_lock_bh(&o->exe_queue.lock);

	DP(BNX2X_MSG_SP, "vlan_mac_execute_step - trying to take writer lock\n");
	rc = __bnx2x_vlan_mac_h_write_trylock(bp, o);

	if (rc != 0) {
		__bnx2x_vlan_mac_h_pend(bp, o, *ramrod_flags);

		/* The calling function should not differentiate between this
		 * case and the one in which there is already a pending ramrod.
		 */
		rc = 1;
	} else {
		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
	}
	spin_unlock_bh(&o->exe_queue.lock);

	return rc;
}

/**
 * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
 *
 * @bp: device handle
 * @o: bnx2x_vlan_mac_obj
 * @cqe: completion element
 * @ramrod_flags: if RAMROD_CONT is set, schedule the next execution chunk
 */
static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
				   struct bnx2x_vlan_mac_obj *o,
				   union event_ring_elem *cqe,
				   unsigned long *ramrod_flags)
{
	struct bnx2x_raw_obj *r = &o->raw;
	int rc;

	/* Clearing the pending list & raw state should be made
	 * atomically (as execution flow assumes they represent the same).
	 */
	spin_lock_bh(&o->exe_queue.lock);

	/* Reset pending list */
	__bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);

	/* Clear pending */
	r->clear_pending(r);

	spin_unlock_bh(&o->exe_queue.lock);

	/* If ramrod failed this is most likely a SW bug */
	if (cqe->message.error)
		return -EINVAL;

	/* Run the next bulk of pending commands if requested */
	if (test_bit(RAMROD_CONT, ramrod_flags)) {
		rc = __bnx2x_vlan_mac_execute_step(bp, o, ramrod_flags);
		if (rc < 0)
			return rc;
	}

	/* If there is more work to do return PENDING */
	if (!bnx2x_exe_queue_empty(&o->exe_queue))
		return 1;

	return 0;
}

/**
 * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
 *
 * @bp: device handle
 * @qo: bnx2x_qable_obj
 * @elem: bnx2x_exeq_elem
 */
static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
				   union bnx2x_qable_obj *qo,
				   struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem query, *pos;
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;

	memcpy(&query, elem, sizeof(query));

	switch (elem->cmd_data.vlan_mac.cmd) {
	case BNX2X_VLAN_MAC_ADD:
		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
		break;
	case BNX2X_VLAN_MAC_DEL:
		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
		break;
	default:
		/* Don't handle anything other than ADD or DEL */
		return 0;
	}

	/* If we found the appropriate element - delete it */
	pos = exeq->get(exeq, &query);
	if (pos) {
		/* Return the credit of the optimized command. Keep the two
		 * branches separate so that a successful put_credit() doesn't
		 * fall through into get_credit().
		 */
		if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
			      &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
			if (query.cmd_data.vlan_mac.cmd ==
			    BNX2X_VLAN_MAC_ADD) {
				if (!o->put_credit(o)) {
					BNX2X_ERR("Failed to return the credit for the optimized ADD command\n");
					return -EINVAL;
				}
			} else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
				BNX2X_ERR("Failed to recover the credit from the optimized DEL command\n");
				return -EINVAL;
			}
		}

		DP(BNX2X_MSG_SP, "Optimizing %s command\n",
		   (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
		   "ADD" : "DEL");

		list_del(&pos->link);
		bnx2x_exe_queue_free_elem(bp, pos);
		return 1;
	}

	return 0;
}

/**
 * bnx2x_vlan_mac_get_registry_elem - prepare a registry element
 *
 * @bp: device handle
 * @o: vlan_mac object the element belongs to
 * @elem: execution queue element describing the command
 * @restore: true if this is a restore flow
 * @re: output - the prepared registry element
 *
 * prepare a registry element according to the current command request.
 */
static inline int bnx2x_vlan_mac_get_registry_elem(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o,
	struct bnx2x_exeq_elem *elem,
	bool restore,
	struct bnx2x_vlan_mac_registry_elem **re)
{
	enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
	struct bnx2x_vlan_mac_registry_elem *reg_elem;

	/* Allocate a new registry element if needed. */
	if (!restore &&
	    ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
		reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
		if (!reg_elem)
			return -ENOMEM;

		/* Get a new CAM offset */
		if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
			/* This shall never happen, because we have checked the
			 * CAM availability in the 'validate'.
			 */
			WARN_ON(1);
			kfree(reg_elem);
			return -EINVAL;
		}

		DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);

		/* Set a VLAN-MAC data */
		memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
		       sizeof(reg_elem->u));

		/* Copy the flags (needed for DEL and RESTORE flows) */
		reg_elem->vlan_mac_flags =
			elem->cmd_data.vlan_mac.vlan_mac_flags;
	} else /* DEL, RESTORE */
		reg_elem = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);

	*re = reg_elem;
	return 0;
}

/**
 * bnx2x_execute_vlan_mac - execute vlan mac command
 *
 * @bp: device handle
 * @qo: queueable object to operate on
 * @exe_chunk: chunk of pending commands to execute
 * @ramrod_flags: execution flags
 *
 * go and send a ramrod!
 */
static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
				  union bnx2x_qable_obj *qo,
				  struct list_head *exe_chunk,
				  unsigned long *ramrod_flags)
{
	struct bnx2x_exeq_elem *elem;
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
	struct bnx2x_raw_obj *r = &o->raw;
	int rc, idx = 0;
	bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
	bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
	struct bnx2x_vlan_mac_registry_elem *reg_elem;
	enum bnx2x_vlan_mac_cmd cmd;

	/* If DRIVER_ONLY execution is requested, cleanup a registry
	 * and exit. Otherwise send a ramrod to FW.
	 */
	if (!drv_only) {
		WARN_ON(r->check_pending(r));

		/* Set pending */
		r->set_pending(r);

		/* Fill the ramrod data */
		list_for_each_entry(elem, exe_chunk, link) {
			cmd = elem->cmd_data.vlan_mac.cmd;

			/* We will add to the target object in MOVE command, so
			 * change the object for a CAM search.
			 */
			if (cmd == BNX2X_VLAN_MAC_MOVE)
				cam_obj = elem->cmd_data.vlan_mac.target_obj;
			else
				cam_obj = o;

			rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
							      elem, restore,
							      &reg_elem);
			if (rc)
				goto error_exit;

			WARN_ON(!reg_elem);

			/* Push a new entry into the registry */
			if (!restore &&
			    ((cmd == BNX2X_VLAN_MAC_ADD) ||
			     (cmd == BNX2X_VLAN_MAC_MOVE)))
				list_add(&reg_elem->link, &cam_obj->head);

			/* Configure a single command in a ramrod data buffer */
			o->set_one_rule(bp, o, elem, idx,
					reg_elem->cam_offset);

			/* MOVE command consumes 2 entries in the ramrod data */
			if (cmd == BNX2X_VLAN_MAC_MOVE)
				idx += 2;
			else
				idx++;
		}

		/* No need for an explicit memory barrier here as long as we
		 * would need to ensure the ordering of writing to the SPQ
		 * element and updating of the SPQ producer, which involves a
		 * memory read; we will have to put a full memory barrier there
		 * (inside bnx2x_sp_post()).
		 */
		rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
				   U64_HI(r->rdata_mapping),
				   U64_LO(r->rdata_mapping),
				   ETH_CONNECTION_TYPE);
		if (rc)
			goto error_exit;
	}

	/* Now, when we are done with the ramrod - clean up the registry */
	list_for_each_entry(elem, exe_chunk, link) {
		cmd = elem->cmd_data.vlan_mac.cmd;
		if ((cmd == BNX2X_VLAN_MAC_DEL) ||
		    (cmd == BNX2X_VLAN_MAC_MOVE)) {
			reg_elem = o->check_del(bp, o,
						&elem->cmd_data.vlan_mac.u);

			WARN_ON(!reg_elem);

			o->put_cam_offset(o, reg_elem->cam_offset);
			list_del(&reg_elem->link);
			kfree(reg_elem);
		}
	}

	if (!drv_only)
		return 1;
	else
		return 0;

error_exit:
	r->clear_pending(r);

	/* Cleanup a registry in case of a failure */
	list_for_each_entry(elem, exe_chunk, link) {
		cmd = elem->cmd_data.vlan_mac.cmd;

		if (cmd == BNX2X_VLAN_MAC_MOVE)
			cam_obj = elem->cmd_data.vlan_mac.target_obj;
		else
			cam_obj = o;

		/* Delete all newly added above entries */
		if (!restore &&
		    ((cmd == BNX2X_VLAN_MAC_ADD) ||
		     (cmd == BNX2X_VLAN_MAC_MOVE))) {
			reg_elem = o->check_del(bp, cam_obj,
						&elem->cmd_data.vlan_mac.u);
			if (reg_elem) {
				list_del(&reg_elem->link);
				kfree(reg_elem);
			}
		}
	}

	return rc;
}

static inline int bnx2x_vlan_mac_push_new_cmd(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_ramrod_params *p)
{
	struct bnx2x_exeq_elem *elem;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
	bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);

	/* Allocate the execution queue element */
	elem = bnx2x_exe_queue_alloc_elem(bp);
	if (!elem)
		return -ENOMEM;

	/* Set the command 'length' */
	switch (p->user_req.cmd) {
	case BNX2X_VLAN_MAC_MOVE:
		elem->cmd_len = 2;
		break;
	default:
		elem->cmd_len = 1;
	}

	/* Fill the object specific info */
	memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));

	/* Try to add a new command to the pending list */
	return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
}

/**
 * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
 *
 * @bp: device handle
 * @p: ramrod parameters
 */
int bnx2x_config_vlan_mac(struct bnx2x *bp,
			  struct bnx2x_vlan_mac_ramrod_params *p)
{
	int rc = 0;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
	unsigned long *ramrod_flags = &p->ramrod_flags;
	bool cont = test_bit(RAMROD_CONT, ramrod_flags);
	struct bnx2x_raw_obj *raw = &o->raw;

	/*
	 * Add new elements to the execution list for commands that require it.
	 */
	if (!cont) {
		rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
		if (rc)
			return rc;
	}

	/* If nothing will be executed further in this iteration we want to
	 * return PENDING if there are pending commands
	 */
	if (!bnx2x_exe_queue_empty(&o->exe_queue))
		rc = 1;

	if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
		DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
		raw->clear_pending(raw);
	}

	/* Execute commands if required */
	if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
	    test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
		rc = __bnx2x_vlan_mac_execute_step(bp, p->vlan_mac_obj,
						   &p->ramrod_flags);
		if (rc < 0)
			return rc;
	}

	/* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
	 * then the user wants to wait until the last command is done.
	 */
	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
		/* Wait maximum for the current exe_queue length iterations plus
		 * one (for the current pending command).
		 */
		int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;

		while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
		       max_iterations--) {

			/* Wait for the current command to complete */
			rc = raw->wait_comp(bp, raw);
			if (rc)
				return rc;

			/* Make a next step */
			rc = __bnx2x_vlan_mac_execute_step(bp,
							   p->vlan_mac_obj,
							   &p->ramrod_flags);
			if (rc < 0)
				return rc;
		}

		return 0;
	}

	return rc;
}

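/* Illustrative caller sketch (mac_obj and some_mac are made-up placeholders):
 * queue an ADD command for a single MAC and wait for it to complete.
 *
 *	struct bnx2x_vlan_mac_ramrod_params p = {0};
 *	int rc;
 *
 *	p.vlan_mac_obj = mac_obj;
 *	p.user_req.cmd = BNX2X_VLAN_MAC_ADD;
 *	memcpy(p.user_req.u.mac.mac, some_mac, ETH_ALEN);
 *	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *
 *	rc = bnx2x_config_vlan_mac(bp, &p);
 */
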
/**
 * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
 *
 * @bp: device handle
 * @o: vlan_mac object
 * @vlan_mac_flags: flags specifying which elements to delete
 * @ramrod_flags: execution flags to be used for this deletion
 *
 * Returns zero if the last operation has completed successfully and there
 * are no more elements left, a positive value if the last operation has
 * completed successfully and there are more previously configured elements,
 * and a negative value if the current operation has failed.
 */
static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_obj *o,
				  unsigned long *vlan_mac_flags,
				  unsigned long *ramrod_flags)
{
	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
	struct bnx2x_vlan_mac_ramrod_params p;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
	int read_lock;
	int rc = 0;

	/* Clear pending commands first */
	spin_lock_bh(&exeq->lock);

	list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
		if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
		    *vlan_mac_flags) {
			rc = exeq->remove(bp, exeq->owner, exeq_pos);
			if (rc) {
				BNX2X_ERR("Failed to remove command\n");
				spin_unlock_bh(&exeq->lock);
				return rc;
			}
			list_del(&exeq_pos->link);
			bnx2x_exe_queue_free_elem(bp, exeq_pos);
		}
	}

	spin_unlock_bh(&exeq->lock);

	/* Prepare a command request */
	memset(&p, 0, sizeof(p));
	p.vlan_mac_obj = o;
	p.ramrod_flags = *ramrod_flags;
	p.user_req.cmd = BNX2X_VLAN_MAC_DEL;

	/* Add all but the last VLAN-MAC to the execution queue without
	 * actually executing anything.
	 */
	__clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
	__clear_bit(RAMROD_EXEC, &p.ramrod_flags);
	__clear_bit(RAMROD_CONT, &p.ramrod_flags);

	DP(BNX2X_MSG_SP, "vlan_mac_del_all -- taking vlan_mac_lock (reader)\n");
	read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
	if (read_lock != 0)
		return read_lock;

	list_for_each_entry(pos, &o->head, link) {
		if (pos->vlan_mac_flags == *vlan_mac_flags) {
			p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
			memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
			rc = bnx2x_config_vlan_mac(bp, &p);
			if (rc < 0) {
				BNX2X_ERR("Failed to add a new DEL command\n");
				bnx2x_vlan_mac_h_read_unlock(bp, o);
				return rc;
			}
		}
	}

	DP(BNX2X_MSG_SP, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)\n");
	bnx2x_vlan_mac_h_read_unlock(bp, o);

	p.ramrod_flags = *ramrod_flags;
	__set_bit(RAMROD_CONT, &p.ramrod_flags);

	return bnx2x_config_vlan_mac(bp, &p);
}

static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
	u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
	unsigned long *pstate, bnx2x_obj_type type)
{
	raw->func_id = func_id;
	raw->cid = cid;
	raw->cl_id = cl_id;
	raw->rdata = rdata;
	raw->rdata_mapping = rdata_mapping;
	raw->state = state;
	raw->pstate = pstate;
	raw->obj_type = type;
	raw->check_pending = bnx2x_raw_check_pending;
	raw->clear_pending = bnx2x_raw_clear_pending;
	raw->set_pending = bnx2x_raw_set_pending;
	raw->wait_comp = bnx2x_raw_wait;
}

static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
	u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
	int state, unsigned long *pstate, bnx2x_obj_type type,
	struct bnx2x_credit_pool_obj *macs_pool,
	struct bnx2x_credit_pool_obj *vlans_pool)
{
	INIT_LIST_HEAD(&o->head);
	o->head_reader = 0;
	o->head_exe_request = false;
	o->saved_ramrod_flags = 0;

	o->macs_pool = macs_pool;
	o->vlans_pool = vlans_pool;

	o->delete_all = bnx2x_vlan_mac_del_all;
	o->restore = bnx2x_vlan_mac_restore;
	o->complete = bnx2x_complete_vlan_mac;
	o->wait = bnx2x_wait_vlan_mac;

	bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
			   state, pstate, type);
}

void bnx2x_init_mac_obj(struct bnx2x *bp,
			struct bnx2x_vlan_mac_obj *mac_obj,
			u8 cl_id, u32 cid, u8 func_id, void *rdata,
			dma_addr_t rdata_mapping, int state,
			unsigned long *pstate, bnx2x_obj_type type,
			struct bnx2x_credit_pool_obj *macs_pool)
{
	union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;

	bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
				   rdata_mapping, state, pstate, type,
				   macs_pool, NULL);

	/* CAM credit pool handling */
	mac_obj->get_credit = bnx2x_get_credit_mac;
	mac_obj->put_credit = bnx2x_put_credit_mac;
	mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
	mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;

	if (CHIP_IS_E1x(bp)) {
		mac_obj->set_one_rule = bnx2x_set_one_mac_e1x;
		mac_obj->check_del = bnx2x_check_mac_del;
		mac_obj->check_add = bnx2x_check_mac_add;
		mac_obj->check_move = bnx2x_check_move_always_err;
		mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;

		/* Exe Queue */
		bnx2x_exe_queue_init(bp,
				     &mac_obj->exe_queue, 1, qable_obj,
				     bnx2x_validate_vlan_mac,
				     bnx2x_remove_vlan_mac,
				     bnx2x_optimize_vlan_mac,
				     bnx2x_execute_vlan_mac,
				     bnx2x_exeq_get_mac);
	} else {
		mac_obj->set_one_rule = bnx2x_set_one_mac_e2;
		mac_obj->check_del = bnx2x_check_mac_del;
		mac_obj->check_add = bnx2x_check_mac_add;
		mac_obj->check_move = bnx2x_check_move;
		mac_obj->ramrod_cmd =
			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
		mac_obj->get_n_elements = bnx2x_get_n_elements;

		/* Exe Queue */
		bnx2x_exe_queue_init(bp,
				     &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
				     qable_obj, bnx2x_validate_vlan_mac,
				     bnx2x_remove_vlan_mac,
				     bnx2x_optimize_vlan_mac,
				     bnx2x_execute_vlan_mac,
				     bnx2x_exeq_get_mac);
	}
}

void bnx2x_init_vlan_obj(struct bnx2x *bp,
			 struct bnx2x_vlan_mac_obj *vlan_obj,
			 u8 cl_id, u32 cid, u8 func_id, void *rdata,
			 dma_addr_t rdata_mapping, int state,
			 unsigned long *pstate, bnx2x_obj_type type,
			 struct bnx2x_credit_pool_obj *vlans_pool)
{
	union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;

	bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
				   rdata_mapping, state, pstate, type, NULL,
				   vlans_pool);

	vlan_obj->get_credit = bnx2x_get_credit_vlan;
	vlan_obj->put_credit = bnx2x_put_credit_vlan;
	vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
	vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;

	if (CHIP_IS_E1x(bp)) {
		BNX2X_ERR("Only E2 and newer chips are supported\n");
		BUG();
	} else {
		vlan_obj->set_one_rule = bnx2x_set_one_vlan_e2;
		vlan_obj->check_del = bnx2x_check_vlan_del;
		vlan_obj->check_add = bnx2x_check_vlan_add;
		vlan_obj->check_move = bnx2x_check_move;
		vlan_obj->ramrod_cmd =
			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
		vlan_obj->get_n_elements = bnx2x_get_n_elements;

		/* Exe Queue */
		bnx2x_exe_queue_init(bp,
				     &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
				     qable_obj, bnx2x_validate_vlan_mac,
				     bnx2x_remove_vlan_mac,
				     bnx2x_optimize_vlan_mac,
				     bnx2x_execute_vlan_mac,
				     bnx2x_exeq_get_vlan);
	}
}

void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
			     struct bnx2x_vlan_mac_obj *vlan_mac_obj,
			     u8 cl_id, u32 cid, u8 func_id, void *rdata,
			     dma_addr_t rdata_mapping, int state,
			     unsigned long *pstate, bnx2x_obj_type type,
			     struct bnx2x_credit_pool_obj *macs_pool,
			     struct bnx2x_credit_pool_obj *vlans_pool)
{
	union bnx2x_qable_obj *qable_obj =
		(union bnx2x_qable_obj *)vlan_mac_obj;

	bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
				   rdata_mapping, state, pstate, type,
				   macs_pool, vlans_pool);

	/* CAM pool handling */
	vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
	vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
	/* CAM offset is relevant for 57710 and 57711 chips only which have a
	 * single CAM for both MACs and VLAN-MAC pairs. So the offset
	 * will be taken from MACs' pool object only.
	 */
	vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
	vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;

	if (CHIP_IS_E1(bp)) {
		BNX2X_ERR("VLAN-MAC pairs are not supported on E1 chips\n");
		BUG();
	} else if (CHIP_IS_E1H(bp)) {
		vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h;
		vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
		vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
		vlan_mac_obj->check_move = bnx2x_check_move_always_err;
		vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;

		/* Exe Queue */
		bnx2x_exe_queue_init(bp,
				     &vlan_mac_obj->exe_queue, 1, qable_obj,
				     bnx2x_validate_vlan_mac,
				     bnx2x_remove_vlan_mac,
				     bnx2x_optimize_vlan_mac,
				     bnx2x_execute_vlan_mac,
				     bnx2x_exeq_get_vlan_mac);
	} else {
		vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2;
		vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
		vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
		vlan_mac_obj->check_move = bnx2x_check_move;
		vlan_mac_obj->ramrod_cmd =
			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;

		/* Exe Queue */
		bnx2x_exe_queue_init(bp,
				     &vlan_mac_obj->exe_queue,
				     CLASSIFY_RULES_COUNT,
				     qable_obj, bnx2x_validate_vlan_mac,
				     bnx2x_remove_vlan_mac,
				     bnx2x_optimize_vlan_mac,
				     bnx2x_execute_vlan_mac,
				     bnx2x_exeq_get_vlan_mac);
	}
}

/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
static inline void __storm_memset_mac_filters(struct bnx2x *bp,
			struct tstorm_eth_mac_filter_config *mac_filters,
			u16 pf_id)
{
	size_t size = sizeof(struct tstorm_eth_mac_filter_config);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);

	__storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
}

static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
				 struct bnx2x_rx_mode_ramrod_params *p)
{
	/* update the bp MAC filter structure */
	u32 mask = (1 << p->cl_id);

	struct tstorm_eth_mac_filter_config *mac_filters =
		(struct tstorm_eth_mac_filter_config *)p->rdata;

	/* initial setting is drop-all */
	u8 drop_all_ucast = 1, drop_all_mcast = 1;
	u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
	u8 unmatched_unicast = 0;

	/* In E1x, only the Rx accept flags are taken into account, since
	 * Tx switching isn't enabled.
	 */
	if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
		/* accept matched ucast */
		drop_all_ucast = 0;

	if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
		/* accept matched mcast */
		drop_all_mcast = 0;

	if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
		/* accept all ucast */
		drop_all_ucast = 0;
		accp_all_ucast = 1;
	}
	if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
		/* accept all mcast */
		drop_all_mcast = 0;
		accp_all_mcast = 1;
	}
	if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
		/* accept (all) bcast */
		accp_all_bcast = 1;
	if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
		/* accept unmatched unicasts */
		unmatched_unicast = 1;

	mac_filters->ucast_drop_all = drop_all_ucast ?
		mac_filters->ucast_drop_all | mask :
		mac_filters->ucast_drop_all & ~mask;

	mac_filters->mcast_drop_all = drop_all_mcast ?
		mac_filters->mcast_drop_all | mask :
		mac_filters->mcast_drop_all & ~mask;

	mac_filters->ucast_accept_all = accp_all_ucast ?
		mac_filters->ucast_accept_all | mask :
		mac_filters->ucast_accept_all & ~mask;

	mac_filters->mcast_accept_all = accp_all_mcast ?
		mac_filters->mcast_accept_all | mask :
		mac_filters->mcast_accept_all & ~mask;

	mac_filters->bcast_accept_all = accp_all_bcast ?
		mac_filters->bcast_accept_all | mask :
		mac_filters->bcast_accept_all & ~mask;

	mac_filters->unmatched_unicast = unmatched_unicast ?
		mac_filters->unmatched_unicast | mask :
		mac_filters->unmatched_unicast & ~mask;

	DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\naccp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n",
	   mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
	   mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
	   mac_filters->bcast_accept_all);

	/* write the MAC filter structure */
	__storm_memset_mac_filters(bp, mac_filters, p->func_id);

	/* The operation is completed */
	clear_bit(p->state, p->pstate);
	smp_mb__after_clear_bit();

	return 0;
}

/* Setup ramrod data */
static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
				struct eth_classify_header *hdr,
				u8 rule_cnt)
{
	hdr->echo = cpu_to_le32(cid);
	hdr->rule_cnt = rule_cnt;
}

static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
				unsigned long *accept_flags,
				struct eth_filter_rules_cmd *cmd,
				bool clear_accept_all)
{
	u16 state;

	/* start with 'drop-all' */
	state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
		ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;

	if (test_bit(BNX2X_ACCEPT_UNICAST, accept_flags))
		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;

	if (test_bit(BNX2X_ACCEPT_MULTICAST, accept_flags))
		state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;

	if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, accept_flags)) {
		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
		state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
	}

	if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, accept_flags)) {
		state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
		state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
	}

	if (test_bit(BNX2X_ACCEPT_BROADCAST, accept_flags))
		state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;

	if (test_bit(BNX2X_ACCEPT_UNMATCHED, accept_flags)) {
		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
		state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
	}

	if (test_bit(BNX2X_ACCEPT_ANY_VLAN, accept_flags))
		state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;

	/* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
	if (clear_accept_all) {
		state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
		state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
	}

	cmd->state = cpu_to_le16(state);
}

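/* Worked example (illustrative): with only BNX2X_ACCEPT_UNICAST and
 * BNX2X_ACCEPT_BROADCAST set in @accept_flags and clear_accept_all false,
 * the function starts from UCAST_DROP_ALL | MCAST_DROP_ALL, clears
 * UCAST_DROP_ALL and adds BCAST_ACCEPT_ALL, so cmd->state ends up as
 * ETH_FILTER_RULES_CMD_MCAST_DROP_ALL | ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL.
 */
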
static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
				struct bnx2x_rx_mode_ramrod_params *p)
{
	struct eth_filter_rules_ramrod_data *data = p->rdata;
	int rc;
	u8 rule_idx = 0;

	/* Reset the ramrod data buffer */
	memset(data, 0, sizeof(*data));

	/* Setup ramrod data */

	/* Tx (internal switching) */
	if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
		data->rules[rule_idx].client_id = p->cl_id;
		data->rules[rule_idx].func_id = p->func_id;

		data->rules[rule_idx].cmd_general_data =
			ETH_FILTER_RULES_CMD_TX_CMD;

		bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
					       &(data->rules[rule_idx++]),
					       false);
	}

	/* Rx */
	if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
		data->rules[rule_idx].client_id = p->cl_id;
		data->rules[rule_idx].func_id = p->func_id;

		data->rules[rule_idx].cmd_general_data =
			ETH_FILTER_RULES_CMD_RX_CMD;

		bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
					       &(data->rules[rule_idx++]),
					       false);
	}

	/* If FCoE Queue configuration has been requested, configure the Rx and
	 * internal switching modes for this queue in separate rules.
	 *
	 * FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
	 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
	 */
	if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
		/* Tx (internal switching) */
		if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
			data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
			data->rules[rule_idx].func_id = p->func_id;

			data->rules[rule_idx].cmd_general_data =
						ETH_FILTER_RULES_CMD_TX_CMD;

			bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
						       &(data->rules[rule_idx]),
						       true);
			rule_idx++;
		}

		/* Rx */
		if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
			data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
			data->rules[rule_idx].func_id = p->func_id;

			data->rules[rule_idx].cmd_general_data =
						ETH_FILTER_RULES_CMD_RX_CMD;

			bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
						       &(data->rules[rule_idx]),
						       true);
			rule_idx++;
		}
	}

	/* Set the ramrod header (most importantly - number of rules to
	 * configure).
	 */
	bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);

	DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n",
			 data->header.rule_cnt, p->rx_accept_flags,
			 p->tx_accept_flags);

	/* No need for an explicit memory barrier here as long as we would
	 * need to ensure the ordering of writing to the SPQ element and
	 * updating of the SPQ producer, which involves a memory read; we
	 * will have to put a full memory barrier there (inside
	 * bnx2x_sp_post()).
	 */

	/* Send a ramrod */
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
			   U64_HI(p->rdata_mapping),
			   U64_LO(p->rdata_mapping),
			   ETH_CONNECTION_TYPE);
	if (rc)
		return rc;

	/* Ramrod completion is pending */
	return 1;
}

static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
				      struct bnx2x_rx_mode_ramrod_params *p)
{
	return bnx2x_state_wait(bp, p->state, p->pstate);
}

static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
				    struct bnx2x_rx_mode_ramrod_params *p)
{
	/* Do nothing */
	return 0;
}

int bnx2x_config_rx_mode(struct bnx2x *bp,
			 struct bnx2x_rx_mode_ramrod_params *p)
{
	int rc;

	/* Configure the new classification in the chip */
	rc = p->rx_mode_obj->config_rx_mode(bp, p);
	if (rc < 0)
		return rc;

	/* Wait for a ramrod completion if it was requested */
	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
		rc = p->rx_mode_obj->wait_comp(bp, p);
		if (rc)
			return rc;
	}

	return rc;
}

void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
			    struct bnx2x_rx_mode_obj *o)
{
	if (CHIP_IS_E1x(bp)) {
		o->wait_comp = bnx2x_empty_rx_mode_wait;
		o->config_rx_mode = bnx2x_set_rx_mode_e1x;
	} else {
		o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
		o->config_rx_mode = bnx2x_set_rx_mode_e2;
	}
}

/********************* Multicast verbs: SET, CLEAR ****************************/
static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
{
	return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
}

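/* The approximate-match registry holds 256 bins; the bin for a MAC is
 * simply the most significant byte of the CRC32C of its six address bytes.
 * Consequently, two MACs whose CRCs share the top byte land in the same
 * bin and cannot be distinguished by this filter.
 */
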
struct bnx2x_mcast_mac_elem {
	struct list_head link;
	u8 mac[ETH_ALEN];
	u8 pad[2]; /* For a natural alignment of the following buffer */
};

struct bnx2x_pending_mcast_cmd {
	struct list_head link;
	int type; /* BNX2X_MCAST_CMD_X */
	union {
		struct list_head macs_head;
		u32 macs_num; /* Needed for DEL command */
		int next_bin; /* Needed for RESTORE flow with aprox match */
	} data;

	bool done; /* set to true, when the command has been handled,
		    * practically used in 57712 handling only, where one pending
		    * command may be handled in a few operations. As long as for
		    * other chips every operation handling is completed in a
		    * single ramrod, there is no need to utilize this field.
		    */
};

static int bnx2x_mcast_wait(struct bnx2x *bp,
			    struct bnx2x_mcast_obj *o)
{
	if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
	    o->raw.wait_comp(bp, &o->raw))
		return -EBUSY;

	return 0;
}

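/* Note on the allocation scheme used below: a pending ADD command stores
 * its MAC list in the same kzalloc() block as the command itself -
 * total_sz covers the bnx2x_pending_mcast_cmd structure followed by
 * macs_list_len trailing bnx2x_mcast_mac_elem entries, which cur_mac then
 * walks while linking them onto data.macs_head.
 */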
static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
				   struct bnx2x_mcast_obj *o,
				   struct bnx2x_mcast_ramrod_params *p,
				   enum bnx2x_mcast_cmd cmd)
{
	int total_sz;
	struct bnx2x_pending_mcast_cmd *new_cmd;
	struct bnx2x_mcast_mac_elem *cur_mac = NULL;
	struct bnx2x_mcast_list_elem *pos;
	int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
			     p->mcast_list_len : 0);

	/* If the command is empty ("handle pending commands only"), break */
	if (!p->mcast_list_len)
		return 0;

	total_sz = sizeof(*new_cmd) +
		macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);

	/* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
	new_cmd = kzalloc(total_sz, GFP_ATOMIC);

	if (!new_cmd)
		return -ENOMEM;

	DP(BNX2X_MSG_SP, "About to enqueue a new %d command. macs_list_len=%d\n",
	   cmd, macs_list_len);

	INIT_LIST_HEAD(&new_cmd->data.macs_head);

	new_cmd->type = cmd;
	new_cmd->done = false;

	switch (cmd) {
	case BNX2X_MCAST_CMD_ADD:
		cur_mac = (struct bnx2x_mcast_mac_elem *)
			  ((u8 *)new_cmd + sizeof(*new_cmd));

		/* Push the MACs of the current command into the pending command
		 * MACs list: FIFO
		 */
		list_for_each_entry(pos, &p->mcast_list, link) {
			memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
			list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
			cur_mac++;
		}

		break;

	case BNX2X_MCAST_CMD_DEL:
		new_cmd->data.macs_num = p->mcast_list_len;
		break;

	case BNX2X_MCAST_CMD_RESTORE:
		new_cmd->data.next_bin = 0;
		break;

	default:
		kfree(new_cmd);
		BNX2X_ERR("Unknown command: %d\n", cmd);
		return -EINVAL;
	}

	/* Push the new pending command to the tail of the pending list: FIFO */
	list_add_tail(&new_cmd->link, &o->pending_cmds_head);

	o->set_sched(o);

	return 1;
}

/**
 * bnx2x_mcast_get_next_bin - get the next set bin (index)
 *
 * @o: multicast object
 * @last: index to start looking from (inclusive)
 *
 * returns the next found (set) bin or a negative value if none is found.
 */
static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
{
	int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;

	for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
		if (o->registry.aprox_match.vec[i])
			for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
				int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
				if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.vec,
						       cur_bit)) {
					return cur_bit;
				}
			}
		inner_start = 0;
	}

	/* None found */
	return -1;
}

/**
 * bnx2x_mcast_clear_first_bin - find the first set bin and clear it
 *
 * @o: multicast object
 *
 * returns the index of the found bin or -1 if none is found
 */
static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
{
	int cur_bit = bnx2x_mcast_get_next_bin(o, 0);

	if (cur_bit >= 0)
		BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);

	return cur_bit;
}

static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	u8 rx_tx_flag = 0;

	if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
		rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;

	if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
		rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;

	return rx_tx_flag;
}


static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
					struct bnx2x_mcast_obj *o, int idx,
					union bnx2x_mcast_config_data *cfg_data,
					enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_raw_obj *r = &o->raw;
	struct eth_multicast_rules_ramrod_data *data =
		(struct eth_multicast_rules_ramrod_data *)(r->rdata);
	u8 func_id = r->func_id;
	u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
	int bin;

	if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
		rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;

	data->rules[idx].cmd_general_data |= rx_tx_add_flag;

	/* Get a bin and update a bins' vector */
	switch (cmd) {
	case BNX2X_MCAST_CMD_ADD:
		bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
		break;

	case BNX2X_MCAST_CMD_DEL:
		/* If there were no more bins to clear
		 * (bnx2x_mcast_clear_first_bin() returns -1) then we would
		 * clear any (0xff) bin.
		 * See bnx2x_mcast_validate_e2() for explanation when it may
		 * happen.
		 */
		bin = bnx2x_mcast_clear_first_bin(o);
		break;

	case BNX2X_MCAST_CMD_RESTORE:
		bin = cfg_data->bin;
		break;

	default:
		BNX2X_ERR("Unknown command: %d\n", cmd);
		return;
	}

	DP(BNX2X_MSG_SP, "%s bin %d\n",
	   ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
	    "Setting" : "Clearing"), bin);

	data->rules[idx].bin_id = (u8)bin;
	data->rules[idx].func_id = func_id;
	data->rules[idx].engine_id = o->engine_id;
}
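
/* Illustrative sketch (not driver code): what one filled rule ends up
 * looking like for an ADD on an RX+TX object.  Field names are those used
 * above; the bin value is whatever bnx2x_mcast_bin_from_mac() hashed the
 * MAC to (a hypothetical 173 here):
 *
 *	data->rules[idx].cmd_general_data = ETH_MULTICAST_RULES_CMD_RX_CMD |
 *					    ETH_MULTICAST_RULES_CMD_TX_CMD |
 *					    ETH_MULTICAST_RULES_CMD_IS_ADD;
 *	data->rules[idx].bin_id    = 173;
 *	data->rules[idx].func_id   = r->func_id;
 *	data->rules[idx].engine_id = o->engine_id;
 *
 * A DEL rule differs only in that IS_ADD is absent and bin_id names a bin
 * popped off the registry by bnx2x_mcast_clear_first_bin().
 */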

/**
 * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
 *
 * @bp:        device handle
 * @o:         multicast object
 * @start_bin: index in the registry to start from (inclusive)
 * @rdata_idx: index in the ramrod data to start from
 *
 * returns last handled bin index or -1 if all bins have been handled
 */
static inline int bnx2x_mcast_handle_restore_cmd_e2(
	struct bnx2x *bp, struct bnx2x_mcast_obj *o, int start_bin,
	int *rdata_idx)
{
	int cur_bin, cnt = *rdata_idx;
	union bnx2x_mcast_config_data cfg_data = {NULL};

	/* go through the registry and configure the bins from it */
	for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
	     cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {

		cfg_data.bin = (u8)cur_bin;
		o->set_one_rule(bp, o, cnt, &cfg_data,
				BNX2X_MCAST_CMD_RESTORE);

		cnt++;

		DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);

		/* Break if we reached the maximum number
		 * of rules.
		 */
		if (cnt >= o->max_cmd_len)
			break;
	}

	*rdata_idx = cnt;

	return cur_bin;
}
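
/* Illustrative sketch (not driver code): a RESTORE that doesn't fit in one
 * ramrod is resumed from the bin after the last one handled.  With a
 * hypothetical max_cmd_len of 16 and 20 set bins:
 *
 *	int idx = 0;
 *	int last = bnx2x_mcast_handle_restore_cmd_e2(bp, o, 0, &idx);
 *	// idx == 16, last == <16th set bin>; command kept pending
 *	// ...next ramrod cycle...
 *	idx = 0;
 *	last = bnx2x_mcast_handle_restore_cmd_e2(bp, o, last + 1, &idx);
 *	// idx == 4, last == -1; command done
 *
 * This is exactly the bookkeeping bnx2x_mcast_hdl_pending_restore_e2()
 * performs via cmd_pos->data.next_bin.
 */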

static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
	int *line_idx)
{
	struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
	int cnt = *line_idx;
	union bnx2x_mcast_config_data cfg_data = {NULL};

	list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
				 link) {
		cfg_data.mac = &pmac_pos->mac[0];
		o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);

		cnt++;

		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
		   pmac_pos->mac);

		list_del(&pmac_pos->link);

		/* Break if we reached the maximum number
		 * of rules.
		 */
		if (cnt >= o->max_cmd_len)
			break;
	}

	*line_idx = cnt;

	/* if no more MACs to configure - we are done */
	if (list_empty(&cmd_pos->data.macs_head))
		cmd_pos->done = true;
}

static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
	int *line_idx)
{
	int cnt = *line_idx;

	while (cmd_pos->data.macs_num) {
		o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);

		cnt++;

		cmd_pos->data.macs_num--;

		DP(BNX2X_MSG_SP, "Deleting MAC. %d left, cnt is %d\n",
		   cmd_pos->data.macs_num, cnt);

		/* Break if we reached the maximum
		 * number of rules.
		 */
		if (cnt >= o->max_cmd_len)
			break;
	}

	*line_idx = cnt;

	/* If we cleared all bins - we are done */
	if (!cmd_pos->data.macs_num)
		cmd_pos->done = true;
}

static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
	int *line_idx)
{
	cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
						line_idx);

	if (cmd_pos->data.next_bin < 0)
		/* If o->hdl_restore returned -1 we are done */
		cmd_pos->done = true;
	else
		/* Start from the next bin next time */
		cmd_pos->data.next_bin++;
}

static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
				struct bnx2x_mcast_ramrod_params *p)
{
	struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
	int cnt = 0;
	struct bnx2x_mcast_obj *o = p->mcast_obj;

	list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
				 link) {
		switch (cmd_pos->type) {
		case BNX2X_MCAST_CMD_ADD:
			bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
			break;

		case BNX2X_MCAST_CMD_DEL:
			bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
			break;

		case BNX2X_MCAST_CMD_RESTORE:
			bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
							   &cnt);
			break;

		default:
			BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
			return -EINVAL;
		}

		/* If the command has been completed - remove it from the list
		 * and free the memory
		 */
		if (cmd_pos->done) {
			list_del(&cmd_pos->link);
			kfree(cmd_pos);
		}

		/* Break if we reached the maximum number of rules */
		if (cnt >= o->max_cmd_len)
			break;
	}

	return cnt;
}
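
/* Illustrative sketch (not driver code): how several pending commands share
 * one ramrod.  With a hypothetical max_cmd_len of 16, a pending ADD of 10
 * MACs followed by a pending DEL of 10 entries is consumed as:
 *
 *	ramrod 1: rules 0..9   <- the ADD (done, freed)
 *	          rules 10..15 <- first 6 lines of the DEL
 *	ramrod 2: rules 0..3   <- remaining 4 lines of the DEL (done, freed)
 *
 * A command that fills the buffer stays on pending_cmds_head with its
 * progress recorded (macs_head / macs_num / next_bin) and is resumed on the
 * next call.
 */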

static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
	struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
	int *line_idx)
{
	struct bnx2x_mcast_list_elem *mlist_pos;
	union bnx2x_mcast_config_data cfg_data = {NULL};
	int cnt = *line_idx;

	list_for_each_entry(mlist_pos, &p->mcast_list, link) {
		cfg_data.mac = mlist_pos->mac;
		o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);

		cnt++;

		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
		   mlist_pos->mac);
	}

	*line_idx = cnt;
}

static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
	struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
	int *line_idx)
{
	int cnt = *line_idx, i;

	for (i = 0; i < p->mcast_list_len; i++) {
		o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);

		cnt++;

		DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
		   p->mcast_list_len - i - 1);
	}

	*line_idx = cnt;
}

/**
 * bnx2x_mcast_handle_current_cmd - handle the current (non-pending) command
 *
 * @bp:        device handle
 * @p:         multicast ramrod parameters
 * @cmd:       command to handle
 * @start_cnt: first line in the ramrod data that may be used
 *
 * This function is called iff there is enough room for the current command in
 * the ramrod data.
 * Returns number of lines filled in the ramrod data in total.
 */
static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
				struct bnx2x_mcast_ramrod_params *p,
				enum bnx2x_mcast_cmd cmd,
				int start_cnt)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	int cnt = start_cnt;

	DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);

	switch (cmd) {
	case BNX2X_MCAST_CMD_ADD:
		bnx2x_mcast_hdl_add(bp, o, p, &cnt);
		break;

	case BNX2X_MCAST_CMD_DEL:
		bnx2x_mcast_hdl_del(bp, o, p, &cnt);
		break;

	case BNX2X_MCAST_CMD_RESTORE:
		o->hdl_restore(bp, o, 0, &cnt);
		break;

	default:
		BNX2X_ERR("Unknown command: %d\n", cmd);
		return -EINVAL;
	}

	/* The current command has been handled */
	p->mcast_list_len = 0;

	return cnt;
}

static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
				   struct bnx2x_mcast_ramrod_params *p,
				   enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	int reg_sz = o->get_registry_size(o);

	switch (cmd) {
	/* DEL command deletes all currently configured MACs */
	case BNX2X_MCAST_CMD_DEL:
		o->set_registry_size(o, 0);
		/* Don't break */

	/* RESTORE command will restore the entire multicast configuration */
	case BNX2X_MCAST_CMD_RESTORE:
		/* Here we set the approximate amount of work to do, which in
		 * fact may be only an upper bound: some MACs in postponed ADD
		 * command(s) scheduled before this command may fall into
		 * the same bin, so the actual number of bins set in the
		 * registry would be less than we estimate here. See
		 * bnx2x_mcast_set_one_rule_e2() for further details.
		 */
		p->mcast_list_len = reg_sz;
		break;

	case BNX2X_MCAST_CMD_ADD:
	case BNX2X_MCAST_CMD_CONT:
		/* Here we assume that all new MACs will fall into new bins.
		 * However we will correct the real registry size after we
		 * handle all pending commands.
		 */
		o->set_registry_size(o, reg_sz + p->mcast_list_len);
		break;

	default:
		BNX2X_ERR("Unknown command: %d\n", cmd);
		return -EINVAL;
	}

	/* Increase the total number of MACs pending to be configured */
	o->total_pending_num += p->mcast_list_len;

	return 0;
}
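
/* Illustrative sketch (not driver code): why the DEL/RESTORE estimate above
 * is only an upper bound.  Suppose two MACs pending in an earlier ADD hash
 * to the same bin (values hypothetical):
 *
 *	bnx2x_mcast_bin_from_mac(mac_a) == 42
 *	bnx2x_mcast_bin_from_mac(mac_b) == 42
 *
 * The ADD booked mcast_list_len == 2 lines of work, but only one registry
 * bin ends up set.  A DEL sized from that count would then try to clear two
 * bins and find only one; bnx2x_mcast_clear_first_bin() returns -1 for the
 * surplus line, which bnx2x_mcast_set_one_rule_e2() turns into a harmless
 * 0xff bin_id clear.
 */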

static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
				  struct bnx2x_mcast_ramrod_params *p,
				  int old_num_bins)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;

	o->set_registry_size(o, old_num_bins);
	o->total_pending_num -= p->mcast_list_len;
}

/**
 * bnx2x_mcast_set_rdata_hdr_e2 - set the header values
 *
 * @bp:  device handle
 * @p:   multicast ramrod parameters
 * @len: number of rules to handle
 */
static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
					struct bnx2x_mcast_ramrod_params *p,
					u8 len)
{
	struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
	struct eth_multicast_rules_ramrod_data *data =
		(struct eth_multicast_rules_ramrod_data *)(r->rdata);

	data->header.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
					(BNX2X_FILTER_MCAST_PENDING <<
					 BNX2X_SWCID_SHIFT));
	data->header.rule_cnt = len;
}

/**
 * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
 *
 * @bp: device handle
 * @o:  multicast object
 *
 * Recalculate the actual number of set bins in the registry using Brian
 * Kernighan's algorithm: its execution complexity is proportional to the
 * number of set bins.
 *
 * returns 0 for compliance with bnx2x_mcast_refresh_registry_e1().
 */
static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
						  struct bnx2x_mcast_obj *o)
{
	int i, cnt = 0;
	u64 elem;

	for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
		elem = o->registry.aprox_match.vec[i];
		for (; elem; cnt++)
			elem &= elem - 1;
	}

	o->set_registry_size(o, cnt);

	return 0;
}
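
/* Illustrative sketch (not driver code): Kernighan's trick as used above.
 * Each "elem &= elem - 1" clears the lowest set bit, so the inner loop body
 * runs once per set bit rather than once per bit position:
 *
 *	elem            = 0b10110000
 *	elem & (elem-1) = 0b10100000   (cnt = 1)
 *	elem & (elem-1) = 0b10000000   (cnt = 2)
 *	elem & (elem-1) = 0b00000000   (cnt = 3, loop exits)
 *
 * Equivalent to summing hweight64() over the vector, just written out
 * explicitly here.
 */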

static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
				struct bnx2x_mcast_ramrod_params *p,
				enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	struct eth_multicast_rules_ramrod_data *data =
		(struct eth_multicast_rules_ramrod_data *)(raw->rdata);
	int cnt = 0, rc;

	/* Reset the ramrod data buffer */
	memset(data, 0, sizeof(*data));

	cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);

	/* If there are no more pending commands - clear SCHEDULED state */
	if (list_empty(&o->pending_cmds_head))
		o->clear_sched(o);

	/* The below may be true iff there was enough room in ramrod
	 * data for all pending commands and for the current
	 * command. Otherwise the current command would have been added
	 * to the pending commands and p->mcast_list_len would have been
	 * zeroed.
	 */
	if (p->mcast_list_len > 0)
		cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);

	/* We've pulled out some MACs - update the total number of
	 * outstanding.
	 */
	o->total_pending_num -= cnt;

	/* send a ramrod */
	WARN_ON(o->total_pending_num < 0);
	WARN_ON(cnt > o->max_cmd_len);

	bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);

	/* Update a registry size if there are no more pending operations.
	 *
	 * We don't want to change the value of the registry size if there are
	 * pending operations because we want it to always be equal to the
	 * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
	 * set bins after the last requested operation in order to properly
	 * evaluate the size of the next DEL/RESTORE operation.
	 *
	 * Note that we update the registry itself during command(s) handling
	 * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
	 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
	 * with a limited amount of update commands (per MAC/bin) and we don't
	 * know in this scope what the actual state of bins configuration is
	 * going to be after this ramrod.
	 */
	if (!o->total_pending_num)
		bnx2x_mcast_refresh_registry_e2(bp, o);

	/* If CLEAR_ONLY was requested - don't send a ramrod and clear
	 * RAMROD_PENDING status immediately.
	 */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
		raw->clear_pending(raw);
		return 0;
	} else {
		/* No need for an explicit memory barrier here as long as we
		 * would need to ensure the ordering of writing to the SPQ
		 * element and updating of the SPQ producer which involves a
		 * memory read and we will have to put a full memory barrier
		 * there (inside bnx2x_sp_post()).
		 */

		/* Send a ramrod */
		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
				   raw->cid, U64_HI(raw->rdata_mapping),
				   U64_LO(raw->rdata_mapping),
				   ETH_CONNECTION_TYPE);
		if (rc)
			return rc;

		/* Ramrod completion is pending */
		return 1;
	}
}

static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
				    struct bnx2x_mcast_ramrod_params *p,
				    enum bnx2x_mcast_cmd cmd)
{
	/* Mark, that there is a work to do */
	if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
		p->mcast_list_len = 1;

	return 0;
}

static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
				   struct bnx2x_mcast_ramrod_params *p,
				   int old_num_bins)
{
	/* Do nothing */
}

#define BNX2X_57711_SET_MC_FILTER(filter, bit) \
do { \
	(filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
} while (0)
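
/* Illustrative sketch (not driver code): the macro above treats mc_filter
 * as an array of 32-bit words.  For a hypothetical bin 200:
 *
 *	word index: 200 >> 5   == 6
 *	bit  index: 200 & 0x1f == 8
 *	filter[6] |= 1 << 8;
 *
 * i.e. the approximate-match bins fold into MC_HASH_SIZE u32 words that are
 * written into TSTORM internal RAM in bnx2x_mcast_setup_e1h() below.
 */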

static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
					   struct bnx2x_mcast_obj *o,
					   struct bnx2x_mcast_ramrod_params *p,
					   u32 *mc_filter)
{
	struct bnx2x_mcast_list_elem *mlist_pos;
	int bit;

	list_for_each_entry(mlist_pos, &p->mcast_list, link) {
		bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
		BNX2X_57711_SET_MC_FILTER(mc_filter, bit);

		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n",
		   mlist_pos->mac, bit);

		/* bookkeeping... */
		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
				  bit);
	}
}

static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
	struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
	u32 *mc_filter)
{
	int bit;

	for (bit = bnx2x_mcast_get_next_bin(o, 0);
	     bit >= 0;
	     bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
		BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
		DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
	}
}

/* On 57711 we write the multicast MACs' approximate match
 * table directly into the TSTORM's internal RAM. So we don't
 * really need to handle any tricks to make it work.
 */
static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
				 struct bnx2x_mcast_ramrod_params *p,
				 enum bnx2x_mcast_cmd cmd)
{
	int i;
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	struct bnx2x_raw_obj *r = &o->raw;

	/* If CLEAR_ONLY has been requested - clear the registry
	 * and clear a pending bit.
	 */
	if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
		u32 mc_filter[MC_HASH_SIZE] = {0};

		/* Set the multicast filter bits before writing it into
		 * the internal memory.
		 */
		switch (cmd) {
		case BNX2X_MCAST_CMD_ADD:
			bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
			break;

		case BNX2X_MCAST_CMD_DEL:
			DP(BNX2X_MSG_SP,
			   "Invalidating multicast MACs configuration\n");

			/* clear the registry */
			memset(o->registry.aprox_match.vec, 0,
			       sizeof(o->registry.aprox_match.vec));
			break;

		case BNX2X_MCAST_CMD_RESTORE:
			bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
			break;

		default:
			BNX2X_ERR("Unknown command: %d\n", cmd);
			return -EINVAL;
		}

		/* Set the mcast filter in the internal memory */
		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
	} else
		/* clear the registry */
		memset(o->registry.aprox_match.vec, 0,
		       sizeof(o->registry.aprox_match.vec));

	/* We are done */
	r->clear_pending(r);

	return 0;
}

static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
				   struct bnx2x_mcast_ramrod_params *p,
				   enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	int reg_sz = o->get_registry_size(o);

	switch (cmd) {
	/* DEL command deletes all currently configured MACs */
	case BNX2X_MCAST_CMD_DEL:
		o->set_registry_size(o, 0);
		/* Don't break */

	/* RESTORE command will restore the entire multicast configuration */
	case BNX2X_MCAST_CMD_RESTORE:
		p->mcast_list_len = reg_sz;
		DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
		   cmd, p->mcast_list_len);
		break;

	case BNX2X_MCAST_CMD_ADD:
	case BNX2X_MCAST_CMD_CONT:
		/* Multicast MACs on 57710 are configured as unicast MACs and
		 * there is only a limited number of CAM entries for that
		 * matter.
		 */
		if (p->mcast_list_len > o->max_cmd_len) {
			BNX2X_ERR("Can't configure more than %d multicast MACs on 57710\n",
				  o->max_cmd_len);
			return -EINVAL;
		}
		/* Every configured MAC should be cleared if DEL command is
		 * called. Only the last ADD command is relevant as long as
		 * every ADD command overrides the previous configuration.
		 */
		DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
		if (p->mcast_list_len > 0)
			o->set_registry_size(o, p->mcast_list_len);

		break;

	default:
		BNX2X_ERR("Unknown command: %d\n", cmd);
		return -EINVAL;
	}

	/* We want to ensure that commands are executed one by one for 57710.
	 * Therefore each non-empty command will consume o->max_cmd_len.
	 */
	if (p->mcast_list_len)
		o->total_pending_num += o->max_cmd_len;

	return 0;
}

static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
				  struct bnx2x_mcast_ramrod_params *p,
				  int old_num_macs)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;

	o->set_registry_size(o, old_num_macs);

	/* If the current command hasn't been handled yet, being here means
	 * it was meant to be dropped and we have to update the number of
	 * outstanding MACs accordingly.
	 */
	if (p->mcast_list_len)
		o->total_pending_num -= o->max_cmd_len;
}

static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
					struct bnx2x_mcast_obj *o, int idx,
					union bnx2x_mcast_config_data *cfg_data,
					enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_raw_obj *r = &o->raw;
	struct mac_configuration_cmd *data =
		(struct mac_configuration_cmd *)(r->rdata);

	/* copy mac */
	if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
		bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
				      &data->config_table[idx].middle_mac_addr,
				      &data->config_table[idx].lsb_mac_addr,
				      cfg_data->mac);

		data->config_table[idx].vlan_id = 0;
		data->config_table[idx].pf_id = r->func_id;
		data->config_table[idx].clients_bit_vector =
			cpu_to_le32(1 << r->cl_id);

		SET_FLAG(data->config_table[idx].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_SET);
	}
}

/**
 * bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd
 *
 * @bp:  device handle
 * @p:   multicast ramrod parameters
 * @len: number of rules to handle
 */
static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
					struct bnx2x_mcast_ramrod_params *p,
					u8 len)
{
	struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
	struct mac_configuration_cmd *data =
		(struct mac_configuration_cmd *)(r->rdata);

	u8 offset = (CHIP_REV_IS_SLOW(bp) ?
		     BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
		     BNX2X_MAX_MULTICAST*(1 + r->func_id));

	data->hdr.offset = offset;
	data->hdr.client_id = cpu_to_le16(0xff);
	data->hdr.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
				     (BNX2X_FILTER_MCAST_PENDING <<
				      BNX2X_SWCID_SHIFT));
	data->hdr.length = len;
}

/**
 * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
 *
 * @bp:        device handle
 * @o:         multicast object
 * @start_idx: index in the registry to start from
 * @rdata_idx: index in the ramrod data to start from
 *
 * restore command for 57710 is like all other commands - always a standalone
 * command - start_idx and rdata_idx will always be 0. This function will
 * always succeed.
 * returns -1 to comply with the 57712 variant.
 */
static inline int bnx2x_mcast_handle_restore_cmd_e1(
	struct bnx2x *bp, struct bnx2x_mcast_obj *o, int start_idx,
	int *rdata_idx)
{
	struct bnx2x_mcast_mac_elem *elem;
	int i = 0;
	union bnx2x_mcast_config_data cfg_data = {NULL};

	/* go through the registry and configure the MACs from it. */
	list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
		cfg_data.mac = &elem->mac[0];
		o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);

		i++;

		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
		   cfg_data.mac);
	}

	*rdata_idx = i;

	return -1;
}

static inline int bnx2x_mcast_handle_pending_cmds_e1(
	struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
{
	struct bnx2x_pending_mcast_cmd *cmd_pos;
	struct bnx2x_mcast_mac_elem *pmac_pos;
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	union bnx2x_mcast_config_data cfg_data = {NULL};
	int cnt = 0;

	/* If nothing to be done - return */
	if (list_empty(&o->pending_cmds_head))
		return 0;

	/* Handle the first command */
	cmd_pos = list_first_entry(&o->pending_cmds_head,
				   struct bnx2x_pending_mcast_cmd, link);

	switch (cmd_pos->type) {
	case BNX2X_MCAST_CMD_ADD:
		list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
			cfg_data.mac = &pmac_pos->mac[0];
			o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);

			cnt++;

			DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
			   pmac_pos->mac);
		}
		break;

	case BNX2X_MCAST_CMD_DEL:
		cnt = cmd_pos->data.macs_num;
		DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
		break;

	case BNX2X_MCAST_CMD_RESTORE:
		o->hdl_restore(bp, o, 0, &cnt);
		break;

	default:
		BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
		return -EINVAL;
	}

	list_del(&cmd_pos->link);
	kfree(cmd_pos);

	return cnt;
}

/**
 * bnx2x_get_fw_mac_addr - revert bnx2x_set_fw_mac_addr().
 *
 * @fw_hi:  MAC bytes 0-1 in FW (big-endian 16-bit) order
 * @fw_mid: MAC bytes 2-3 in FW order
 * @fw_lo:  MAC bytes 4-5 in FW order
 * @mac:    output buffer for the MAC address
 */
static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
					 __le16 *fw_lo, u8 *mac)
{
	mac[1] = ((u8 *)fw_hi)[0];
	mac[0] = ((u8 *)fw_hi)[1];
	mac[3] = ((u8 *)fw_mid)[0];
	mac[2] = ((u8 *)fw_mid)[1];
	mac[5] = ((u8 *)fw_lo)[0];
	mac[4] = ((u8 *)fw_lo)[1];
}
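
/* Illustrative sketch (not driver code): the swap above undoes the
 * big-endian 16-bit packing of a MAC into three FW words.  For the
 * hypothetical (locally administered) MAC 02:11:22:33:44:55:
 *
 *	fw_hi  bytes = {0x11, 0x02}  ->  mac[0]=0x02, mac[1]=0x11
 *	fw_mid bytes = {0x33, 0x22}  ->  mac[2]=0x22, mac[3]=0x33
 *	fw_lo  bytes = {0x55, 0x44}  ->  mac[4]=0x44, mac[5]=0x55
 *
 * so the round trip bnx2x_set_fw_mac_addr() -> bnx2x_get_fw_mac_addr()
 * reproduces the original byte order.
 */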

/**
 * bnx2x_mcast_refresh_registry_e1 - update the exact-match registry
 *
 * @bp: device handle
 * @o:  multicast object
 *
 * Check the ramrod data first entry flag to see if it's a DELETE or ADD
 * command and update the registry correspondingly: if ADD - allocate memory
 * and add the entries to the registry (list), if DELETE - clear the registry
 * and free the memory.
 */
static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
						  struct bnx2x_mcast_obj *o)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct bnx2x_mcast_mac_elem *elem;
	struct mac_configuration_cmd *data =
			(struct mac_configuration_cmd *)(raw->rdata);

	/* If first entry contains a SET bit - the command was ADD,
	 * otherwise - DEL_ALL
	 */
	if (GET_FLAG(data->config_table[0].flags,
		     MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
		int i, len = data->hdr.length;

		/* Break if it was a RESTORE command */
		if (!list_empty(&o->registry.exact_match.macs))
			return 0;

		elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC);
		if (!elem) {
			BNX2X_ERR("Failed to allocate registry memory\n");
			return -ENOMEM;
		}

		for (i = 0; i < len; i++, elem++) {
			bnx2x_get_fw_mac_addr(
				&data->config_table[i].msb_mac_addr,
				&data->config_table[i].middle_mac_addr,
				&data->config_table[i].lsb_mac_addr,
				elem->mac);
			DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n",
			   elem->mac);
			list_add_tail(&elem->link,
				      &o->registry.exact_match.macs);
		}
	} else {
		/* The list was filled from a single kcalloc()'ed block, so
		 * freeing the first entry releases the whole registry.
		 */
		elem = list_first_entry(&o->registry.exact_match.macs,
					struct bnx2x_mcast_mac_elem, link);
		DP(BNX2X_MSG_SP, "Deleting a registry\n");
		kfree(elem);
		INIT_LIST_HEAD(&o->registry.exact_match.macs);
	}

	return 0;
}

static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
				struct bnx2x_mcast_ramrod_params *p,
				enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	struct bnx2x_raw_obj *raw = &o->raw;
	struct mac_configuration_cmd *data =
		(struct mac_configuration_cmd *)(raw->rdata);
	int cnt = 0, i, rc;

	/* Reset the ramrod data buffer */
	memset(data, 0, sizeof(*data));

	/* First set all entries as invalid */
	for (i = 0; i < o->max_cmd_len; i++)
		SET_FLAG(data->config_table[i].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_INVALIDATE);

	/* Handle pending commands first */
	cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);

	/* If there are no more pending commands - clear SCHEDULED state */
	if (list_empty(&o->pending_cmds_head))
		o->clear_sched(o);

	/* The below may be true iff there were no pending commands */
	if (!cnt)
		cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);

	/* For 57710 every command has o->max_cmd_len length to ensure that
	 * commands are done one at a time.
	 */
	o->total_pending_num -= o->max_cmd_len;

	/* send a ramrod */

	WARN_ON(cnt > o->max_cmd_len);

	/* Set ramrod header (in particular, a number of entries to update) */
	bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);

	/* update a registry: we need the registry contents to be always up
	 * to date in order to be able to execute a RESTORE opcode. Here
	 * we use the fact that for 57710 we sent one command at a time
	 * hence we may take the registry update out of the command handling
	 * and do it in a simpler way here.
	 */
	rc = bnx2x_mcast_refresh_registry_e1(bp, o);
	if (rc)
		return rc;

	/* If CLEAR_ONLY was requested - don't send a ramrod and clear
	 * RAMROD_PENDING status immediately.
	 */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
		raw->clear_pending(raw);
		return 0;
	} else {
		/* No need for an explicit memory barrier here as long as we
		 * would need to ensure the ordering of writing to the SPQ
		 * element and updating of the SPQ producer which involves a
		 * memory read and we will have to put a full memory barrier
		 * there (inside bnx2x_sp_post()).
		 */

		/* Send a ramrod */
		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
				   U64_HI(raw->rdata_mapping),
				   U64_LO(raw->rdata_mapping),
				   ETH_CONNECTION_TYPE);
		if (rc)
			return rc;

		/* Ramrod completion is pending */
		return 1;
	}
}

static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
{
	return o->registry.exact_match.num_macs_set;
}

static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
{
	return o->registry.aprox_match.num_bins_set;
}

static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
						int n)
{
	o->registry.exact_match.num_macs_set = n;
}

static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
						int n)
{
	o->registry.aprox_match.num_bins_set = n;
}

int bnx2x_config_mcast(struct bnx2x *bp,
		       struct bnx2x_mcast_ramrod_params *p,
		       enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	struct bnx2x_raw_obj *r = &o->raw;
	int rc = 0, old_reg_size;

	/* This is needed to recover number of currently configured mcast macs
	 * in case of failure.
	 */
	old_reg_size = o->get_registry_size(o);

	/* Do some calculations and checks */
	rc = o->validate(bp, p, cmd);
	if (rc)
		return rc;

	/* Return if there is no work to do */
	if ((!p->mcast_list_len) && (!o->check_sched(o)))
		return 0;

	DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n",
	   o->total_pending_num, p->mcast_list_len, o->max_cmd_len);

	/* Enqueue the current command to the pending list if we can't complete
	 * it in the current iteration
	 */
	if (r->check_pending(r) ||
	    ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
		rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
		if (rc < 0)
			goto error_exit1;

		/* As long as the current command is in a command list we
		 * don't need to handle it separately.
		 */
		p->mcast_list_len = 0;
	}

	if (!r->check_pending(r)) {

		/* Set 'pending' state */
		r->set_pending(r);

		/* Configure the new classification in the chip */
		rc = o->config_mcast(bp, p, cmd);
		if (rc < 0)
			goto error_exit2;

		/* Wait for a ramrod completion if was requested */
		if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
			rc = o->wait_comp(bp, o);
	}

	return rc;

error_exit2:
	r->clear_pending(r);

error_exit1:
	o->revert(bp, p, old_reg_size);

	return rc;
}
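
/* Illustrative sketch (not driver code): a typical caller flow, with
 * hypothetical setup.  The caller owns p->mcast_list and performs a full
 * replace by issuing DEL and then ADD:
 *
 *	struct bnx2x_mcast_ramrod_params p = { .mcast_obj = o };
 *
 *	rc = bnx2x_config_mcast(bp, &p, BNX2X_MCAST_CMD_DEL);
 *	// ...fill p.mcast_list / p.mcast_list_len from the netdev mc list...
 *	rc = bnx2x_config_mcast(bp, &p, BNX2X_MCAST_CMD_ADD);
 *
 * rc == 0 means the request completed, rc > 0 means a ramrod completion (or
 * a scheduled pending command) is still outstanding, and rc < 0 means the
 * request failed and was reverted.
 */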

static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
{
	smp_mb__before_clear_bit();
	clear_bit(o->sched_state, o->raw.pstate);
	smp_mb__after_clear_bit();
}

static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
{
	smp_mb__before_clear_bit();
	set_bit(o->sched_state, o->raw.pstate);
	smp_mb__after_clear_bit();
}

static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
{
	return !!test_bit(o->sched_state, o->raw.pstate);
}

static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
{
	return o->raw.check_pending(&o->raw) || o->check_sched(o);
}

void bnx2x_init_mcast_obj(struct bnx2x *bp,
			  struct bnx2x_mcast_obj *mcast_obj,
			  u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
			  u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
			  int state, unsigned long *pstate, bnx2x_obj_type type)
{
	memset(mcast_obj, 0, sizeof(*mcast_obj));

	bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
			   rdata, rdata_mapping, state, pstate, type);

	mcast_obj->engine_id = engine_id;

	INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);

	mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
	mcast_obj->check_sched = bnx2x_mcast_check_sched;
	mcast_obj->set_sched = bnx2x_mcast_set_sched;
	mcast_obj->clear_sched = bnx2x_mcast_clear_sched;

	if (CHIP_IS_E1(bp)) {
		mcast_obj->config_mcast = bnx2x_mcast_setup_e1;
		mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
		mcast_obj->hdl_restore =
			bnx2x_mcast_handle_restore_cmd_e1;
		mcast_obj->check_pending = bnx2x_mcast_check_pending;

		if (CHIP_REV_IS_SLOW(bp))
			mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
		else
			mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;

		mcast_obj->wait_comp = bnx2x_mcast_wait;
		mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e1;
		mcast_obj->validate = bnx2x_mcast_validate_e1;
		mcast_obj->revert = bnx2x_mcast_revert_e1;
		mcast_obj->get_registry_size =
			bnx2x_mcast_get_registry_size_exact;
		mcast_obj->set_registry_size =
			bnx2x_mcast_set_registry_size_exact;

		/* 57710 is the only chip that uses the exact match for mcast
		 * at the moment.
		 */
		INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);

	} else if (CHIP_IS_E1H(bp)) {
		mcast_obj->config_mcast = bnx2x_mcast_setup_e1h;
		mcast_obj->enqueue_cmd = NULL;
		mcast_obj->hdl_restore = NULL;
		mcast_obj->check_pending = bnx2x_mcast_check_pending;

		/* 57711 doesn't send a ramrod, so it has unlimited credit
		 * for one command.
		 */
		mcast_obj->max_cmd_len = -1;
		mcast_obj->wait_comp = bnx2x_mcast_wait;
		mcast_obj->set_one_rule = NULL;
		mcast_obj->validate = bnx2x_mcast_validate_e1h;
		mcast_obj->revert = bnx2x_mcast_revert_e1h;
		mcast_obj->get_registry_size =
			bnx2x_mcast_get_registry_size_aprox;
		mcast_obj->set_registry_size =
			bnx2x_mcast_set_registry_size_aprox;
	} else {
		mcast_obj->config_mcast = bnx2x_mcast_setup_e2;
		mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
		mcast_obj->hdl_restore =
			bnx2x_mcast_handle_restore_cmd_e2;
		mcast_obj->check_pending = bnx2x_mcast_check_pending;
		/* TODO: There should be a proper HSI define for this number!!!
		 */
		mcast_obj->max_cmd_len = 16;
		mcast_obj->wait_comp = bnx2x_mcast_wait;
		mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e2;
		mcast_obj->validate = bnx2x_mcast_validate_e2;
		mcast_obj->revert = bnx2x_mcast_revert_e2;
		mcast_obj->get_registry_size =
			bnx2x_mcast_get_registry_size_aprox;
		mcast_obj->set_registry_size =
			bnx2x_mcast_set_registry_size_aprox;
	}
}

/*************************** Credit handling **********************************/

/**
 * __atomic_add_ifless - add if the result is less than a given value.
 *
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...if (v + a) is less than u.
 *
 * returns true if (v + a) was less than u, and false otherwise.
 */
static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (unlikely(c + a >= u))
			return false;

		old = atomic_cmpxchg((v), c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}

	return true;
}

/**
 * __atomic_dec_ifmoe - dec if the result is greater than or equal to a given
 * value.
 *
 * @v: pointer of type atomic_t
 * @a: the amount to dec from v...
 * @u: ...if (v - a) is greater than or equal to u.
 *
 * returns true if (v - a) was greater than or equal to u, and false
 * otherwise.
 */
static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (unlikely(c - a < u))
			return false;

		old = atomic_cmpxchg((v), c, c - a);
		if (likely(old == c))
			break;
		c = old;
	}

	return true;
}
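
/* Illustrative sketch (not driver code): the two helpers above are the
 * classic lock-free read/compute/cmpxchg retry loop.  E.g. moving credits
 * around a pool currently holding 8:
 *
 *	atomic_t v = ATOMIC_INIT(8);
 *	__atomic_dec_ifmoe(&v, 3, 0);   // true,  v becomes 5
 *	__atomic_dec_ifmoe(&v, 6, 0);   // false, v stays 5 (5 - 6 < 0)
 *	__atomic_add_ifless(&v, 4, 9);  // false, 5 + 4 is not < 9
 *	__atomic_add_ifless(&v, 3, 9);  // true,  v becomes 8
 *
 * If another CPU changes v between atomic_read() and atomic_cmpxchg(), the
 * cmpxchg fails, c is reloaded from the returned old value and the bound is
 * rechecked, so the limit always holds atomically.
 */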

static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
{
	bool rc;

	smp_mb();
	rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
	smp_mb();

	return rc;
}

static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
{
	bool rc;

	smp_mb();

	/* Don't allow a refill if credit + cnt > pool_sz */
	rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);

	smp_mb();

	return rc;
}

static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
{
	int cur_credit;

	smp_mb();
	cur_credit = atomic_read(&o->credit);

	return cur_credit;
}

static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
					  int cnt)
{
	return true;
}

static bool bnx2x_credit_pool_get_entry(
	struct bnx2x_credit_pool_obj *o,
	int *offset)
{
	int idx, vec, i;

	*offset = -1;

	/* Find "internal cam-offset" then add to base for this object... */
	for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {

		/* Skip the current vector if there are no free entries in it */
		if (!o->pool_mirror[vec])
			continue;

		/* If we've got here we are going to find a free entry */
		for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
		     i < BIT_VEC64_ELEM_SZ; idx++, i++)

			if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
				/* Got one!! */
				BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
				*offset = o->base_pool_offset + idx;
				return true;
			}
	}

	return false;
}

static bool bnx2x_credit_pool_put_entry(
	struct bnx2x_credit_pool_obj *o,
	int offset)
{
	if (offset < o->base_pool_offset)
		return false;

	offset -= o->base_pool_offset;

	if (offset >= o->pool_sz)
		return false;

	/* Return the entry to the pool */
	BIT_VEC64_SET_BIT(o->pool_mirror, offset);

	return true;
}
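
/* Illustrative sketch (not driver code): a get_entry/put_entry round trip.
 * With a hypothetical base_pool_offset of 32 and bit 5 as the first free
 * bit in pool_mirror:
 *
 *	int off;
 *	bnx2x_credit_pool_get_entry(o, &off);  // off == 32 + 5 == 37,
 *	                                       // mirror bit 5 cleared
 *	bnx2x_credit_pool_put_entry(o, 37);    // mirror bit 37 - 32 == 5 set
 *
 * put_entry rejects offsets outside [base, base + pool_sz), so a stale or
 * foreign CAM offset can't corrupt the mirror.
 */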

static bool bnx2x_credit_pool_put_entry_always_true(
	struct bnx2x_credit_pool_obj *o,
	int offset)
{
	return true;
}

static bool bnx2x_credit_pool_get_entry_always_true(
	struct bnx2x_credit_pool_obj *o,
	int *offset)
{
	*offset = -1;
	return true;
}

/**
 * bnx2x_init_credit_pool - initialize credit pool internals.
 *
 * @p:      credit pool object
 * @base:   Base entry in the CAM to use.
 * @credit: pool size.
 *
 * If base is negative no CAM entries handling will be performed.
 * If credit is negative pool operations will always succeed (unlimited pool).
 */
static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
					  int base, int credit)
{
	/* Zero the object first */
	memset(p, 0, sizeof(*p));

	/* Set the table to all 1s */
	memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));

	/* Init a pool as full */
	atomic_set(&p->credit, credit);

	/* The total pool size */
	p->pool_sz = credit;

	p->base_pool_offset = base;

	/* Commit the change */
	smp_mb();

	p->check = bnx2x_credit_pool_check;

	/* if pool credit is negative - disable the checks */
	if (credit >= 0) {
		p->put = bnx2x_credit_pool_put;
		p->get = bnx2x_credit_pool_get;
		p->put_entry = bnx2x_credit_pool_put_entry;
		p->get_entry = bnx2x_credit_pool_get_entry;
	} else {
		p->put = bnx2x_credit_pool_always_true;
		p->get = bnx2x_credit_pool_always_true;
		p->put_entry = bnx2x_credit_pool_put_entry_always_true;
		p->get_entry = bnx2x_credit_pool_get_entry_always_true;
	}

	/* If base is negative - disable entries handling */
	if (base < 0) {
		p->put_entry = bnx2x_credit_pool_put_entry_always_true;
		p->get_entry = bnx2x_credit_pool_get_entry_always_true;
	}
}
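
/* Illustrative sketch (not driver code): the pool shapes the negative
 * argument conventions above produce (values hypothetical):
 *
 *	bnx2x_init_credit_pool(p, 16,  8);  // 8 credits, CAM entries 16..23
 *	bnx2x_init_credit_pool(p, -1,  8);  // 8 credits, no CAM bookkeeping
 *	bnx2x_init_credit_pool(p,  0, -1);  // unlimited: get/put always true
 *	bnx2x_init_credit_pool(p,  0,  0);  // empty: blocks all operations
 *
 * The callers below use exactly these shapes for the E1/E1H/E2 MAC pools
 * and the VLAN pool.
 */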

void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
				struct bnx2x_credit_pool_obj *p, u8 func_id,
				u8 func_num)
{
/* TODO: this will be defined in consts as well... */
#define BNX2X_CAM_SIZE_EMUL 5

	int cam_sz;

	if (CHIP_IS_E1(bp)) {
		/* In E1, Multicast is saved in cam... */
		if (!CHIP_REV_IS_SLOW(bp))
			cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
		else
			cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;

		bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);

	} else if (CHIP_IS_E1H(bp)) {
		/* CAM credit is equally divided between all active functions
		 * on the PORT.
		 */
		if (func_num > 0) {
			if (!CHIP_REV_IS_SLOW(bp))
				cam_sz = (MAX_MAC_CREDIT_E1H / (2 * func_num));
			else
				cam_sz = BNX2X_CAM_SIZE_EMUL;
			bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
		} else {
			/* this should never happen! Block MAC operations. */
			bnx2x_init_credit_pool(p, 0, 0);
		}
	} else {
		/* CAM credit is equally divided between all active functions
		 * on the PATH.
		 */
		if (func_num > 0) {
			if (!CHIP_REV_IS_SLOW(bp))
				cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
			else
				cam_sz = BNX2X_CAM_SIZE_EMUL;

			/* No need for CAM entries handling for 57712 and
			 * newer.
			 */
			bnx2x_init_credit_pool(p, -1, cam_sz);
		} else {
			/* this should never happen! Block MAC operations. */
			bnx2x_init_credit_pool(p, 0, 0);
		}
	}
}

void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
				 struct bnx2x_credit_pool_obj *p,
				 u8 func_id,
				 u8 func_num)
{
	if (CHIP_IS_E1x(bp)) {
		/* There is no VLAN credit in HW on 57710 and 57711;
		 * only MAC / MAC-VLAN can be set.
		 */
		bnx2x_init_credit_pool(p, 0, -1);
	} else {
		/* CAM credit is equally divided between all active functions
		 * on the PATH.
		 */
		if (func_num > 0) {
			int credit = MAX_VLAN_CREDIT_E2 / func_num;
			bnx2x_init_credit_pool(p, func_id * credit, credit);
		} else
			/* this should never happen! Block VLAN operations. */
			bnx2x_init_credit_pool(p, 0, 0);
	}
}

/****************** RSS Configuration ******************/

/**
 * bnx2x_debug_print_ind_table - prints the indirection table configuration.
 *
 * @bp: driver handle
 * @p:  pointer to rss configuration
 *
 * Prints it when NETIF_MSG_IFUP debug level is configured.
 */
static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
					       struct bnx2x_config_rss_params *p)
{
	int i;

	DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
	DP(BNX2X_MSG_SP, "0x0000: ");
	for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
		DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);

		/* Print 4 bytes in a line */
		if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
		    (((i + 1) & 0x3) == 0)) {
			DP_CONT(BNX2X_MSG_SP, "\n");
			DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
		}
	}

	DP_CONT(BNX2X_MSG_SP, "\n");
}

/**
 * bnx2x_setup_rss - configure RSS
 *
 * @bp: device handle
 * @p:  rss configuration
 *
 * Sends an RSS UPDATE ramrod.
 */
static int bnx2x_setup_rss(struct bnx2x *bp,
			   struct bnx2x_config_rss_params *p)
{
	struct bnx2x_rss_config_obj *o = p->rss_obj;
	struct bnx2x_raw_obj *r = &o->raw;
	struct eth_rss_update_ramrod_data *data =
		(struct eth_rss_update_ramrod_data *)(r->rdata);
	u8 rss_mode = 0;
	int rc;

	memset(data, 0, sizeof(*data));

	DP(BNX2X_MSG_SP, "Configuring RSS\n");

	/* Set an echo field */
	data->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
				 (r->state << BNX2X_SWCID_SHIFT));

	/* RSS mode */
	if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
		rss_mode = ETH_RSS_MODE_DISABLED;
	else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
		rss_mode = ETH_RSS_MODE_REGULAR;

	data->rss_mode = rss_mode;

	DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);

	/* RSS capabilities */
	if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;

	if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;

	if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;

	if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;

	if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;

	if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;

	/* Hashing mask */
	data->rss_result_mask = p->rss_result_mask;

	/* RSS engine ID */
	data->rss_engine_id = o->engine_id;

	DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);

	/* Indirection table */
	memcpy(data->indirection_table, p->ind_table,
	       T_ETH_INDIRECTION_TABLE_SIZE);

	/* Remember the last configuration */
	memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);

	/* Print the indirection table */
	if (netif_msg_ifup(bp))
		bnx2x_debug_print_ind_table(bp, p);

	/* RSS keys */
	if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
		memcpy(&data->rss_key[0], &p->rss_key[0],
		       sizeof(data->rss_key));
		data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
	}

	/* No need for an explicit memory barrier here as long as we would
	 * need to ensure the ordering of writing to the SPQ element
	 * and updating of the SPQ producer which involves a memory
	 * read and we will have to put a full memory barrier there
	 * (inside bnx2x_sp_post()).
	 */

	/* Send a ramrod */
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
			   U64_HI(r->rdata_mapping),
			   U64_LO(r->rdata_mapping),
			   ETH_CONNECTION_TYPE);

	if (rc < 0)
		return rc;

	return 1;
}

void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
			     u8 *ind_table)
{
	memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
}

int bnx2x_config_rss(struct bnx2x *bp,
		     struct bnx2x_config_rss_params *p)
{
	int rc;
	struct bnx2x_rss_config_obj *o = p->rss_obj;
	struct bnx2x_raw_obj *r = &o->raw;

	/* Do nothing if only driver cleanup was requested */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
		return 0;

	r->set_pending(r);

	rc = o->config_rss(bp, p);
	if (rc < 0) {
		r->clear_pending(r);
		return rc;
	}

	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
		rc = r->wait_comp(bp, r);

	return rc;
}

void bnx2x_init_rss_config_obj(struct bnx2x *bp,
			       struct bnx2x_rss_config_obj *rss_obj,
			       u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
			       void *rdata, dma_addr_t rdata_mapping,
			       int state, unsigned long *pstate,
			       bnx2x_obj_type type)
{
	bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
			   rdata_mapping, state, pstate, type);

	rss_obj->engine_id = engine_id;
	rss_obj->config_rss = bnx2x_setup_rss;
}
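
/* Illustrative sketch (not driver code): a minimal bnx2x_config_rss() call
 * with hypothetical queue count and hash mask.  Only flags handled by
 * bnx2x_setup_rss() above are set:
 *
 *	struct bnx2x_config_rss_params params = { .rss_obj = rss_obj };
 *	int i;
 *
 *	__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
 *	__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
 *	__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
 *	params.rss_result_mask = 0x7f;  // hypothetical 128-bucket hash mask
 *	for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++)
 *		params.ind_table[i] = i % num_queues;  // hypothetical mapping
 *
 *	rc = bnx2x_config_rss(bp, &params);
 *
 * rc > 0 means the UPDATE ramrod was posted and its completion is pending;
 * add RAMROD_COMP_WAIT to params.ramrod_flags to block until it arrives.
 */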

/********************** Queue state object ***********************************/

/**
 * bnx2x_queue_state_change - perform Queue state change transition
 *
 * @bp:     device handle
 * @params: parameters to perform the transition
 *
 * returns 0 in case of a successfully completed transition, negative error
 * code in case of failure, positive (EBUSY) value if there is a completion
 * that is still pending (possible only if RAMROD_COMP_WAIT is
 * not set in params->ramrod_flags for asynchronous commands).
 */
int bnx2x_queue_state_change(struct bnx2x *bp,
			     struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	int rc, pending_bit;
	unsigned long *pending = &o->pending;

	/* Check that the requested transition is legal */
	rc = o->check_transition(bp, o, params);
	if (rc) {
		BNX2X_ERR("check transition returned an error. rc %d\n", rc);
		return -EINVAL;
	}

	/* Set "pending" bit */
	DP(BNX2X_MSG_SP, "pending bit was=%lx\n", o->pending);
	pending_bit = o->set_pending(o, params);
	DP(BNX2X_MSG_SP, "pending bit now=%lx\n", o->pending);

	/* Don't send a command if only driver cleanup was requested */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
		o->complete_cmd(bp, o, pending_bit);
	else {
		/* Send a ramrod */
		rc = o->send_cmd(bp, params);
		if (rc) {
			o->next_state = BNX2X_Q_STATE_MAX;
			clear_bit(pending_bit, pending);
			smp_mb__after_clear_bit();
			return rc;
		}

		if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
			rc = o->wait_comp(bp, o, pending_bit);
			if (rc)
				return rc;

			return 0;
		}
	}

	return !!test_bit(pending_bit, pending);
}
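
/* Illustrative sketch (not driver code): a synchronous queue command.  The
 * caller picks a command, sets RAMROD_COMP_WAIT and lets the state machine
 * validate the transition:
 *
 *	struct bnx2x_queue_state_params qparams = { .q_obj = q_obj };
 *
 *	__set_bit(RAMROD_COMP_WAIT, &qparams.ramrod_flags);
 *	qparams.cmd = BNX2X_Q_CMD_ACTIVATE;  // mapped onto the UPDATE
 *	                                     // pending bit below
 *	rc = bnx2x_queue_state_change(bp, &qparams);
 *
 * Without RAMROD_COMP_WAIT the call returns 1 while the pending bit is set
 * and the completion is later consumed via bnx2x_queue_comp_cmd().
 */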

static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
				   struct bnx2x_queue_state_params *params)
{
	enum bnx2x_queue_cmd cmd = params->cmd, bit;

	/* ACTIVATE and DEACTIVATE commands are implemented on top of
	 * UPDATE command.
	 */
	if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
	    (cmd == BNX2X_Q_CMD_DEACTIVATE))
		bit = BNX2X_Q_CMD_UPDATE;
	else
		bit = cmd;

	set_bit(bit, &obj->pending);
	return bit;
}

static int bnx2x_queue_wait_comp(struct bnx2x *bp,
				 struct bnx2x_queue_sp_obj *o,
				 enum bnx2x_queue_cmd cmd)
{
	return bnx2x_state_wait(bp, cmd, &o->pending);
}

/**
 * bnx2x_queue_comp_cmd - complete the state change command.
 *
 * @bp:  device handle
 * @o:   queue state object
 * @cmd: command that completed
 *
 * Checks that the arrived completion is expected.
 */
static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
				struct bnx2x_queue_sp_obj *o,
				enum bnx2x_queue_cmd cmd)
{
	unsigned long cur_pending = o->pending;

	if (!test_and_clear_bit(cmd, &cur_pending)) {
		BNX2X_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n",
			  cmd, o->cids[BNX2X_PRIMARY_CID_INDEX],
			  o->state, cur_pending, o->next_state);
		return -EINVAL;
	}

	if (o->next_tx_only >= o->max_cos)
		/* >= because tx only must always be smaller than cos since the
		 * primary connection supports COS 0
		 */
		BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d\n",
			  o->next_tx_only, o->max_cos);

	DP(BNX2X_MSG_SP,
	   "Completing command %d for queue %d, setting state to %d\n",
	   cmd, o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);

	if (o->next_tx_only)  /* print num tx-only if any exist */
		DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n",
		   o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);

	o->state = o->next_state;
	o->num_tx_only = o->next_tx_only;
	o->next_state = BNX2X_Q_STATE_MAX;

	/* It's important that o->state and o->next_state are
	 * updated before o->pending.
	 */
	wmb();

	clear_bit(cmd, &o->pending);
	smp_mb__after_clear_bit();

	return 0;
}
  3808. static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
  3809. struct bnx2x_queue_state_params *cmd_params,
  3810. struct client_init_ramrod_data *data)
  3811. {
  3812. struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
  3813. /* Rx data */
  3814. /* IPv6 TPA supported for E2 and above only */
  3815. data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, &params->flags) *
  3816. CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
  3817. }
  3818. static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
  3819. struct bnx2x_queue_sp_obj *o,
  3820. struct bnx2x_general_setup_params *params,
  3821. struct client_init_general_data *gen_data,
  3822. unsigned long *flags)
  3823. {
  3824. gen_data->client_id = o->cl_id;
  3825. if (test_bit(BNX2X_Q_FLG_STATS, flags)) {
  3826. gen_data->statistics_counter_id =
  3827. params->stat_id;
  3828. gen_data->statistics_en_flg = 1;
  3829. gen_data->statistics_zero_flg =
  3830. test_bit(BNX2X_Q_FLG_ZERO_STATS, flags);
  3831. } else
  3832. gen_data->statistics_counter_id =
  3833. DISABLE_STATISTIC_COUNTER_ID_VALUE;
  3834. gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags);
  3835. gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags);
  3836. gen_data->sp_client_id = params->spcl_id;
  3837. gen_data->mtu = cpu_to_le16(params->mtu);
  3838. gen_data->func_id = o->func_id;
  3839. gen_data->cos = params->cos;
  3840. gen_data->traffic_type =
  3841. test_bit(BNX2X_Q_FLG_FCOE, flags) ?
  3842. LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
  3843. DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
  3844. gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
  3845. }
static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
				struct bnx2x_txq_setup_params *params,
				struct client_init_tx_data *tx_data,
				unsigned long *flags)
{
	tx_data->enforce_security_flg =
		test_bit(BNX2X_Q_FLG_TX_SEC, flags);
	tx_data->default_vlan =
		cpu_to_le16(params->default_vlan);
	tx_data->default_vlan_flg =
		test_bit(BNX2X_Q_FLG_DEF_VLAN, flags);
	tx_data->tx_switching_flg =
		test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
	tx_data->anti_spoofing_flg =
		test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
	tx_data->force_default_pri_flg =
		test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);
	tx_data->tunnel_lso_inc_ip_id =
		test_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, flags);
	tx_data->tunnel_non_lso_pcsum_location =
		test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? PCSUM_ON_PKT :
							    PCSUM_ON_BD;

	tx_data->tx_status_block_id = params->fw_sb_id;
	tx_data->tx_sb_index_number = params->sb_cq_index;
	tx_data->tss_leading_client_id = params->tss_leading_cl_id;

	tx_data->tx_bd_page_base.lo =
		cpu_to_le32(U64_LO(params->dscr_map));
	tx_data->tx_bd_page_base.hi =
		cpu_to_le32(U64_HI(params->dscr_map));

	/* Don't configure any Tx switching mode during queue SETUP */
	tx_data->state = 0;
}

static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o,
				struct rxq_pause_params *params,
				struct client_init_rx_data *rx_data)
{
	/* flow control data */
	rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo);
	rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi);
	rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo);
	rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi);
	rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo);
	rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi);
	rx_data->rx_cos_mask = cpu_to_le16(params->pri_map);
}

static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
				struct bnx2x_rxq_setup_params *params,
				struct client_init_rx_data *rx_data,
				unsigned long *flags)
{
	rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
				CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
	rx_data->tpa_en |= test_bit(BNX2X_Q_FLG_TPA_GRO, flags) *
				CLIENT_INIT_RX_DATA_TPA_MODE;
	rx_data->vmqueue_mode_en_flg = 0;

	rx_data->cache_line_alignment_log_size =
		params->cache_line_log;
	rx_data->enable_dynamic_hc =
		test_bit(BNX2X_Q_FLG_DHC, flags);
	rx_data->max_sges_for_packet = params->max_sges_pkt;
	rx_data->client_qzone_id = params->cl_qzone_id;
	rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz);

	/* Always start in DROP_ALL mode */
	rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
				     CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);

	/* We don't set drop flags */
	rx_data->drop_ip_cs_err_flg = 0;
	rx_data->drop_tcp_cs_err_flg = 0;
	rx_data->drop_ttl0_flg = 0;
	rx_data->drop_udp_cs_err_flg = 0;

	rx_data->inner_vlan_removal_enable_flg =
		test_bit(BNX2X_Q_FLG_VLAN, flags);
	rx_data->outer_vlan_removal_enable_flg =
		test_bit(BNX2X_Q_FLG_OV, flags);
	rx_data->status_block_id = params->fw_sb_id;
	rx_data->rx_sb_index_number = params->sb_cq_index;
	rx_data->max_tpa_queues = params->max_tpa_queues;
	rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz);
	rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz);
	rx_data->bd_page_base.lo =
		cpu_to_le32(U64_LO(params->dscr_map));
	rx_data->bd_page_base.hi =
		cpu_to_le32(U64_HI(params->dscr_map));
	rx_data->sge_page_base.lo =
		cpu_to_le32(U64_LO(params->sge_map));
	rx_data->sge_page_base.hi =
		cpu_to_le32(U64_HI(params->sge_map));
	rx_data->cqe_page_base.lo =
		cpu_to_le32(U64_LO(params->rcq_map));
	rx_data->cqe_page_base.hi =
		cpu_to_le32(U64_HI(params->rcq_map));
	rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);

	if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
		rx_data->approx_mcast_engine_id = params->mcast_engine_id;
		rx_data->is_approx_mcast = 1;
	}

	rx_data->rss_engine_id = params->rss_engine_id;

	/* silent vlan removal */
	rx_data->silent_vlan_removal_flg =
		test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags);
	rx_data->silent_vlan_value =
		cpu_to_le16(params->silent_removal_value);
	rx_data->silent_vlan_mask =
		cpu_to_le16(params->silent_removal_mask);
}

/* initialize the general, tx and rx parts of a queue object */
static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
				struct bnx2x_queue_state_params *cmd_params,
				struct client_init_ramrod_data *data)
{
	bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
				       &cmd_params->params.setup.gen_params,
				       &data->general,
				       &cmd_params->params.setup.flags);

	bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
				  &cmd_params->params.setup.txq_params,
				  &data->tx,
				  &cmd_params->params.setup.flags);

	bnx2x_q_fill_init_rx_data(cmd_params->q_obj,
				  &cmd_params->params.setup.rxq_params,
				  &data->rx,
				  &cmd_params->params.setup.flags);

	bnx2x_q_fill_init_pause_data(cmd_params->q_obj,
				     &cmd_params->params.setup.pause_params,
				     &data->rx);
}

/* initialize the general and tx parts of a tx-only queue object */
static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
				struct bnx2x_queue_state_params *cmd_params,
				struct tx_queue_init_ramrod_data *data)
{
	bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
				       &cmd_params->params.tx_only.gen_params,
				       &data->general,
				       &cmd_params->params.tx_only.flags);

	bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
				  &cmd_params->params.tx_only.txq_params,
				  &data->tx,
				  &cmd_params->params.tx_only.flags);

	DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x\n",
	   cmd_params->q_obj->cids[0],
	   data->tx.tx_bd_page_base.lo,
	   data->tx.tx_bd_page_base.hi);
}

/**
 * bnx2x_q_init - init HW/FW queue
 *
 * @bp:		device handle
 * @params:	queue state parameters
 *
 * HW/FW initial Queue configuration:
 *  - HC: Rx and Tx
 *  - CDU context validation
 */
static inline int bnx2x_q_init(struct bnx2x *bp,
			       struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	struct bnx2x_queue_init_params *init = &params->params.init;
	u16 hc_usec;
	u8 cos;

	/* Tx HC configuration */
	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
	    test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
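		/* Editorial note: convert the requested interrupt rate
		 * (interrupts/sec) into the coalescing period in usec the
		 * HC block expects. E.g. (numbers assumed for illustration)
		 * hc_rate = 50000 ints/sec -> 1000000 / 50000 = 20 usec.
		 */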
		hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;

		bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
			init->tx.sb_cq_index,
			!test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
			hc_usec);
	}

	/* Rx HC configuration */
	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
	    test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
		hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;

		bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
			init->rx.sb_cq_index,
			!test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
			hc_usec);
	}

	/* Set CDU context validation values */
	for (cos = 0; cos < o->max_cos; cos++) {
		DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d\n",
		   o->cids[cos], cos);
		DP(BNX2X_MSG_SP, "context pointer %p\n", init->cxts[cos]);
		bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
	}

	/* As no ramrod is sent, complete the command immediately */
	o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);

	mmiowb();
	smp_mb();

	return 0;
}

static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	struct client_init_ramrod_data *rdata =
		(struct client_init_ramrod_data *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;

	/* Clear the ramrod data */
	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data */
	bnx2x_q_fill_setup_data_cmn(bp, params, rdata);

	/* No need for an explicit memory barrier here: the write to the
	 * SPQ element only has to be ordered against the update of the
	 * SPQ producer, which involves a memory read, and the full memory
	 * barrier that enforces this ordering is inside bnx2x_sp_post().
	 */
	return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
}

static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	struct client_init_ramrod_data *rdata =
		(struct client_init_ramrod_data *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;

	/* Clear the ramrod data */
	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data */
	bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
	bnx2x_q_fill_setup_data_e2(bp, params, rdata);

	/* No need for an explicit memory barrier here: the write to the
	 * SPQ element only has to be ordered against the update of the
	 * SPQ producer, which involves a memory read, and the full memory
	 * barrier that enforces this ordering is inside bnx2x_sp_post().
	 */
	return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
}

static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	struct tx_queue_init_ramrod_data *rdata =
		(struct tx_queue_init_ramrod_data *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
	struct bnx2x_queue_setup_tx_only_params *tx_only_params =
		&params->params.tx_only;
	u8 cid_index = tx_only_params->cid_index;

	if (cid_index >= o->max_cos) {
		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
			  o->cl_id, cid_index);
		return -EINVAL;
	}

	DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d\n",
	   tx_only_params->gen_params.cos,
	   tx_only_params->gen_params.spcl_id);

	/* Clear the ramrod data */
	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data */
	bnx2x_q_fill_setup_tx_only(bp, params, rdata);

	DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n",
	   o->cids[cid_index], rdata->general.client_id,
	   rdata->general.sp_client_id, rdata->general.cos);

	/* No need for an explicit memory barrier here: the write to the
	 * SPQ element only has to be ordered against the update of the
	 * SPQ producer, which involves a memory read, and the full memory
	 * barrier that enforces this ordering is inside bnx2x_sp_post().
	 */
	return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
}

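/* Editorial note on bnx2x_q_fill_update_data() below: every updatable
 * attribute travels as a <value, change-flag> pair, and the firmware
 * applies a value only when the matching xxx_change_flg is set, so an
 * UPDATE ramrod leaves unrelated attributes untouched. E.g. activation
 * sets both BNX2X_Q_UPDATE_ACTIVATE and BNX2X_Q_UPDATE_ACTIVATE_CHNG
 * (see bnx2x_q_send_activate() below).
 */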
static void bnx2x_q_fill_update_data(struct bnx2x *bp,
				     struct bnx2x_queue_sp_obj *obj,
				     struct bnx2x_queue_update_params *params,
				     struct client_update_ramrod_data *data)
{
	/* Client ID of the client to update */
	data->client_id = obj->cl_id;

	/* Function ID of the client to update */
	data->func_id = obj->func_id;

	/* Default VLAN value */
	data->default_vlan = cpu_to_le16(params->def_vlan);

	/* Inner VLAN stripping */
	data->inner_vlan_removal_enable_flg =
		test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
	data->inner_vlan_removal_change_flg =
		test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
			 &params->update_flags);

	/* Outer VLAN stripping */
	data->outer_vlan_removal_enable_flg =
		test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
	data->outer_vlan_removal_change_flg =
		test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
			 &params->update_flags);

	/* Drop packets that have a source MAC that doesn't belong to this
	 * Queue.
	 */
	data->anti_spoofing_enable_flg =
		test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
	data->anti_spoofing_change_flg =
		test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);

	/* Activate/Deactivate */
	data->activate_flg =
		test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
	data->activate_change_flg =
		test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);

	/* Enable default VLAN */
	data->default_vlan_enable_flg =
		test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
	data->default_vlan_change_flg =
		test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
			 &params->update_flags);

	/* silent vlan removal */
	data->silent_vlan_change_flg =
		test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
			 &params->update_flags);
	data->silent_vlan_removal_flg =
		test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
	data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
	data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
}

static inline int bnx2x_q_send_update(struct bnx2x *bp,
				      struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	struct client_update_ramrod_data *rdata =
		(struct client_update_ramrod_data *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	struct bnx2x_queue_update_params *update_params =
		&params->params.update;
	u8 cid_index = update_params->cid_index;

	if (cid_index >= o->max_cos) {
		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
			  o->cl_id, cid_index);
		return -EINVAL;
	}

	/* Clear the ramrod data */
	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data */
	bnx2x_q_fill_update_data(bp, o, update_params, rdata);

	/* No need for an explicit memory barrier here: the write to the
	 * SPQ element only has to be ordered against the update of the
	 * SPQ producer, which involves a memory read, and the full memory
	 * barrier that enforces this ordering is inside bnx2x_sp_post().
	 */
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
			     o->cids[cid_index], U64_HI(data_mapping),
			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
}

/**
 * bnx2x_q_send_deactivate - send DEACTIVATE command
 *
 * @bp:		device handle
 * @params:	queue state parameters
 *
 * Implemented using the UPDATE command.
 */
static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_update_params *update = &params->params.update;

	memset(update, 0, sizeof(*update));

	__set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);

	return bnx2x_q_send_update(bp, params);
}

/**
 * bnx2x_q_send_activate - send ACTIVATE command
 *
 * @bp:		device handle
 * @params:	queue state parameters
 *
 * Implemented using the UPDATE command.
 */
static inline int bnx2x_q_send_activate(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_update_params *update = &params->params.update;

	memset(update, 0, sizeof(*update));

	__set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
	__set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);

	return bnx2x_q_send_update(bp, params);
}

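/* A minimal (hypothetical) caller sketch - identifiers such as q_obj are
 * assumed, not taken from this file. Drivers normally go through the
 * generic bnx2x_queue_state_change() entry point rather than calling the
 * senders above directly:
 *
 *	struct bnx2x_queue_state_params q_params = {NULL};
 *
 *	q_params.q_obj = &q_obj;
 *	q_params.cmd = BNX2X_Q_CMD_ACTIVATE;
 *	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
 *	rc = bnx2x_queue_state_change(bp, &q_params);
 */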
static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	/* TODO: Not implemented yet; return a proper errno rather than a
	 * bare -1 so callers see a meaningful error code.
	 */
	return -EINVAL;
}

static inline int bnx2x_q_send_halt(struct bnx2x *bp,
				    struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
			     o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
			     ETH_CONNECTION_TYPE);
}

static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
				       struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	u8 cid_idx = params->params.cfc_del.cid_index;

	if (cid_idx >= o->max_cos) {
		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
			  o->cl_id, cid_idx);
		return -EINVAL;
	}

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
			     o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
}

static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	u8 cid_index = params->params.terminate.cid_index;

	if (cid_index >= o->max_cos) {
		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
			  o->cl_id, cid_index);
		return -EINVAL;
	}

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
			     o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
}

static inline int bnx2x_q_send_empty(struct bnx2x *bp,
				     struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
			     o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
			     ETH_CONNECTION_TYPE);
}

static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	switch (params->cmd) {
	case BNX2X_Q_CMD_INIT:
		return bnx2x_q_init(bp, params);
	case BNX2X_Q_CMD_SETUP_TX_ONLY:
		return bnx2x_q_send_setup_tx_only(bp, params);
	case BNX2X_Q_CMD_DEACTIVATE:
		return bnx2x_q_send_deactivate(bp, params);
	case BNX2X_Q_CMD_ACTIVATE:
		return bnx2x_q_send_activate(bp, params);
	case BNX2X_Q_CMD_UPDATE:
		return bnx2x_q_send_update(bp, params);
	case BNX2X_Q_CMD_UPDATE_TPA:
		return bnx2x_q_send_update_tpa(bp, params);
	case BNX2X_Q_CMD_HALT:
		return bnx2x_q_send_halt(bp, params);
	case BNX2X_Q_CMD_CFC_DEL:
		return bnx2x_q_send_cfc_del(bp, params);
	case BNX2X_Q_CMD_TERMINATE:
		return bnx2x_q_send_terminate(bp, params);
	case BNX2X_Q_CMD_EMPTY:
		return bnx2x_q_send_empty(bp, params);
	default:
		BNX2X_ERR("Unknown command: %d\n", params->cmd);
		return -EINVAL;
	}
}

static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
				    struct bnx2x_queue_state_params *params)
{
	switch (params->cmd) {
	case BNX2X_Q_CMD_SETUP:
		return bnx2x_q_send_setup_e1x(bp, params);
	case BNX2X_Q_CMD_INIT:
	case BNX2X_Q_CMD_SETUP_TX_ONLY:
	case BNX2X_Q_CMD_DEACTIVATE:
	case BNX2X_Q_CMD_ACTIVATE:
	case BNX2X_Q_CMD_UPDATE:
	case BNX2X_Q_CMD_UPDATE_TPA:
	case BNX2X_Q_CMD_HALT:
	case BNX2X_Q_CMD_CFC_DEL:
	case BNX2X_Q_CMD_TERMINATE:
	case BNX2X_Q_CMD_EMPTY:
		return bnx2x_queue_send_cmd_cmn(bp, params);
	default:
		BNX2X_ERR("Unknown command: %d\n", params->cmd);
		return -EINVAL;
	}
}

static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
				   struct bnx2x_queue_state_params *params)
{
	switch (params->cmd) {
	case BNX2X_Q_CMD_SETUP:
		return bnx2x_q_send_setup_e2(bp, params);
	case BNX2X_Q_CMD_INIT:
	case BNX2X_Q_CMD_SETUP_TX_ONLY:
	case BNX2X_Q_CMD_DEACTIVATE:
	case BNX2X_Q_CMD_ACTIVATE:
	case BNX2X_Q_CMD_UPDATE:
	case BNX2X_Q_CMD_UPDATE_TPA:
	case BNX2X_Q_CMD_HALT:
	case BNX2X_Q_CMD_CFC_DEL:
	case BNX2X_Q_CMD_TERMINATE:
	case BNX2X_Q_CMD_EMPTY:
		return bnx2x_queue_send_cmd_cmn(bp, params);
	default:
		BNX2X_ERR("Unknown command: %d\n", params->cmd);
		return -EINVAL;
	}
}

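/* Summary sketch (editorial) of the regular queue state machine encoded
 * in bnx2x_queue_chk_transition() below; tx-only counting is simplified:
 *
 *	RESET --INIT--> INITIALIZED --SETUP--> ACTIVE or INACTIVE
 *	ACTIVE <--ACTIVATE/DEACTIVATE--> INACTIVE
 *	ACTIVE --SETUP_TX_ONLY--> MULTI_COS --TERMINATE--> MCOS_TERMINATED
 *	MCOS_TERMINATED --CFC_DEL--> ACTIVE (last tx-only) or MULTI_COS
 *	ACTIVE/INACTIVE --HALT--> STOPPED --TERMINATE--> TERMINATED
 *	TERMINATED --CFC_DEL--> RESET
 *
 * EMPTY, UPDATE and UPDATE_TPA leave the queue in its current state
 * (UPDATE may flip ACTIVE/INACTIVE when an "activate" change is requested).
 */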
/**
 * bnx2x_queue_chk_transition - check state machine of a regular Queue
 *
 * @bp:		device handle
 * @o:		queue object
 * @params:	queue state parameters
 *
 * (not Forwarding)
 * It both checks if the requested command is legal in a current
 * state and, if it's legal, sets a `next_state' in the object
 * that will be used in the completion flow to set the `state'
 * of the object.
 *
 * returns 0 if a requested command is a legal transition,
 *         -EINVAL otherwise.
 */
static int bnx2x_queue_chk_transition(struct bnx2x *bp,
				      struct bnx2x_queue_sp_obj *o,
				      struct bnx2x_queue_state_params *params)
{
	enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
	enum bnx2x_queue_cmd cmd = params->cmd;
	struct bnx2x_queue_update_params *update_params =
		&params->params.update;
	u8 next_tx_only = o->num_tx_only;

	/* Forget all pending for completion commands if a driver only state
	 * transition has been requested.
	 */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
		o->pending = 0;
		o->next_state = BNX2X_Q_STATE_MAX;
	}

	/* Don't allow a next state transition if we are in the middle of
	 * the previous one.
	 */
	if (o->pending) {
		BNX2X_ERR("Blocking transition since pending was %lx\n",
			  o->pending);
		return -EBUSY;
	}

	switch (state) {
	case BNX2X_Q_STATE_RESET:
		if (cmd == BNX2X_Q_CMD_INIT)
			next_state = BNX2X_Q_STATE_INITIALIZED;

		break;
	case BNX2X_Q_STATE_INITIALIZED:
		if (cmd == BNX2X_Q_CMD_SETUP) {
			if (test_bit(BNX2X_Q_FLG_ACTIVE,
				     &params->params.setup.flags))
				next_state = BNX2X_Q_STATE_ACTIVE;
			else
				next_state = BNX2X_Q_STATE_INACTIVE;
		}

		break;
	case BNX2X_Q_STATE_ACTIVE:
		if (cmd == BNX2X_Q_CMD_DEACTIVATE)
			next_state = BNX2X_Q_STATE_INACTIVE;

		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
			next_state = BNX2X_Q_STATE_ACTIVE;

		else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
			next_state = BNX2X_Q_STATE_MULTI_COS;
			next_tx_only = 1;
		}

		else if (cmd == BNX2X_Q_CMD_HALT)
			next_state = BNX2X_Q_STATE_STOPPED;

		else if (cmd == BNX2X_Q_CMD_UPDATE) {
			/* If "active" state change is requested, update the
			 * state accordingly.
			 */
			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
				     &update_params->update_flags) &&
			    !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
				      &update_params->update_flags))
				next_state = BNX2X_Q_STATE_INACTIVE;
			else
				next_state = BNX2X_Q_STATE_ACTIVE;
		}

		break;
	case BNX2X_Q_STATE_MULTI_COS:
		if (cmd == BNX2X_Q_CMD_TERMINATE)
			next_state = BNX2X_Q_STATE_MCOS_TERMINATED;

		else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
			next_state = BNX2X_Q_STATE_MULTI_COS;
			next_tx_only = o->num_tx_only + 1;
		}

		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
			next_state = BNX2X_Q_STATE_MULTI_COS;

		else if (cmd == BNX2X_Q_CMD_UPDATE) {
			/* If "active" state change is requested, update the
			 * state accordingly.
			 */
			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
				     &update_params->update_flags) &&
			    !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
				      &update_params->update_flags))
				next_state = BNX2X_Q_STATE_INACTIVE;
			else
				next_state = BNX2X_Q_STATE_MULTI_COS;
		}

		break;
	case BNX2X_Q_STATE_MCOS_TERMINATED:
		if (cmd == BNX2X_Q_CMD_CFC_DEL) {
			next_tx_only = o->num_tx_only - 1;
			if (next_tx_only == 0)
				next_state = BNX2X_Q_STATE_ACTIVE;
			else
				next_state = BNX2X_Q_STATE_MULTI_COS;
		}

		break;
	case BNX2X_Q_STATE_INACTIVE:
		if (cmd == BNX2X_Q_CMD_ACTIVATE)
			next_state = BNX2X_Q_STATE_ACTIVE;

		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
			next_state = BNX2X_Q_STATE_INACTIVE;

		else if (cmd == BNX2X_Q_CMD_HALT)
			next_state = BNX2X_Q_STATE_STOPPED;

		else if (cmd == BNX2X_Q_CMD_UPDATE) {
			/* If "active" state change is requested, update the
			 * state accordingly.
			 */
			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
				     &update_params->update_flags) &&
			    test_bit(BNX2X_Q_UPDATE_ACTIVATE,
				     &update_params->update_flags)) {
				if (o->num_tx_only == 0)
					next_state = BNX2X_Q_STATE_ACTIVE;
				else /* tx only queues exist for this queue */
					next_state = BNX2X_Q_STATE_MULTI_COS;
			} else
				next_state = BNX2X_Q_STATE_INACTIVE;
		}

		break;
	case BNX2X_Q_STATE_STOPPED:
		if (cmd == BNX2X_Q_CMD_TERMINATE)
			next_state = BNX2X_Q_STATE_TERMINATED;

		break;
	case BNX2X_Q_STATE_TERMINATED:
		if (cmd == BNX2X_Q_CMD_CFC_DEL)
			next_state = BNX2X_Q_STATE_RESET;

		break;
	default:
		BNX2X_ERR("Illegal state: %d\n", state);
	}

	/* Transition is assured */
	if (next_state != BNX2X_Q_STATE_MAX) {
		DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
		   state, cmd, next_state);
		o->next_state = next_state;
		o->next_tx_only = next_tx_only;
		return 0;
	}

	DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);

	return -EINVAL;
}

void bnx2x_init_queue_obj(struct bnx2x *bp,
			  struct bnx2x_queue_sp_obj *obj,
			  u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
			  void *rdata,
			  dma_addr_t rdata_mapping, unsigned long type)
{
	memset(obj, 0, sizeof(*obj));

	/* We support only BNX2X_MULTI_TX_COS Tx CoS at the moment */
	BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt);

	memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
	obj->max_cos = cid_cnt;
	obj->cl_id = cl_id;
	obj->func_id = func_id;
	obj->rdata = rdata;
	obj->rdata_mapping = rdata_mapping;
	obj->type = type;
	obj->next_state = BNX2X_Q_STATE_MAX;

	if (CHIP_IS_E1x(bp))
		obj->send_cmd = bnx2x_queue_send_cmd_e1x;
	else
		obj->send_cmd = bnx2x_queue_send_cmd_e2;

	obj->check_transition = bnx2x_queue_chk_transition;

	obj->complete_cmd = bnx2x_queue_comp_cmd;
	obj->wait_comp = bnx2x_queue_wait_comp;
	obj->set_pending = bnx2x_queue_set_pending;
}

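/* A minimal (hypothetical) initialization sketch - cids[], cl_id, rdata
 * and rdata_mapping are assumed to be prepared by the caller:
 *
 *	unsigned long q_type = 0;
 *
 *	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
 *	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
 *	bnx2x_init_queue_obj(bp, &q_obj, cl_id, cids, cid_cnt,
 *			     BP_FUNC(bp), rdata, rdata_mapping, q_type);
 */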
/* return a queue object's logical state */
int bnx2x_get_q_logical_state(struct bnx2x *bp,
			      struct bnx2x_queue_sp_obj *obj)
{
	switch (obj->state) {
	case BNX2X_Q_STATE_ACTIVE:
	case BNX2X_Q_STATE_MULTI_COS:
		return BNX2X_Q_LOGICAL_STATE_ACTIVE;
	case BNX2X_Q_STATE_RESET:
	case BNX2X_Q_STATE_INITIALIZED:
	case BNX2X_Q_STATE_MCOS_TERMINATED:
	case BNX2X_Q_STATE_INACTIVE:
	case BNX2X_Q_STATE_STOPPED:
	case BNX2X_Q_STATE_TERMINATED:
	case BNX2X_Q_STATE_FLRED:
		return BNX2X_Q_LOGICAL_STATE_STOPPED;
	default:
		return -EINVAL;
	}
}

/********************** Function state object *********************************/
enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
					   struct bnx2x_func_sp_obj *o)
{
	/* in the middle of transaction - return INVALID state */
	if (o->pending)
		return BNX2X_F_STATE_MAX;

	/* Ensure the order of reading of o->pending and o->state:
	 * o->pending should be read first.
	 */
	rmb();

	return o->state;
}

static int bnx2x_func_wait_comp(struct bnx2x *bp,
				struct bnx2x_func_sp_obj *o,
				enum bnx2x_func_cmd cmd)
{
	return bnx2x_state_wait(bp, cmd, &o->pending);
}

/**
 * bnx2x_func_state_change_comp - complete the state machine transition
 *
 * @bp:		device handle
 * @o:		function object
 * @cmd:	command that has completed
 *
 * Called on state change transition. Completes the state
 * machine transition only - no HW interaction.
 */
static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
					       struct bnx2x_func_sp_obj *o,
					       enum bnx2x_func_cmd cmd)
{
	unsigned long cur_pending = o->pending;

	if (!test_and_clear_bit(cmd, &cur_pending)) {
		BNX2X_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n",
			  cmd, BP_FUNC(bp), o->state,
			  cur_pending, o->next_state);
		return -EINVAL;
	}

	DP(BNX2X_MSG_SP,
	   "Completing command %d for func %d, setting state to %d\n",
	   cmd, BP_FUNC(bp), o->next_state);

	o->state = o->next_state;
	o->next_state = BNX2X_F_STATE_MAX;

	/* It's important that o->state and o->next_state are
	 * updated before o->pending.
	 */
	wmb();

	clear_bit(cmd, &o->pending);
	smp_mb__after_clear_bit();

	return 0;
}

/**
 * bnx2x_func_comp_cmd - complete the state change command
 *
 * @bp:		device handle
 * @o:		function object
 * @cmd:	command that has completed
 *
 * Checks that the arrived completion is expected.
 */
static int bnx2x_func_comp_cmd(struct bnx2x *bp,
			       struct bnx2x_func_sp_obj *o,
			       enum bnx2x_func_cmd cmd)
{
	/* Complete the state machine part first, check if it's a
	 * legal completion.
	 */
	int rc = bnx2x_func_state_change_comp(bp, o, cmd);
	return rc;
}

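/* Summary sketch (editorial) of the function state machine encoded in
 * bnx2x_func_chk_transition() below:
 *
 *	RESET --HW_INIT--> INITIALIZED --START--> STARTED
 *	INITIALIZED --HW_RESET--> RESET
 *	STARTED --STOP--> INITIALIZED
 *	STARTED --TX_STOP--> TX_STOPPED --TX_START--> STARTED
 *
 * AFEX_UPDATE, AFEX_VIFLISTS and SWITCH_UPDATE keep the current state
 * and are refused while a STOP completion is pending.
 */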
/**
 * bnx2x_func_chk_transition - perform function state machine transition
 *
 * @bp:		device handle
 * @o:		function object
 * @params:	function state parameters
 *
 * It both checks if the requested command is legal in a current
 * state and, if it's legal, sets a `next_state' in the object
 * that will be used in the completion flow to set the `state'
 * of the object.
 *
 * returns 0 if a requested command is a legal transition,
 *         -EINVAL otherwise.
 */
static int bnx2x_func_chk_transition(struct bnx2x *bp,
				     struct bnx2x_func_sp_obj *o,
				     struct bnx2x_func_state_params *params)
{
	enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
	enum bnx2x_func_cmd cmd = params->cmd;

	/* Forget all pending for completion commands if a driver only state
	 * transition has been requested.
	 */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
		o->pending = 0;
		o->next_state = BNX2X_F_STATE_MAX;
	}

	/* Don't allow a next state transition if we are in the middle of
	 * the previous one.
	 */
	if (o->pending)
		return -EBUSY;

	switch (state) {
	case BNX2X_F_STATE_RESET:
		if (cmd == BNX2X_F_CMD_HW_INIT)
			next_state = BNX2X_F_STATE_INITIALIZED;

		break;
	case BNX2X_F_STATE_INITIALIZED:
		if (cmd == BNX2X_F_CMD_START)
			next_state = BNX2X_F_STATE_STARTED;

		else if (cmd == BNX2X_F_CMD_HW_RESET)
			next_state = BNX2X_F_STATE_RESET;

		break;
	case BNX2X_F_STATE_STARTED:
		if (cmd == BNX2X_F_CMD_STOP)
			next_state = BNX2X_F_STATE_INITIALIZED;

		/* afex ramrods can be sent only in started mode, and only
		 * if not pending for function_stop ramrod completion;
		 * for these events the next state remains STARTED.
		 */
		else if ((cmd == BNX2X_F_CMD_AFEX_UPDATE) &&
			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
			next_state = BNX2X_F_STATE_STARTED;

		else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) &&
			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
			next_state = BNX2X_F_STATE_STARTED;

		/* Switch_update ramrod can be sent in either started or
		 * tx_stopped state, and it doesn't change the state.
		 */
		else if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
			next_state = BNX2X_F_STATE_STARTED;

		else if (cmd == BNX2X_F_CMD_TX_STOP)
			next_state = BNX2X_F_STATE_TX_STOPPED;

		break;
	case BNX2X_F_STATE_TX_STOPPED:
		if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
		    (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
			next_state = BNX2X_F_STATE_TX_STOPPED;

		else if (cmd == BNX2X_F_CMD_TX_START)
			next_state = BNX2X_F_STATE_STARTED;

		break;
	default:
		BNX2X_ERR("Unknown state: %d\n", state);
	}

	/* Transition is assured */
	if (next_state != BNX2X_F_STATE_MAX) {
		DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
		   state, cmd, next_state);
		o->next_state = next_state;
		return 0;
	}

	DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
	   state, cmd);

	return -EINVAL;
}

/**
 * bnx2x_func_init_func - performs HW init at function stage
 *
 * @bp:		device handle
 * @drv:	driver-specific init/reset callbacks
 *
 * Init HW when the current phase is
 * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
 * HW blocks.
 */
static inline int bnx2x_func_init_func(struct bnx2x *bp,
				       const struct bnx2x_func_sp_drv_ops *drv)
{
	return drv->init_hw_func(bp);
}

/**
 * bnx2x_func_init_port - performs HW init at port stage
 *
 * @bp:		device handle
 * @drv:	driver-specific init/reset callbacks
 *
 * Init HW when the current phase is
 * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
 * FUNCTION-only HW blocks.
 */
static inline int bnx2x_func_init_port(struct bnx2x *bp,
				       const struct bnx2x_func_sp_drv_ops *drv)
{
	int rc = drv->init_hw_port(bp);
	if (rc)
		return rc;

	return bnx2x_func_init_func(bp, drv);
}

/**
 * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage
 *
 * @bp:		device handle
 * @drv:	driver-specific init/reset callbacks
 *
 * Init HW when the current phase is
 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
 * PORT-only and FUNCTION-only HW blocks.
 */
static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
					const struct bnx2x_func_sp_drv_ops *drv)
{
	int rc = drv->init_hw_cmn_chip(bp);
	if (rc)
		return rc;

	return bnx2x_func_init_port(bp, drv);
}

/**
 * bnx2x_func_init_cmn - performs HW init at common stage
 *
 * @bp:		device handle
 * @drv:	driver-specific init/reset callbacks
 *
 * Init HW when the current phase is
 * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
 * PORT-only and FUNCTION-only HW blocks.
 */
static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
				      const struct bnx2x_func_sp_drv_ops *drv)
{
	int rc = drv->init_hw_cmn(bp);
	if (rc)
		return rc;

	return bnx2x_func_init_port(bp, drv);
}

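/* Editorial note: the init stages above nest - each stage initializes
 * its own blocks and then falls through to the narrower ones, e.g.
 * COMMON_CHIP -> PORT -> FUNCTION, so bnx2x_func_hw_init() below only
 * needs to dispatch on the outermost load phase reported by the MCP.
 */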
static int bnx2x_func_hw_init(struct bnx2x *bp,
			      struct bnx2x_func_state_params *params)
{
	u32 load_code = params->params.hw_init.load_phase;
	struct bnx2x_func_sp_obj *o = params->f_obj;
	const struct bnx2x_func_sp_drv_ops *drv = o->drv;
	int rc = 0;

	DP(BNX2X_MSG_SP, "function %d load_code %x\n",
	   BP_ABS_FUNC(bp), load_code);

	/* Prepare buffers for unzipping the FW */
	rc = drv->gunzip_init(bp);
	if (rc)
		return rc;

	/* Prepare FW */
	rc = drv->init_fw(bp);
	if (rc) {
		BNX2X_ERR("Error loading firmware\n");
		goto init_err;
	}

	/* Handle the beginning of COMMON_XXX phases separately... */
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		rc = bnx2x_func_init_cmn_chip(bp, drv);
		if (rc)
			goto init_err;

		break;
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_func_init_cmn(bp, drv);
		if (rc)
			goto init_err;

		break;
	case FW_MSG_CODE_DRV_LOAD_PORT:
		rc = bnx2x_func_init_port(bp, drv);
		if (rc)
			goto init_err;

		break;
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		rc = bnx2x_func_init_func(bp, drv);
		if (rc)
			goto init_err;

		break;
	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		rc = -EINVAL;
	}

init_err:
	drv->gunzip_end(bp);

	/* In case of success, complete the command immediately: no ramrods
	 * have been sent.
	 */
	if (!rc)
		o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);

	return rc;
}

/**
 * bnx2x_func_reset_func - reset HW at function stage
 *
 * @bp:		device handle
 * @drv:	driver-specific init/reset callbacks
 *
 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
 * FUNCTION-only HW blocks.
 */
static inline void bnx2x_func_reset_func(struct bnx2x *bp,
					const struct bnx2x_func_sp_drv_ops *drv)
{
	drv->reset_hw_func(bp);
}

/**
 * bnx2x_func_reset_port - reset HW at port stage
 *
 * @bp:		device handle
 * @drv:	driver-specific init/reset callbacks
 *
 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
 * FUNCTION-only and PORT-only HW blocks.
 *
 * !!!IMPORTANT!!!
 *
 * It's important to call reset_port before reset_func() as the last thing
 * reset_func does is pf_disable(), which disables PGLUE_B and thereby
 * makes any further DMAE transactions impossible.
 */
static inline void bnx2x_func_reset_port(struct bnx2x *bp,
					const struct bnx2x_func_sp_drv_ops *drv)
{
	drv->reset_hw_port(bp);
	bnx2x_func_reset_func(bp, drv);
}

/**
 * bnx2x_func_reset_cmn - reset HW at common stage
 *
 * @bp:		device handle
 * @drv:	driver-specific init/reset callbacks
 *
 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
 * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
 * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
 */
static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
					const struct bnx2x_func_sp_drv_ops *drv)
{
	bnx2x_func_reset_port(bp, drv);
	drv->reset_hw_cmn(bp);
}

static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
				      struct bnx2x_func_state_params *params)
{
	u32 reset_phase = params->params.hw_reset.reset_phase;
	struct bnx2x_func_sp_obj *o = params->f_obj;
	const struct bnx2x_func_sp_drv_ops *drv = o->drv;

	DP(BNX2X_MSG_SP, "function %d reset_phase %x\n", BP_ABS_FUNC(bp),
	   reset_phase);

	switch (reset_phase) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_func_reset_cmn(bp, drv);
		break;
	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_func_reset_port(bp, drv);
		break;
	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_func_reset_func(bp, drv);
		break;
	default:
		BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
			  reset_phase);
		break;
	}

	/* Complete the command immediately: no ramrods have been sent. */
	o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);

	return 0;
}

static inline int bnx2x_func_send_start(struct bnx2x *bp,
					struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	struct function_start_data *rdata =
		(struct function_start_data *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	struct bnx2x_func_start_params *start_params = &params->params.start;

	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data with provided parameters */
	rdata->function_mode = (u8)start_params->mf_mode;
	rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
	rdata->path_id = BP_PATH(bp);
	rdata->network_cos_mode = start_params->network_cos_mode;
	rdata->gre_tunnel_mode = start_params->gre_tunnel_mode;
	rdata->gre_tunnel_rss = start_params->gre_tunnel_rss;

	/* No need for an explicit memory barrier here: the write to the
	 * SPQ element only has to be ordered against the update of the
	 * SPQ producer, which involves a memory read, and the full memory
	 * barrier that enforces this ordering is inside bnx2x_sp_post().
	 */
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
}

static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
					struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	struct function_update_data *rdata =
		(struct function_update_data *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	struct bnx2x_func_switch_update_params *switch_update_params =
		&params->params.switch_update;

	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data with provided parameters */
	rdata->tx_switch_suspend_change_flg = 1;
	rdata->tx_switch_suspend = switch_update_params->suspend;
	rdata->echo = SWITCH_UPDATE;

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
}

static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
					struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	struct function_update_data *rdata =
		(struct function_update_data *)o->afex_rdata;
	dma_addr_t data_mapping = o->afex_rdata_mapping;
	struct bnx2x_func_afex_update_params *afex_update_params =
		&params->params.afex_update;

	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data with provided parameters */
	rdata->vif_id_change_flg = 1;
	rdata->vif_id = cpu_to_le16(afex_update_params->vif_id);
	rdata->afex_default_vlan_change_flg = 1;
	rdata->afex_default_vlan =
		cpu_to_le16(afex_update_params->afex_default_vlan);
	rdata->allowed_priorities_change_flg = 1;
	rdata->allowed_priorities = afex_update_params->allowed_priorities;
	rdata->echo = AFEX_UPDATE;

	/* No need for an explicit memory barrier here: the write to the
	 * SPQ element only has to be ordered against the update of the
	 * SPQ producer, which involves a memory read, and the full memory
	 * barrier that enforces this ordering is inside bnx2x_sp_post().
	 */
	DP(BNX2X_MSG_SP,
	   "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
	   rdata->vif_id,
	   rdata->afex_default_vlan, rdata->allowed_priorities);

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
}

static
inline int bnx2x_func_send_afex_viflists(struct bnx2x *bp,
					 struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	struct afex_vif_list_ramrod_data *rdata =
		(struct afex_vif_list_ramrod_data *)o->afex_rdata;
	struct bnx2x_func_afex_viflists_params *afex_vif_params =
		&params->params.afex_viflists;
	u64 *p_rdata = (u64 *)rdata;

	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data with provided parameters */
	rdata->vif_list_index = cpu_to_le16(afex_vif_params->vif_list_index);
	rdata->func_bit_map = afex_vif_params->func_bit_map;
	rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
	rdata->func_to_clear = afex_vif_params->func_to_clear;

	/* send the sub-command in the echo field */
	rdata->echo = afex_vif_params->afex_vif_list_command;

	/* No need for an explicit memory barrier here: the write to the
	 * SPQ element only has to be ordered against the update of the
	 * SPQ producer, which involves a memory read, and the full memory
	 * barrier that enforces this ordering is inside bnx2x_sp_post().
	 */
	DP(BNX2X_MSG_SP, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n",
	   rdata->afex_vif_list_command, rdata->vif_list_index,
	   rdata->func_bit_map, rdata->func_to_clear);

	/* this ramrod sends data directly and not through DMA mapping */
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
			     U64_HI(*p_rdata), U64_LO(*p_rdata),
			     NONE_CONNECTION_TYPE);
}

static inline int bnx2x_func_send_stop(struct bnx2x *bp,
				       struct bnx2x_func_state_params *params)
{
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
			     NONE_CONNECTION_TYPE);
}

static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp,
					struct bnx2x_func_state_params *params)
{
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0,
			     NONE_CONNECTION_TYPE);
}

static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
					struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	struct flow_control_configuration *rdata =
		(struct flow_control_configuration *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	struct bnx2x_func_tx_start_params *tx_start_params =
		&params->params.tx_start;
	int i;

	memset(rdata, 0, sizeof(*rdata));

	rdata->dcb_enabled = tx_start_params->dcb_enabled;
	rdata->dcb_version = tx_start_params->dcb_version;
	rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;

	for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
		rdata->traffic_type_to_priority_cos[i] =
			tx_start_params->traffic_type_to_priority_cos[i];

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
}

static int bnx2x_func_send_cmd(struct bnx2x *bp,
			       struct bnx2x_func_state_params *params)
{
	switch (params->cmd) {
	case BNX2X_F_CMD_HW_INIT:
		return bnx2x_func_hw_init(bp, params);
	case BNX2X_F_CMD_START:
		return bnx2x_func_send_start(bp, params);
	case BNX2X_F_CMD_STOP:
		return bnx2x_func_send_stop(bp, params);
	case BNX2X_F_CMD_HW_RESET:
		return bnx2x_func_hw_reset(bp, params);
	case BNX2X_F_CMD_AFEX_UPDATE:
		return bnx2x_func_send_afex_update(bp, params);
	case BNX2X_F_CMD_AFEX_VIFLISTS:
		return bnx2x_func_send_afex_viflists(bp, params);
	case BNX2X_F_CMD_TX_STOP:
		return bnx2x_func_send_tx_stop(bp, params);
	case BNX2X_F_CMD_TX_START:
		return bnx2x_func_send_tx_start(bp, params);
	case BNX2X_F_CMD_SWITCH_UPDATE:
		return bnx2x_func_send_switch_update(bp, params);
	default:
		BNX2X_ERR("Unknown command: %d\n", params->cmd);
		return -EINVAL;
	}
}

void bnx2x_init_func_obj(struct bnx2x *bp,
			 struct bnx2x_func_sp_obj *obj,
			 void *rdata, dma_addr_t rdata_mapping,
			 void *afex_rdata, dma_addr_t afex_rdata_mapping,
			 struct bnx2x_func_sp_drv_ops *drv_iface)
{
	memset(obj, 0, sizeof(*obj));

	mutex_init(&obj->one_pending_mutex);

	obj->rdata = rdata;
	obj->rdata_mapping = rdata_mapping;
	obj->afex_rdata = afex_rdata;
	obj->afex_rdata_mapping = afex_rdata_mapping;
	obj->send_cmd = bnx2x_func_send_cmd;
	obj->check_transition = bnx2x_func_chk_transition;
	obj->complete_cmd = bnx2x_func_comp_cmd;
	obj->wait_comp = bnx2x_func_wait_comp;
	obj->drv = drv_iface;
}

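/* A minimal (hypothetical) wiring sketch - the rdata buffers and the
 * driver-ops table are assumed to be provided by the main driver:
 *
 *	bnx2x_init_func_obj(bp, &bp->func_obj,
 *			    bnx2x_sp(bp, func_rdata),
 *			    bnx2x_sp_mapping(bp, func_rdata),
 *			    bnx2x_sp(bp, func_afex_rdata),
 *			    bnx2x_sp_mapping(bp, func_afex_rdata),
 *			    &bnx2x_func_sp_drv);
 */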
/**
 * bnx2x_func_state_change - perform Function state change transition
 *
 * @bp:		device handle
 * @params:	parameters to perform the transaction
 *
 * returns 0 in case of successfully completed transition,
 *         negative error code in case of failure, positive
 *         (EBUSY) value if there is a completion that is
 *         still pending (possible only if RAMROD_COMP_WAIT is
 *         not set in params->ramrod_flags for asynchronous
 *         commands).
 */
int bnx2x_func_state_change(struct bnx2x *bp,
			    struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	int rc, cnt = 300;
	enum bnx2x_func_cmd cmd = params->cmd;
	unsigned long *pending = &o->pending;

	mutex_lock(&o->one_pending_mutex);

	/* Check that the requested transition is legal */
	rc = o->check_transition(bp, o, params);
	if ((rc == -EBUSY) &&
	    (test_bit(RAMROD_RETRY, &params->ramrod_flags))) {
		while ((rc == -EBUSY) && (--cnt > 0)) {
			mutex_unlock(&o->one_pending_mutex);
			msleep(10);
			mutex_lock(&o->one_pending_mutex);
			rc = o->check_transition(bp, o, params);
		}
		if (rc == -EBUSY) {
			mutex_unlock(&o->one_pending_mutex);
			BNX2X_ERR("timeout waiting for previous ramrod completion\n");
			return rc;
		}
	} else if (rc) {
		mutex_unlock(&o->one_pending_mutex);
		return rc;
	}

	/* Set "pending" bit */
	set_bit(cmd, pending);

	/* Don't send a command if only driver cleanup was requested */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
		bnx2x_func_state_change_comp(bp, o, cmd);
		mutex_unlock(&o->one_pending_mutex);
	} else {
		/* Send a ramrod */
		rc = o->send_cmd(bp, params);

		mutex_unlock(&o->one_pending_mutex);

		if (rc) {
			o->next_state = BNX2X_F_STATE_MAX;
			clear_bit(cmd, pending);
			smp_mb__after_clear_bit();
			return rc;
		}

		if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
			rc = o->wait_comp(bp, o, cmd);
			if (rc)
				return rc;

			return 0;
		}
	}

	return !!test_bit(cmd, pending);
}
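
/* A minimal (hypothetical) caller sketch, e.g. pausing Tx traffic and
 * waiting for the ramrod completion (identifiers assumed):
 *
 *	struct bnx2x_func_state_params func_params = {NULL};
 *
 *	func_params.f_obj = &bp->func_obj;
 *	func_params.cmd = BNX2X_F_CMD_TX_STOP;
 *	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
 *	rc = bnx2x_func_state_change(bp, &func_params);
 */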