bna_tx_rx.c

/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */
#include "bna.h"
#include "bfi.h"

/* IB */
static void
bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
{
	ib->coalescing_timeo = coalescing_timeo;
	ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
				(u32)ib->coalescing_timeo, 0);
}

/* RXF */
#define bna_rxf_vlan_cfg_soft_reset(rxf)				\
do {									\
	(rxf)->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;		\
	(rxf)->vlan_strip_pending = true;				\
} while (0)

#define bna_rxf_rss_cfg_soft_reset(rxf)					\
do {									\
	if ((rxf)->rss_status == BNA_STATUS_T_ENABLED)			\
		(rxf)->rss_pending = (BNA_RSS_F_RIT_PENDING |		\
				BNA_RSS_F_CFG_PENDING |			\
				BNA_RSS_F_STATUS_PENDING);		\
} while (0)

static int bna_rxf_cfg_apply(struct bna_rxf *rxf);
static void bna_rxf_cfg_reset(struct bna_rxf *rxf);
static int bna_rxf_fltr_clear(struct bna_rxf *rxf);
static int bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf,
					enum bna_cleanup_type cleanup);
static int bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf,
					enum bna_cleanup_type cleanup);
static int bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf,
					enum bna_cleanup_type cleanup);

bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, paused, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, cfg_wait, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, fltr_clr_wait, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, last_resp_wait, struct bna_rxf,
			enum bna_rxf_event);
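
/*
 * RXF state machine overview: "stopped" is the reset state.  RXF_E_START
 * moves to "cfg_wait" (or "paused" if the RXF is flagged paused), where
 * pending filter/VLAN/RSS configuration is pushed to firmware one command at
 * a time; once nothing is pending the RXF settles in "started".
 * "fltr_clr_wait" drains CAM entries on pause, and "last_resp_wait" absorbs
 * the final firmware response when stopping mid-configuration.
 */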
static void
bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
{
	call_rxf_stop_cbfn(rxf);
}

static void
bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_START:
		if (rxf->flags & BNA_RXF_F_PAUSED) {
			bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
			call_rxf_start_cbfn(rxf);
		} else
			bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
		break;
	case RXF_E_STOP:
		call_rxf_stop_cbfn(rxf);
		break;
	case RXF_E_FAIL:
		/* No-op */
		break;
	case RXF_E_CONFIG:
		call_rxf_cam_fltr_cbfn(rxf);
		break;
	case RXF_E_PAUSE:
		rxf->flags |= BNA_RXF_F_PAUSED;
		call_rxf_pause_cbfn(rxf);
		break;
	case RXF_E_RESUME:
		rxf->flags &= ~BNA_RXF_F_PAUSED;
		call_rxf_resume_cbfn(rxf);
		break;
	default:
		bfa_sm_fault(event);
	}
}

static void
bna_rxf_sm_paused_entry(struct bna_rxf *rxf)
{
	call_rxf_pause_cbfn(rxf);
}

static void
bna_rxf_sm_paused(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
	case RXF_E_FAIL:
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;
	case RXF_E_CONFIG:
		call_rxf_cam_fltr_cbfn(rxf);
		break;
	case RXF_E_RESUME:
		rxf->flags &= ~BNA_RXF_F_PAUSED;
		bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
		break;
	default:
		bfa_sm_fault(event);
	}
}

static void
bna_rxf_sm_cfg_wait_entry(struct bna_rxf *rxf)
{
	if (!bna_rxf_cfg_apply(rxf)) {
		/* No more pending config updates */
		bfa_fsm_set_state(rxf, bna_rxf_sm_started);
	}
}

static void
bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
		bfa_fsm_set_state(rxf, bna_rxf_sm_last_resp_wait);
		break;
	case RXF_E_FAIL:
		bna_rxf_cfg_reset(rxf);
		call_rxf_start_cbfn(rxf);
		call_rxf_cam_fltr_cbfn(rxf);
		call_rxf_resume_cbfn(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;
	case RXF_E_CONFIG:
		/* No-op */
		break;
	case RXF_E_PAUSE:
		rxf->flags |= BNA_RXF_F_PAUSED;
		call_rxf_start_cbfn(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
		break;
	case RXF_E_FW_RESP:
		if (!bna_rxf_cfg_apply(rxf)) {
			/* No more pending config updates */
			bfa_fsm_set_state(rxf, bna_rxf_sm_started);
		}
		break;
	default:
		bfa_sm_fault(event);
	}
}

static void
bna_rxf_sm_started_entry(struct bna_rxf *rxf)
{
	call_rxf_start_cbfn(rxf);
	call_rxf_cam_fltr_cbfn(rxf);
	call_rxf_resume_cbfn(rxf);
}

static void
bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
	case RXF_E_FAIL:
		bna_rxf_cfg_reset(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;
	case RXF_E_CONFIG:
		bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
		break;
	case RXF_E_PAUSE:
		rxf->flags |= BNA_RXF_F_PAUSED;
		if (!bna_rxf_fltr_clear(rxf))
			bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
		else
			bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
		break;
	default:
		bfa_sm_fault(event);
	}
}

static void
bna_rxf_sm_fltr_clr_wait_entry(struct bna_rxf *rxf)
{
}

static void
bna_rxf_sm_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_FAIL:
		bna_rxf_cfg_reset(rxf);
		call_rxf_pause_cbfn(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;
	case RXF_E_FW_RESP:
		if (!bna_rxf_fltr_clear(rxf)) {
			/* No more pending CAM entries to clear */
			bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
		}
		break;
	default:
		bfa_sm_fault(event);
	}
}

static void
bna_rxf_sm_last_resp_wait_entry(struct bna_rxf *rxf)
{
}

static void
bna_rxf_sm_last_resp_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_FAIL:
	case RXF_E_FW_RESP:
		bna_rxf_cfg_reset(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;
	default:
		bfa_sm_fault(event);
	}
}
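
/*
 * The bna_bfi_*() helpers below each build one BFI ENET request in
 * rxf->bfi_enet_cmd and post it to the message queue.  The firmware's reply
 * arrives later as an RXF_E_FW_RESP event, which lets the state machine push
 * the next piece of pending configuration.
 */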
static void
bna_bfi_ucast_req(struct bna_rxf *rxf, struct bna_mac *mac,
		enum bfi_enet_h2i_msgs req_type)
{
	struct bfi_enet_ucast_req *req = &rxf->bfi_enet_cmd.ucast_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_ucast_req)));
	memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_ucast_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_mcast_add_req(struct bna_rxf *rxf, struct bna_mac *mac)
{
	struct bfi_enet_mcast_add_req *req =
		&rxf->bfi_enet_cmd.mcast_add_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_ADD_REQ,
		0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_add_req)));
	memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_mcast_add_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_mcast_del_req(struct bna_rxf *rxf, u16 handle)
{
	struct bfi_enet_mcast_del_req *req =
		&rxf->bfi_enet_cmd.mcast_del_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_DEL_REQ,
		0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_del_req)));
	req->handle = htons(handle);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_mcast_del_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_mcast_filter_req(struct bna_rxf *rxf, enum bna_status status)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_MAC_MCAST_FILTER_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_rx_promisc_req(struct bna_rxf *rxf, enum bna_status status)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_PROMISCUOUS_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_rx_vlan_filter_set(struct bna_rxf *rxf, u8 block_idx)
{
	struct bfi_enet_rx_vlan_req *req = &rxf->bfi_enet_cmd.vlan_req;
	int i;
	int j;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_VLAN_SET_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_vlan_req)));
	req->block_idx = block_idx;
	for (i = 0; i < (BFI_ENET_VLAN_BLOCK_SIZE / 32); i++) {
		j = (block_idx * (BFI_ENET_VLAN_BLOCK_SIZE / 32)) + i;
		if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED)
			req->bit_mask[i] =
				htonl(rxf->vlan_filter_table[j]);
		else
			req->bit_mask[i] = 0xFFFFFFFF;
	}
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_rx_vlan_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_vlan_strip_enable(struct bna_rxf *rxf)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = rxf->vlan_strip_status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_rit_cfg(struct bna_rxf *rxf)
{
	struct bfi_enet_rit_req *req = &rxf->bfi_enet_cmd.rit_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RIT_CFG_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rit_req)));
	req->size = htons(rxf->rit_size);
	memcpy(&req->table[0], rxf->rit, rxf->rit_size);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_rit_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_rss_cfg(struct bna_rxf *rxf)
{
	struct bfi_enet_rss_cfg_req *req = &rxf->bfi_enet_cmd.rss_req;
	int i;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RSS_CFG_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rss_cfg_req)));
	req->cfg.type = rxf->rss_cfg.hash_type;
	req->cfg.mask = rxf->rss_cfg.hash_mask;
	for (i = 0; i < BFI_ENET_RSS_KEY_LEN; i++)
		req->cfg.key[i] =
			htonl(rxf->rss_cfg.toeplitz_hash_key[i]);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_rss_cfg_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_rss_enable(struct bna_rxf *rxf)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RSS_ENABLE_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = rxf->rss_status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
/* This function gets the multicast MAC that has already been added to CAM */
static struct bna_mac *
bna_rxf_mcmac_get(struct bna_rxf *rxf, u8 *mac_addr)
{
	struct bna_mac *mac;
	struct list_head *qe;

	list_for_each(qe, &rxf->mcast_active_q) {
		mac = (struct bna_mac *)qe;
		if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
			return mac;
	}

	list_for_each(qe, &rxf->mcast_pending_del_q) {
		mac = (struct bna_mac *)qe;
		if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
			return mac;
	}

	return NULL;
}

static struct bna_mcam_handle *
bna_rxf_mchandle_get(struct bna_rxf *rxf, int handle)
{
	struct bna_mcam_handle *mchandle;
	struct list_head *qe;

	list_for_each(qe, &rxf->mcast_handle_q) {
		mchandle = (struct bna_mcam_handle *)qe;
		if (mchandle->handle == handle)
			return mchandle;
	}

	return NULL;
}

static void
bna_rxf_mchandle_attach(struct bna_rxf *rxf, u8 *mac_addr, int handle)
{
	struct bna_mac *mcmac;
	struct bna_mcam_handle *mchandle;

	mcmac = bna_rxf_mcmac_get(rxf, mac_addr);
	mchandle = bna_rxf_mchandle_get(rxf, handle);
	if (mchandle == NULL) {
		mchandle = bna_mcam_mod_handle_get(&rxf->rx->bna->mcam_mod);
		mchandle->handle = handle;
		mchandle->refcnt = 0;
		list_add_tail(&mchandle->qe, &rxf->mcast_handle_q);
	}
	mchandle->refcnt++;
	mcmac->handle = mchandle;
}

static int
bna_rxf_mcast_del(struct bna_rxf *rxf, struct bna_mac *mac,
		enum bna_cleanup_type cleanup)
{
	struct bna_mcam_handle *mchandle;
	int ret = 0;

	mchandle = mac->handle;
	if (mchandle == NULL)
		return ret;

	mchandle->refcnt--;
	if (mchandle->refcnt == 0) {
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_mcast_del_req(rxf, mchandle->handle);
			ret = 1;
		}
		list_del(&mchandle->qe);
		bfa_q_qe_init(&mchandle->qe);
		bna_mcam_mod_handle_put(&rxf->rx->bna->mcam_mod, mchandle);
	}
	mac->handle = NULL;

	return ret;
}
static int
bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
{
	struct bna_mac *mac = NULL;
	struct list_head *qe;
	int ret;

	/* Delete multicast entries previously added */
	while (!list_empty(&rxf->mcast_pending_del_q)) {
		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP);
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
		if (ret)
			return ret;
	}

	/* Add multicast entries */
	if (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		list_add_tail(&mac->qe, &rxf->mcast_active_q);
		bna_bfi_mcast_add_req(rxf, mac);
		return 1;
	}

	return 0;
}
static int
bna_rxf_vlan_cfg_apply(struct bna_rxf *rxf)
{
	u8 vlan_pending_bitmask;
	int block_idx = 0;

	if (rxf->vlan_pending_bitmask) {
		vlan_pending_bitmask = rxf->vlan_pending_bitmask;
		while (!(vlan_pending_bitmask & 0x1)) {
			block_idx++;
			vlan_pending_bitmask >>= 1;
		}
		rxf->vlan_pending_bitmask &= ~(1 << block_idx);
		bna_bfi_rx_vlan_filter_set(rxf, block_idx);
		return 1;
	}

	return 0;
}

static int
bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	struct list_head *qe;
	struct bna_mac *mac;
	int ret;

	/* Throw away delete pending mcast entries */
	while (!list_empty(&rxf->mcast_pending_del_q)) {
		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		ret = bna_rxf_mcast_del(rxf, mac, cleanup);
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
		if (ret)
			return ret;
	}

	/* Move active mcast entries to pending_add_q */
	while (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		bfa_q_qe_init(qe);
		list_add_tail(qe, &rxf->mcast_pending_add_q);
		mac = (struct bna_mac *)qe;
		if (bna_rxf_mcast_del(rxf, mac, cleanup))
			return 1;
	}

	return 0;
}

static int
bna_rxf_rss_cfg_apply(struct bna_rxf *rxf)
{
	if (rxf->rss_pending) {
		if (rxf->rss_pending & BNA_RSS_F_RIT_PENDING) {
			rxf->rss_pending &= ~BNA_RSS_F_RIT_PENDING;
			bna_bfi_rit_cfg(rxf);
			return 1;
		}

		if (rxf->rss_pending & BNA_RSS_F_CFG_PENDING) {
			rxf->rss_pending &= ~BNA_RSS_F_CFG_PENDING;
			bna_bfi_rss_cfg(rxf);
			return 1;
		}

		if (rxf->rss_pending & BNA_RSS_F_STATUS_PENDING) {
			rxf->rss_pending &= ~BNA_RSS_F_STATUS_PENDING;
			bna_bfi_rss_enable(rxf);
			return 1;
		}
	}

	return 0;
}
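
/*
 * Each cfg_apply/cfg_reset helper posts at most one firmware command per call
 * and returns 1 if a command was issued (the state machine then waits for
 * RXF_E_FW_RESP before continuing), or 0 if nothing is pending.
 * bna_rxf_cfg_apply() walks the helpers in a fixed order: ucast, mcast,
 * promisc, allmulti, VLAN filter, VLAN strip, RSS.
 */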
static int
bna_rxf_cfg_apply(struct bna_rxf *rxf)
{
	if (bna_rxf_ucast_cfg_apply(rxf))
		return 1;

	if (bna_rxf_mcast_cfg_apply(rxf))
		return 1;

	if (bna_rxf_promisc_cfg_apply(rxf))
		return 1;

	if (bna_rxf_allmulti_cfg_apply(rxf))
		return 1;

	if (bna_rxf_vlan_cfg_apply(rxf))
		return 1;

	if (bna_rxf_vlan_strip_cfg_apply(rxf))
		return 1;

	if (bna_rxf_rss_cfg_apply(rxf))
		return 1;

	return 0;
}

/* Only software reset */
static int
bna_rxf_fltr_clear(struct bna_rxf *rxf)
{
	if (bna_rxf_ucast_cfg_reset(rxf, BNA_HARD_CLEANUP))
		return 1;

	if (bna_rxf_mcast_cfg_reset(rxf, BNA_HARD_CLEANUP))
		return 1;

	if (bna_rxf_promisc_cfg_reset(rxf, BNA_HARD_CLEANUP))
		return 1;

	if (bna_rxf_allmulti_cfg_reset(rxf, BNA_HARD_CLEANUP))
		return 1;

	return 0;
}

static void
bna_rxf_cfg_reset(struct bna_rxf *rxf)
{
	bna_rxf_ucast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_mcast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_promisc_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_allmulti_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_vlan_cfg_soft_reset(rxf);
	bna_rxf_rss_cfg_soft_reset(rxf);
}

static void
bna_rit_init(struct bna_rxf *rxf, int rit_size)
{
	struct bna_rx *rx = rxf->rx;
	struct bna_rxp *rxp;
	struct list_head *qe;
	int offset = 0;

	rxf->rit_size = rit_size;
	list_for_each(qe, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe;
		rxf->rit[offset] = rxp->cq.ccb->id;
		offset++;
	}
}
void
bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
}

void
bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
			struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_mcast_add_req *req =
		&rxf->bfi_enet_cmd.mcast_add_req;
	struct bfi_enet_mcast_add_rsp *rsp =
		(struct bfi_enet_mcast_add_rsp *)msghdr;

	bna_rxf_mchandle_attach(rxf, (u8 *)&req->mac_addr,
		ntohs(rsp->handle));
	bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
}

static void
bna_rxf_init(struct bna_rxf *rxf,
		struct bna_rx *rx,
		struct bna_rx_config *q_config,
		struct bna_res_info *res_info)
{
	rxf->rx = rx;

	INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
	INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
	rxf->ucast_pending_set = 0;
	rxf->ucast_active_set = 0;
	INIT_LIST_HEAD(&rxf->ucast_active_q);
	rxf->ucast_pending_mac = NULL;

	INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
	INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
	INIT_LIST_HEAD(&rxf->mcast_active_q);
	INIT_LIST_HEAD(&rxf->mcast_handle_q);

	if (q_config->paused)
		rxf->flags |= BNA_RXF_F_PAUSED;

	rxf->rit = (u8 *)
		res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info.mdl[0].kva;
	bna_rit_init(rxf, q_config->num_paths);

	rxf->rss_status = q_config->rss_status;
	if (rxf->rss_status == BNA_STATUS_T_ENABLED) {
		rxf->rss_cfg = q_config->rss_config;
		rxf->rss_pending |= BNA_RSS_F_CFG_PENDING;
		rxf->rss_pending |= BNA_RSS_F_RIT_PENDING;
		rxf->rss_pending |= BNA_RSS_F_STATUS_PENDING;
	}

	rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
	memset(rxf->vlan_filter_table, 0,
			(sizeof(u32) * (BFI_ENET_VLAN_ID_MAX / 32)));
	rxf->vlan_filter_table[0] |= 1; /* for pure priority tagged frames */
	rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;

	rxf->vlan_strip_status = q_config->vlan_strip_status;

	bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
}

static void
bna_rxf_uninit(struct bna_rxf *rxf)
{
	struct bna_mac *mac;

	rxf->ucast_pending_set = 0;
	rxf->ucast_active_set = 0;

	while (!list_empty(&rxf->ucast_pending_add_q)) {
		bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
		bfa_q_qe_init(&mac->qe);
		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
	}

	if (rxf->ucast_pending_mac) {
		bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod,
			rxf->ucast_pending_mac);
		rxf->ucast_pending_mac = NULL;
	}

	while (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
		bfa_q_qe_init(&mac->qe);
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
	}

	rxf->rxmode_pending = 0;
	rxf->rxmode_pending_bitmask = 0;
	if (rxf->rx->bna->promisc_rid == rxf->rx->rid)
		rxf->rx->bna->promisc_rid = BFI_INVALID_RID;
	if (rxf->rx->bna->default_mode_rid == rxf->rx->rid)
		rxf->rx->bna->default_mode_rid = BFI_INVALID_RID;

	rxf->rss_pending = 0;
	rxf->vlan_strip_pending = false;

	rxf->flags = 0;

	rxf->rx = NULL;
}
static void
bna_rx_cb_rxf_started(struct bna_rx *rx)
{
	bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
}

static void
bna_rxf_start(struct bna_rxf *rxf)
{
	rxf->start_cbfn = bna_rx_cb_rxf_started;
	rxf->start_cbarg = rxf->rx;
	bfa_fsm_send_event(rxf, RXF_E_START);
}

static void
bna_rx_cb_rxf_stopped(struct bna_rx *rx)
{
	bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
}

static void
bna_rxf_stop(struct bna_rxf *rxf)
{
	rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
	rxf->stop_cbarg = rxf->rx;
	bfa_fsm_send_event(rxf, RXF_E_STOP);
}

static void
bna_rxf_fail(struct bna_rxf *rxf)
{
	bfa_fsm_send_event(rxf, RXF_E_FAIL);
}
enum bna_cb_status
bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
		 void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->ucast_pending_mac == NULL) {
		rxf->ucast_pending_mac =
			bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
		if (rxf->ucast_pending_mac == NULL)
			return BNA_CB_UCAST_CAM_FULL;
		bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
	}

	memcpy(rxf->ucast_pending_mac->addr, ucmac, ETH_ALEN);
	rxf->ucast_pending_set = 1;
	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;

	bfa_fsm_send_event(rxf, RXF_E_CONFIG);

	return BNA_CB_SUCCESS;
}

enum bna_cb_status
bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
		 void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_rxf *rxf = &rx->rxf;
	struct bna_mac *mac;

	/* Check if already added or pending addition */
	if (bna_mac_find(&rxf->mcast_active_q, addr) ||
		bna_mac_find(&rxf->mcast_pending_add_q, addr)) {
		if (cbfn)
			cbfn(rx->bna->bnad, rx);
		return BNA_CB_SUCCESS;
	}

	mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
	if (mac == NULL)
		return BNA_CB_MCAST_LIST_FULL;
	bfa_q_qe_init(&mac->qe);
	memcpy(mac->addr, addr, ETH_ALEN);
	list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);

	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;

	bfa_fsm_send_event(rxf, RXF_E_CONFIG);

	return BNA_CB_SUCCESS;
}

enum bna_cb_status
bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
		     void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_rxf *rxf = &rx->rxf;
	struct list_head list_head;
	struct list_head *qe;
	u8 *mcaddr;
	struct bna_mac *mac;
	int i;

	/* Allocate nodes */
	INIT_LIST_HEAD(&list_head);
	for (i = 0, mcaddr = mclist; i < count; i++) {
		mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
		if (mac == NULL)
			goto err_return;
		bfa_q_qe_init(&mac->qe);
		memcpy(mac->addr, mcaddr, ETH_ALEN);
		list_add_tail(&mac->qe, &list_head);

		mcaddr += ETH_ALEN;
	}

	/* Purge the pending_add_q */
	while (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
	}

	/* Schedule active_q entries for deletion */
	while (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
	}

	/* Add the new entries */
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
	}

	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;
	bfa_fsm_send_event(rxf, RXF_E_CONFIG);

	return BNA_CB_SUCCESS;

err_return:
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
	}

	return BNA_CB_MCAST_LIST_FULL;
}

void
bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
{
	struct bna_rxf *rxf = &rx->rxf;
	int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
	int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
	int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);

	rxf->vlan_filter_table[index] |= bit;
	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
		rxf->vlan_pending_bitmask |= (1 << group_id);
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
}

void
bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
{
	struct bna_rxf *rxf = &rx->rxf;
	int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
	int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
	int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);

	rxf->vlan_filter_table[index] &= ~bit;
	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
		rxf->vlan_pending_bitmask |= (1 << group_id);
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
}
static int
bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
{
	struct bna_mac *mac = NULL;
	struct list_head *qe;

	/* Delete MAC addresses previously added */
	if (!list_empty(&rxf->ucast_pending_del_q)) {
		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
		return 1;
	}

	/* Set default unicast MAC */
	if (rxf->ucast_pending_set) {
		rxf->ucast_pending_set = 0;
		memcpy(rxf->ucast_active_mac.addr,
			rxf->ucast_pending_mac->addr, ETH_ALEN);
		rxf->ucast_active_set = 1;
		bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
			BFI_ENET_H2I_MAC_UCAST_SET_REQ);
		return 1;
	}

	/* Add additional MAC entries */
	if (!list_empty(&rxf->ucast_pending_add_q)) {
		bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		list_add_tail(&mac->qe, &rxf->ucast_active_q);
		bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ);
		return 1;
	}

	return 0;
}
static int
bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	struct list_head *qe;
	struct bna_mac *mac;

	/* Throw away delete pending ucast entries */
	while (!list_empty(&rxf->ucast_pending_del_q)) {
		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		if (cleanup == BNA_SOFT_CLEANUP)
			bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
		else {
			bna_bfi_ucast_req(rxf, mac,
				BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
			bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
			return 1;
		}
	}

	/* Move active ucast entries to pending_add_q */
	while (!list_empty(&rxf->ucast_active_q)) {
		bfa_q_deq(&rxf->ucast_active_q, &qe);
		bfa_q_qe_init(qe);
		list_add_tail(qe, &rxf->ucast_pending_add_q);
		if (cleanup == BNA_HARD_CLEANUP) {
			mac = (struct bna_mac *)qe;
			bna_bfi_ucast_req(rxf, mac,
				BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
			return 1;
		}
	}

	if (rxf->ucast_active_set) {
		rxf->ucast_pending_set = 1;
		rxf->ucast_active_set = 0;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
				BFI_ENET_H2I_MAC_UCAST_CLR_REQ);
			return 1;
		}
	}

	return 0;
}
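
/*
 * Promiscuous and allmulti mode changes are tracked as pending enable/disable
 * commands (rxmode_pending plus rxmode_pending_bitmask) until the matching
 * firmware request is posted, at which point the mode is reflected in
 * rxmode_active.  bna->promisc_rid records which Rx owns promiscuous mode.
 */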
static int
bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;

	/* Enable/disable promiscuous mode */
	if (is_promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* move promisc configuration from pending -> active */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active |= BNA_RXMODE_PROMISC;
		bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_ENABLED);
		return 1;
	} else if (is_promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* move promisc configuration from pending -> active */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		bna->promisc_rid = BFI_INVALID_RID;
		bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
		return 1;
	}

	return 0;
}

static int
bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	struct bna *bna = rxf->rx->bna;

	/* Clear pending promisc mode disable */
	if (is_promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		bna->promisc_rid = BFI_INVALID_RID;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
			return 1;
		}
	}

	/* Move promisc mode config from active -> pending */
	if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
		promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
			return 1;
		}
	}

	return 0;
}

static int
bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf)
{
	/* Enable/disable allmulti mode */
	if (is_allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* move allmulti configuration from pending -> active */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active |= BNA_RXMODE_ALLMULTI;
		bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_DISABLED);
		return 1;
	} else if (is_allmulti_disable(rxf->rxmode_pending,
					rxf->rxmode_pending_bitmask)) {
		/* move allmulti configuration from pending -> active */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
		return 1;
	}

	return 0;
}

static int
bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	/* Clear pending allmulti mode disable */
	if (is_allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
			return 1;
		}
	}

	/* Move allmulti mode config from active -> pending */
	if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
		allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
			return 1;
		}
	}

	return 0;
}
static int
bna_rxf_promisc_enable(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;
	int ret = 0;

	if (is_promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask) ||
		(rxf->rxmode_active & BNA_RXMODE_PROMISC)) {
		/* Do nothing if pending enable or already enabled */
	} else if (is_promisc_disable(rxf->rxmode_pending,
					rxf->rxmode_pending_bitmask)) {
		/* Turn off pending disable command */
		promisc_inactive(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask);
	} else {
		/* Schedule enable */
		promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		bna->promisc_rid = rxf->rx->rid;
		ret = 1;
	}

	return ret;
}

static int
bna_rxf_promisc_disable(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;
	int ret = 0;

	if (is_promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask) ||
		(!(rxf->rxmode_active & BNA_RXMODE_PROMISC))) {
		/* Do nothing if pending disable or already disabled */
	} else if (is_promisc_enable(rxf->rxmode_pending,
					rxf->rxmode_pending_bitmask)) {
		/* Turn off pending enable command */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		bna->promisc_rid = BFI_INVALID_RID;
	} else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
		/* Schedule disable */
		promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;
}

static int
bna_rxf_allmulti_enable(struct bna_rxf *rxf)
{
	int ret = 0;

	if (is_allmulti_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask) ||
			(rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) {
		/* Do nothing if pending enable or already enabled */
	} else if (is_allmulti_disable(rxf->rxmode_pending,
					rxf->rxmode_pending_bitmask)) {
		/* Turn off pending disable command */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
	} else {
		/* Schedule enable */
		allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;
}

static int
bna_rxf_allmulti_disable(struct bna_rxf *rxf)
{
	int ret = 0;

	if (is_allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask) ||
		(!(rxf->rxmode_active & BNA_RXMODE_ALLMULTI))) {
		/* Do nothing if pending disable or already disabled */
	} else if (is_allmulti_enable(rxf->rxmode_pending,
					rxf->rxmode_pending_bitmask)) {
		/* Turn off pending enable command */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
	} else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
		/* Schedule disable */
		allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;
}

static int
bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf)
{
	if (rxf->vlan_strip_pending) {
		rxf->vlan_strip_pending = false;
		bna_bfi_vlan_strip_enable(rxf);
		return 1;
	}

	return 0;
}
/* RX */
#define BNA_GET_RXQS(qcfg)	(((qcfg)->rxp_type == BNA_RXP_SINGLE) ?	\
	(qcfg)->num_paths : ((qcfg)->num_paths * 2))

#define SIZE_TO_PAGES(size)	(((size) >> PAGE_SHIFT) + ((((size) &	\
	(PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))

#define call_rx_stop_cbfn(rx)						\
do {									\
	if ((rx)->stop_cbfn) {						\
		void (*cbfn)(void *, struct bna_rx *);			\
		void *cbarg;						\
		cbfn = (rx)->stop_cbfn;					\
		cbarg = (rx)->stop_cbarg;				\
		(rx)->stop_cbfn = NULL;					\
		(rx)->stop_cbarg = NULL;				\
		cbfn(cbarg, rx);					\
	}								\
} while (0)

#define call_rx_stall_cbfn(rx)						\
do {									\
	if ((rx)->rx_stall_cbfn)					\
		(rx)->rx_stall_cbfn((rx)->bna->bnad, (rx));		\
} while (0)
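
/*
 * bfi_enet_datapath_q_init() copies a queue's page table address, first-entry
 * address, page count and page size from the bna queue page table (bna_qpt)
 * into the BFI queue structure that is handed to firmware.
 */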
#define bfi_enet_datapath_q_init(bfi_q, bna_qpt)			\
do {									\
	struct bna_dma_addr cur_q_addr =				\
		*((struct bna_dma_addr *)((bna_qpt)->kv_qpt_ptr));	\
	(bfi_q)->pg_tbl.a32.addr_lo = (bna_qpt)->hw_qpt_ptr.lsb;	\
	(bfi_q)->pg_tbl.a32.addr_hi = (bna_qpt)->hw_qpt_ptr.msb;	\
	(bfi_q)->first_entry.a32.addr_lo = cur_q_addr.lsb;		\
	(bfi_q)->first_entry.a32.addr_hi = cur_q_addr.msb;		\
	(bfi_q)->pages = htons((u16)(bna_qpt)->page_count);		\
	(bfi_q)->page_sz = htons((u16)(bna_qpt)->page_size);		\
} while (0)

static void bna_bfi_rx_enet_start(struct bna_rx *rx);
static void bna_rx_enet_stop(struct bna_rx *rx);
static void bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx);

bfa_fsm_state_decl(bna_rx, stopped,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, start_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxf_start_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, started,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, stop_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, cleanup_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, failed,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, quiesce_wait,
	struct bna_rx, enum bna_rx_event);
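
/*
 * RX state machine overview: bringing an Rx up walks stopped -> start_wait
 * (enet start posted to firmware) -> rxf_start_wait -> started.  RX_E_STOP
 * unwinds through rxf_stop_wait and stop_wait, then cleanup_wait, which waits
 * for the driver's cleanup callback before returning to stopped.  RX_E_FAIL
 * from a running state drops into "failed" after failing the RXF.
 */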
  1174. static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
  1175. {
  1176. call_rx_stop_cbfn(rx);
  1177. }
  1178. static void bna_rx_sm_stopped(struct bna_rx *rx,
  1179. enum bna_rx_event event)
  1180. {
  1181. switch (event) {
  1182. case RX_E_START:
  1183. bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
  1184. break;
  1185. case RX_E_STOP:
  1186. call_rx_stop_cbfn(rx);
  1187. break;
  1188. case RX_E_FAIL:
  1189. /* no-op */
  1190. break;
  1191. default:
  1192. bfa_sm_fault(event);
  1193. break;
  1194. }
  1195. }
static void
bna_rx_sm_start_wait_entry(struct bna_rx *rx)
{
	bna_bfi_rx_enet_start(rx);
}

static void
bna_rx_sm_stop_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_FAIL:
	case RX_E_STOPPED:
		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
		break;
	case RX_E_STARTED:
		bna_rx_enet_stop(rx);
		break;
	default:
		bfa_sm_fault(event);
		break;
	}
}

static void
bna_rx_sm_start_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_stop_wait);
		break;
	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		break;
	case RX_E_STARTED:
		bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);
		break;
	default:
		bfa_sm_fault(event);
		break;
	}
}

static void
bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
{
	rx->rx_post_cbfn(rx->bna->bnad, rx);
	bna_rxf_start(&rx->rxf);
}

static void
bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
		bna_rxf_fail(&rx->rxf);
		call_rx_stall_cbfn(rx);
		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
		break;
	case RX_E_RXF_STARTED:
		bna_rxf_stop(&rx->rxf);
		break;
	case RX_E_RXF_STOPPED:
		bfa_fsm_set_state(rx, bna_rx_sm_stop_wait);
		call_rx_stall_cbfn(rx);
		bna_rx_enet_stop(rx);
		break;
	default:
		bfa_sm_fault(event);
		break;
	}
}

static void
bna_rx_sm_started_entry(struct bna_rx *rx)
{
	struct bna_rxp *rxp;
	struct list_head *qe_rxp;
	int is_regular = (rx->type == BNA_RX_T_REGULAR);

	/* Start IB */
	list_for_each(qe_rxp, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe_rxp;
		bna_ib_start(rx->bna, &rxp->cq.ib, is_regular);
	}

	bna_ethport_cb_rx_started(&rx->bna->ethport);
}

static void
bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
		bna_ethport_cb_rx_stopped(&rx->bna->ethport);
		bna_rxf_stop(&rx->rxf);
		break;
	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_failed);
		bna_ethport_cb_rx_stopped(&rx->bna->ethport);
		bna_rxf_fail(&rx->rxf);
		call_rx_stall_cbfn(rx);
		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
		break;
	default:
		bfa_sm_fault(event);
		break;
	}
}

static void
bna_rx_sm_rxf_start_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
		break;
	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_failed);
		bna_rxf_fail(&rx->rxf);
		call_rx_stall_cbfn(rx);
		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
		break;
	case RX_E_RXF_STARTED:
		bfa_fsm_set_state(rx, bna_rx_sm_started);
		break;
	default:
		bfa_sm_fault(event);
		break;
	}
}

static void
bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_FAIL:
	case RX_E_RXF_STOPPED:
		/* No-op */
		break;
	case RX_E_CLEANUP_DONE:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		break;
	default:
		bfa_sm_fault(event);
		break;
	}
}

static void
bna_rx_sm_failed_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_failed(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_START:
		bfa_fsm_set_state(rx, bna_rx_sm_quiesce_wait);
		break;
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
		break;
	case RX_E_FAIL:
	case RX_E_RXF_STARTED:
	case RX_E_RXF_STOPPED:
		/* No-op */
		break;
	case RX_E_CLEANUP_DONE:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		break;
	default:
		bfa_sm_fault(event);
		break;
	}
}

static void
bna_rx_sm_quiesce_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_quiesce_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
		break;
	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_failed);
		break;
	case RX_E_CLEANUP_DONE:
		bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
		break;
	default:
		bfa_sm_fault(event);
		break;
	}
}

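/*
 * Build a BFI_ENET_H2I_RX_CFG_SET_REQ describing every queue set
 * (small, large/single and completion queues plus IB settings) and
 * post it on the firmware message queue.
 */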
static void
bna_bfi_rx_enet_start(struct bna_rx *rx)
{
	struct bfi_enet_rx_cfg_req *cfg_req = &rx->bfi_enet_cmd.cfg_req;
	struct bna_rxp *rxp = NULL;
	struct bna_rxq *q0 = NULL, *q1 = NULL;
	struct list_head *rxp_qe;
	int i;

	bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_CFG_SET_REQ, 0, rx->rid);
	cfg_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req)));

	cfg_req->num_queue_sets = rx->num_paths;
	for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
		i < rx->num_paths;
		i++, rxp_qe = bfa_q_next(rxp_qe)) {
		rxp = (struct bna_rxp *)rxp_qe;

		GET_RXQS(rxp, q0, q1);
		switch (rxp->type) {
		case BNA_RXP_SLR:
		case BNA_RXP_HDS:
			/* Small RxQ */
			bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].qs.q,
						&q1->qpt);
			cfg_req->q_cfg[i].qs.rx_buffer_size =
				htons((u16)q1->buffer_size);
			/* Fall through */
		case BNA_RXP_SINGLE:
			/* Large/Single RxQ */
			bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q,
						&q0->qpt);
			q0->buffer_size =
				bna_enet_mtu_get(&rx->bna->enet);
			cfg_req->q_cfg[i].ql.rx_buffer_size =
				htons((u16)q0->buffer_size);
			break;
		default:
			BUG_ON(1);
		}

		bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].cq.q,
					&rxp->cq.qpt);

		cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
			rxp->cq.ib.ib_seg_host_addr.lsb;
		cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
			rxp->cq.ib.ib_seg_host_addr.msb;
		cfg_req->q_cfg[i].ib.intr.msix_index =
			htons((u16)rxp->cq.ib.intr_vector);
	}

	cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
	cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.msix = (rxp->cq.ib.intr_type == BNA_INTR_T_MSIX)
				? BNA_STATUS_T_ENABLED :
				BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.coalescing_timeout =
			htonl((u32)rxp->cq.ib.coalescing_timeo);
	cfg_req->ib_cfg.inter_pkt_timeout =
			htonl((u32)rxp->cq.ib.interpkt_timeo);
	cfg_req->ib_cfg.inter_pkt_count = (u8)rxp->cq.ib.interpkt_count;

	switch (rxp->type) {
	case BNA_RXP_SLR:
		cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_LARGE_SMALL;
		break;
	case BNA_RXP_HDS:
		cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_HDS;
		cfg_req->rx_cfg.hds.type = rx->hds_cfg.hdr_type;
		cfg_req->rx_cfg.hds.force_offset = rx->hds_cfg.forced_offset;
		cfg_req->rx_cfg.hds.max_header_size = rx->hds_cfg.forced_offset;
		break;
	case BNA_RXP_SINGLE:
		cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_SINGLE;
		break;
	default:
		BUG_ON(1);
	}
	cfg_req->rx_cfg.strip_vlan = rx->rxf.vlan_strip_status;

	bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_rx_cfg_req), &cfg_req->mh);
	bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
}

static void
bna_bfi_rx_enet_stop(struct bna_rx *rx)
{
	struct bfi_enet_req *req = &rx->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_CFG_CLR_REQ, 0, rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
	bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
		&req->mh);
	bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
}

static void
bna_rx_enet_stop(struct bna_rx *rx)
{
	struct bna_rxp *rxp;
	struct list_head *qe_rxp;

	/* Stop IB */
	list_for_each(qe_rxp, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe_rxp;
		bna_ib_stop(rx->bna, &rxp->cq.ib);
	}

	bna_bfi_rx_enet_stop(rx);
}

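/* Check that enough rx, rxp and rxq objects are free for this configuration. */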
static int
bna_rx_res_check(struct bna_rx_mod *rx_mod, struct bna_rx_config *rx_cfg)
{
	if ((rx_mod->rx_free_count == 0) ||
		(rx_mod->rxp_free_count == 0) ||
		(rx_mod->rxq_free_count == 0))
		return 0;

	if (rx_cfg->rxp_type == BNA_RXP_SINGLE) {
		if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
			(rx_mod->rxq_free_count < rx_cfg->num_paths))
			return 0;
	} else {
		if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
			(rx_mod->rxq_free_count < (2 * rx_cfg->num_paths)))
			return 0;
	}

	return 1;
}

static struct bna_rxq *
bna_rxq_get(struct bna_rx_mod *rx_mod)
{
	struct bna_rxq *rxq = NULL;
	struct list_head *qe = NULL;

	bfa_q_deq(&rx_mod->rxq_free_q, &qe);
	rx_mod->rxq_free_count--;
	rxq = (struct bna_rxq *)qe;
	bfa_q_qe_init(&rxq->qe);

	return rxq;
}

static void
bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
{
	bfa_q_qe_init(&rxq->qe);
	list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
	rx_mod->rxq_free_count++;
}

static struct bna_rxp *
bna_rxp_get(struct bna_rx_mod *rx_mod)
{
	struct list_head *qe = NULL;
	struct bna_rxp *rxp = NULL;

	bfa_q_deq(&rx_mod->rxp_free_q, &qe);
	rx_mod->rxp_free_count--;
	rxp = (struct bna_rxp *)qe;
	bfa_q_qe_init(&rxp->qe);

	return rxp;
}

static void
bna_rxp_put(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
{
	bfa_q_qe_init(&rxp->qe);
	list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
	rx_mod->rxp_free_count++;
}

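/*
 * Allocation takes a bna_rx from the head (regular) or tail (loopback)
 * of the free list; bna_rx_put() re-inserts it sorted by rid so the
 * free list stays ordered.
 */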
static struct bna_rx *
bna_rx_get(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
	struct list_head *qe = NULL;
	struct bna_rx *rx = NULL;

	if (type == BNA_RX_T_REGULAR)
		bfa_q_deq(&rx_mod->rx_free_q, &qe);
	else
		bfa_q_deq_tail(&rx_mod->rx_free_q, &qe);

	rx_mod->rx_free_count--;
	rx = (struct bna_rx *)qe;
	bfa_q_qe_init(&rx->qe);
	list_add_tail(&rx->qe, &rx_mod->rx_active_q);
	rx->type = type;

	return rx;
}

static void
bna_rx_put(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
{
	struct list_head *prev_qe = NULL;
	struct list_head *qe;

	bfa_q_qe_init(&rx->qe);

	list_for_each(qe, &rx_mod->rx_free_q) {
		if (((struct bna_rx *)qe)->rid < rx->rid)
			prev_qe = qe;
		else
			break;
	}

	if (prev_qe == NULL) {
		/* This is the first entry */
		bfa_q_enq_head(&rx_mod->rx_free_q, &rx->qe);
	} else if (bfa_q_next(prev_qe) == &rx_mod->rx_free_q) {
		/* This is the last entry */
		list_add_tail(&rx->qe, &rx_mod->rx_free_q);
	} else {
		/* Somewhere in the middle */
		bfa_q_next(&rx->qe) = bfa_q_next(prev_qe);
		bfa_q_prev(&rx->qe) = prev_qe;
		bfa_q_next(prev_qe) = &rx->qe;
		bfa_q_prev(bfa_q_next(&rx->qe)) = &rx->qe;
	}

	rx_mod->rx_free_count++;
}

static void
bna_rxp_add_rxqs(struct bna_rxp *rxp, struct bna_rxq *q0,
		struct bna_rxq *q1)
{
	switch (rxp->type) {
	case BNA_RXP_SINGLE:
		rxp->rxq.single.only = q0;
		rxp->rxq.single.reserved = NULL;
		break;
	case BNA_RXP_SLR:
		rxp->rxq.slr.large = q0;
		rxp->rxq.slr.small = q1;
		break;
	case BNA_RXP_HDS:
		rxp->rxq.hds.data = q0;
		rxp->rxq.hds.hdr = q1;
		break;
	default:
		break;
	}
}

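/*
 * Fill in the hardware and software queue page tables for an RxQ and
 * its completion queue from the pre-allocated DMA page descriptors.
 */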
static void
bna_rxq_qpt_setup(struct bna_rxq *rxq,
		struct bna_rxp *rxp,
		u32 page_count,
		u32 page_size,
		struct bna_mem_descr *qpt_mem,
		struct bna_mem_descr *swqpt_mem,
		struct bna_mem_descr *page_mem)
{
	int i;

	rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
	rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
	rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
	rxq->qpt.page_count = page_count;
	rxq->qpt.page_size = page_size;

	rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;

	for (i = 0; i < rxq->qpt.page_count; i++) {
		rxq->rcb->sw_qpt[i] = page_mem[i].kva;
		((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
			page_mem[i].dma.lsb;
		((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
			page_mem[i].dma.msb;
	}
}

static void
bna_rxp_cqpt_setup(struct bna_rxp *rxp,
		u32 page_count,
		u32 page_size,
		struct bna_mem_descr *qpt_mem,
		struct bna_mem_descr *swqpt_mem,
		struct bna_mem_descr *page_mem)
{
	int i;

	rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
	rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
	rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
	rxp->cq.qpt.page_count = page_count;
	rxp->cq.qpt.page_size = page_size;

	rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;

	for (i = 0; i < rxp->cq.qpt.page_count; i++) {
		rxp->cq.ccb->sw_qpt[i] = page_mem[i].kva;
		((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
			page_mem[i].dma.lsb;
		((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
			page_mem[i].dma.msb;
	}
}

static void
bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx)
{
	struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;

	bfa_wc_down(&rx_mod->rx_stop_wc);
}

static void
bna_rx_mod_cb_rx_stopped_all(void *arg)
{
	struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;

	if (rx_mod->stop_cbfn)
		rx_mod->stop_cbfn(&rx_mod->bna->enet);
	rx_mod->stop_cbfn = NULL;
}

static void
bna_rx_start(struct bna_rx *rx)
{
	rx->rx_flags |= BNA_RX_F_ENET_STARTED;
	if (rx->rx_flags & BNA_RX_F_ENABLED)
		bfa_fsm_send_event(rx, RX_E_START);
}

static void
bna_rx_stop(struct bna_rx *rx)
{
	rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
	if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
		bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx);
	else {
		rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
		rx->stop_cbarg = &rx->bna->rx_mod;
		bfa_fsm_send_event(rx, RX_E_STOP);
	}
}

static void
bna_rx_fail(struct bna_rx *rx)
{
	/* Indicate Enet is not enabled, and failed */
	rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
	bfa_fsm_send_event(rx, RX_E_FAIL);
}

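/* Rx module level start/stop/fail: fan the event out to every active Rx. */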
void
bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
	struct bna_rx *rx;
	struct list_head *qe;

	rx_mod->flags |= BNA_RX_MOD_F_ENET_STARTED;
	if (type == BNA_RX_T_LOOPBACK)
		rx_mod->flags |= BNA_RX_MOD_F_ENET_LOOPBACK;

	list_for_each(qe, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)qe;
		if (rx->type == type)
			bna_rx_start(rx);
	}
}

void
bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
	struct bna_rx *rx;
	struct list_head *qe;

	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;

	rx_mod->stop_cbfn = bna_enet_cb_rx_stopped;

	bfa_wc_init(&rx_mod->rx_stop_wc, bna_rx_mod_cb_rx_stopped_all, rx_mod);

	list_for_each(qe, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)qe;
		if (rx->type == type) {
			bfa_wc_up(&rx_mod->rx_stop_wc);
			bna_rx_stop(rx);
		}
	}

	bfa_wc_wait(&rx_mod->rx_stop_wc);
}

void
bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
{
	struct bna_rx *rx;
	struct list_head *qe;

	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;

	list_for_each(qe, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)qe;
		bna_rx_fail(rx);
	}
}

void
bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
		struct bna_res_info *res_info)
{
	int index;
	struct bna_rx *rx_ptr;
	struct bna_rxp *rxp_ptr;
	struct bna_rxq *rxq_ptr;

	rx_mod->bna = bna;
	rx_mod->flags = 0;

	rx_mod->rx = (struct bna_rx *)
		res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
	rx_mod->rxp = (struct bna_rxp *)
		res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;
	rx_mod->rxq = (struct bna_rxq *)
		res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva;

	/* Initialize the queues */
	INIT_LIST_HEAD(&rx_mod->rx_free_q);
	rx_mod->rx_free_count = 0;
	INIT_LIST_HEAD(&rx_mod->rxq_free_q);
	rx_mod->rxq_free_count = 0;
	INIT_LIST_HEAD(&rx_mod->rxp_free_q);
	rx_mod->rxp_free_count = 0;
	INIT_LIST_HEAD(&rx_mod->rx_active_q);

	/* Build RX queues */
	for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
		rx_ptr = &rx_mod->rx[index];

		bfa_q_qe_init(&rx_ptr->qe);
		INIT_LIST_HEAD(&rx_ptr->rxp_q);
		rx_ptr->bna = NULL;
		rx_ptr->rid = index;
		rx_ptr->stop_cbfn = NULL;
		rx_ptr->stop_cbarg = NULL;

		list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q);
		rx_mod->rx_free_count++;
	}

	/* build RX-path queue */
	for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
		rxp_ptr = &rx_mod->rxp[index];
		bfa_q_qe_init(&rxp_ptr->qe);
		list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
		rx_mod->rxp_free_count++;
	}

	/* build RXQ queue */
	for (index = 0; index < (bna->ioceth.attr.num_rxp * 2); index++) {
		rxq_ptr = &rx_mod->rxq[index];
		bfa_q_qe_init(&rxq_ptr->qe);
		list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
		rx_mod->rxq_free_count++;
	}
}

void
bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
{
	struct list_head *qe;
	int i;

	i = 0;
	list_for_each(qe, &rx_mod->rx_free_q)
		i++;

	i = 0;
	list_for_each(qe, &rx_mod->rxp_free_q)
		i++;

	i = 0;
	list_for_each(qe, &rx_mod->rxq_free_q)
		i++;

	rx_mod->bna = NULL;
}

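/*
 * Firmware response handlers: copy back the queue handles, program the
 * doorbell addresses, reset producer/consumer indexes and advance the
 * Rx state machine.
 */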
void
bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_rx_cfg_rsp *cfg_rsp = &rx->bfi_enet_cmd.cfg_rsp;
	struct bna_rxp *rxp = NULL;
	struct bna_rxq *q0 = NULL, *q1 = NULL;
	struct list_head *rxp_qe;
	int i;

	bfa_msgq_rsp_copy(&rx->bna->msgq, (u8 *)cfg_rsp,
		sizeof(struct bfi_enet_rx_cfg_rsp));

	rx->hw_id = cfg_rsp->hw_id;

	for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
		i < rx->num_paths;
		i++, rxp_qe = bfa_q_next(rxp_qe)) {
		rxp = (struct bna_rxp *)rxp_qe;
		GET_RXQS(rxp, q0, q1);

		/* Setup doorbells */
		rxp->cq.ccb->i_dbell->doorbell_addr =
			rx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].i_dbell);
		rxp->hw_id = cfg_rsp->q_handles[i].hw_cqid;
		q0->rcb->q_dbell =
			rx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].ql_dbell);
		q0->hw_id = cfg_rsp->q_handles[i].hw_lqid;
		if (q1) {
			q1->rcb->q_dbell =
				rx->bna->pcidev.pci_bar_kva
				+ ntohl(cfg_rsp->q_handles[i].qs_dbell);
			q1->hw_id = cfg_rsp->q_handles[i].hw_sqid;
		}

		/* Initialize producer/consumer indexes */
		(*rxp->cq.ccb->hw_producer_index) = 0;
		rxp->cq.ccb->producer_index = 0;
		q0->rcb->producer_index = q0->rcb->consumer_index = 0;
		if (q1)
			q1->rcb->producer_index = q1->rcb->consumer_index = 0;
	}

	bfa_fsm_send_event(rx, RX_E_STARTED);
}

void
bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(rx, RX_E_STOPPED);
}

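/*
 * Compute the memory and interrupt resources needed for the requested
 * Rx configuration: CCBs, RCBs, queue page tables, data/header pages,
 * IB index segments, the RIT and MSI-X vectors.
 */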
void
bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
{
	u32 cq_size, hq_size, dq_size;
	u32 cpage_count, hpage_count, dpage_count;
	struct bna_mem_info *mem_info;
	u32 cq_depth;
	u32 hq_depth;
	u32 dq_depth;

	dq_depth = q_cfg->q_depth;
	hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q_depth);
	cq_depth = dq_depth + hq_depth;

	BNA_TO_POWER_OF_2_HIGH(cq_depth);
	cq_size = cq_depth * BFI_CQ_WI_SIZE;
	cq_size = ALIGN(cq_size, PAGE_SIZE);
	cpage_count = SIZE_TO_PAGES(cq_size);

	BNA_TO_POWER_OF_2_HIGH(dq_depth);
	dq_size = dq_depth * BFI_RXQ_WI_SIZE;
	dq_size = ALIGN(dq_size, PAGE_SIZE);
	dpage_count = SIZE_TO_PAGES(dq_size);

	if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
		BNA_TO_POWER_OF_2_HIGH(hq_depth);
		hq_size = hq_depth * BFI_RXQ_WI_SIZE;
		hq_size = ALIGN(hq_size, PAGE_SIZE);
		hpage_count = SIZE_TO_PAGES(hq_size);
	} else
		hpage_count = 0;

	res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = sizeof(struct bna_ccb);
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = sizeof(struct bna_rcb);
	mem_info->num = BNA_GET_RXQS(q_cfg);

	res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = cpage_count * sizeof(struct bna_dma_addr);
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = cpage_count * sizeof(void *);
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE;
	mem_info->num = cpage_count * q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = dpage_count * sizeof(struct bna_dma_addr);
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = dpage_count * sizeof(void *);
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE;
	mem_info->num = dpage_count * q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = hpage_count * sizeof(struct bna_dma_addr);
	mem_info->num = (hpage_count ? q_cfg->num_paths : 0);

	res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = hpage_count * sizeof(void *);
	mem_info->num = (hpage_count ? q_cfg->num_paths : 0);

	res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = (hpage_count ? PAGE_SIZE : 0);
	mem_info->num = (hpage_count ? (hpage_count * q_cfg->num_paths) : 0);

	res_info[BNA_RX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = BFI_IBIDX_SIZE;
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_RIT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = BFI_ENET_RSS_RIT_MAX;
	mem_info->num = 1;

	res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR;
	res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX;
	res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths;
}

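/*
 * Carve an Rx object out of the module free lists and wire up its
 * paths, queues, CCBs/RCBs and interrupt blocks from the resources
 * previously sized by bna_rx_res_req().
 */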
struct bna_rx *
bna_rx_create(struct bna *bna, struct bnad *bnad,
		struct bna_rx_config *rx_cfg,
		const struct bna_rx_event_cbfn *rx_cbfn,
		struct bna_res_info *res_info,
		void *priv)
{
	struct bna_rx_mod *rx_mod = &bna->rx_mod;
	struct bna_rx *rx;
	struct bna_rxp *rxp;
	struct bna_rxq *q0;
	struct bna_rxq *q1;
	struct bna_intr_info *intr_info;
	u32 page_count;
	struct bna_mem_descr *ccb_mem;
	struct bna_mem_descr *rcb_mem;
	struct bna_mem_descr *unmapq_mem;
	struct bna_mem_descr *cqpt_mem;
	struct bna_mem_descr *cswqpt_mem;
	struct bna_mem_descr *cpage_mem;
	struct bna_mem_descr *hqpt_mem;
	struct bna_mem_descr *dqpt_mem;
	struct bna_mem_descr *hsqpt_mem;
	struct bna_mem_descr *dsqpt_mem;
	struct bna_mem_descr *hpage_mem;
	struct bna_mem_descr *dpage_mem;
	int i, cpage_idx = 0, dpage_idx = 0, hpage_idx = 0;
	int dpage_count, hpage_count, rcb_idx;

	if (!bna_rx_res_check(rx_mod, rx_cfg))
		return NULL;

	intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
	ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
	rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
	unmapq_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[0];
	cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
	cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
	cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
	hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
	dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
	hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
	dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
	hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
	dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];

	page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.num /
			rx_cfg->num_paths;
	dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.num /
			rx_cfg->num_paths;
	hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.num /
			rx_cfg->num_paths;

	rx = bna_rx_get(rx_mod, rx_cfg->rx_type);
	rx->bna = bna;
	rx->rx_flags = 0;
	INIT_LIST_HEAD(&rx->rxp_q);
	rx->stop_cbfn = NULL;
	rx->stop_cbarg = NULL;
	rx->priv = priv;

	rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
	rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
	rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
	rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
	rx->rx_stall_cbfn = rx_cbfn->rx_stall_cbfn;
	/* Following callbacks are mandatory */
	rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
	rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;

	if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_STARTED) {
		switch (rx->type) {
		case BNA_RX_T_REGULAR:
			if (!(rx->bna->rx_mod.flags &
				BNA_RX_MOD_F_ENET_LOOPBACK))
				rx->rx_flags |= BNA_RX_F_ENET_STARTED;
			break;
		case BNA_RX_T_LOOPBACK:
			if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_LOOPBACK)
				rx->rx_flags |= BNA_RX_F_ENET_STARTED;
			break;
		}
	}

	rx->num_paths = rx_cfg->num_paths;
	for (i = 0, rcb_idx = 0; i < rx->num_paths; i++) {
		rxp = bna_rxp_get(rx_mod);
		list_add_tail(&rxp->qe, &rx->rxp_q);
		rxp->type = rx_cfg->rxp_type;
		rxp->rx = rx;
		rxp->cq.rx = rx;

		q0 = bna_rxq_get(rx_mod);
		if (BNA_RXP_SINGLE == rx_cfg->rxp_type)
			q1 = NULL;
		else
			q1 = bna_rxq_get(rx_mod);

		if (1 == intr_info->num)
			rxp->vector = intr_info->idl[0].vector;
		else
			rxp->vector = intr_info->idl[i].vector;

		/* Setup IB */
		rxp->cq.ib.ib_seg_host_addr.lsb =
		res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
		rxp->cq.ib.ib_seg_host_addr.msb =
		res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
		rxp->cq.ib.ib_seg_host_addr_kva =
		res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
		rxp->cq.ib.intr_type = intr_info->intr_type;
		if (intr_info->intr_type == BNA_INTR_T_MSIX)
			rxp->cq.ib.intr_vector = rxp->vector;
		else
			rxp->cq.ib.intr_vector = (1 << rxp->vector);
		rxp->cq.ib.coalescing_timeo = rx_cfg->coalescing_timeo;
		rxp->cq.ib.interpkt_count = BFI_RX_INTERPKT_COUNT;
		rxp->cq.ib.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;

		bna_rxp_add_rxqs(rxp, q0, q1);

		/* Setup large Q */
		q0->rx = rx;
		q0->rxp = rxp;

		q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
		q0->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
		rcb_idx++;
		q0->rcb->q_depth = rx_cfg->q_depth;
		q0->rcb->rxq = q0;
		q0->rcb->bnad = bna->bnad;
		q0->rcb->id = 0;
		q0->rx_packets = q0->rx_bytes = 0;
		q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0;

		bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE,
			&dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[dpage_idx]);
		q0->rcb->page_idx = dpage_idx;
		q0->rcb->page_count = dpage_count;
		dpage_idx += dpage_count;

		if (rx->rcb_setup_cbfn)
			rx->rcb_setup_cbfn(bnad, q0->rcb);

		/* Setup small Q */
		if (q1) {
			q1->rx = rx;
			q1->rxp = rxp;

			q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
			q1->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
			rcb_idx++;
			q1->rcb->q_depth = rx_cfg->q_depth;
			q1->rcb->rxq = q1;
			q1->rcb->bnad = bna->bnad;
			q1->rcb->id = 1;
			q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ?
					rx_cfg->hds_config.forced_offset
					: rx_cfg->small_buff_size;
			q1->rx_packets = q1->rx_bytes = 0;
			q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;

			bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE,
				&hqpt_mem[i], &hsqpt_mem[i],
				&hpage_mem[hpage_idx]);
			q1->rcb->page_idx = hpage_idx;
			q1->rcb->page_count = hpage_count;
			hpage_idx += hpage_count;

			if (rx->rcb_setup_cbfn)
				rx->rcb_setup_cbfn(bnad, q1->rcb);
		}

		/* Setup CQ */
		rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
		rxp->cq.ccb->q_depth = rx_cfg->q_depth +
				((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
				0 : rx_cfg->q_depth);
		rxp->cq.ccb->cq = &rxp->cq;
		rxp->cq.ccb->rcb[0] = q0->rcb;
		q0->rcb->ccb = rxp->cq.ccb;
		if (q1) {
			rxp->cq.ccb->rcb[1] = q1->rcb;
			q1->rcb->ccb = rxp->cq.ccb;
		}
		rxp->cq.ccb->hw_producer_index =
			(u32 *)rxp->cq.ib.ib_seg_host_addr_kva;
		rxp->cq.ccb->i_dbell = &rxp->cq.ib.door_bell;
		rxp->cq.ccb->intr_type = rxp->cq.ib.intr_type;
		rxp->cq.ccb->intr_vector = rxp->cq.ib.intr_vector;
		rxp->cq.ccb->rx_coalescing_timeo =
			rxp->cq.ib.coalescing_timeo;
		rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
		rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;
		rxp->cq.ccb->bnad = bna->bnad;
		rxp->cq.ccb->id = i;

		bna_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
			&cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[cpage_idx]);
		rxp->cq.ccb->page_idx = cpage_idx;
		rxp->cq.ccb->page_count = page_count;
		cpage_idx += page_count;

		if (rx->ccb_setup_cbfn)
			rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);
	}

	rx->hds_cfg = rx_cfg->hds_config;

	bna_rxf_init(&rx->rxf, rx, rx_cfg, res_info);

	bfa_fsm_set_state(rx, bna_rx_sm_stopped);

	rx_mod->rid_mask |= (1 << rx->rid);

	return rx;
}

void
bna_rx_destroy(struct bna_rx *rx)
{
	struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
	struct bna_rxq *q0 = NULL;
	struct bna_rxq *q1 = NULL;
	struct bna_rxp *rxp;
	struct list_head *qe;

	bna_rxf_uninit(&rx->rxf);

	while (!list_empty(&rx->rxp_q)) {
		bfa_q_deq(&rx->rxp_q, &rxp);
		GET_RXQS(rxp, q0, q1);
		if (rx->rcb_destroy_cbfn)
			rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
		q0->rcb = NULL;
		q0->rxp = NULL;
		q0->rx = NULL;
		bna_rxq_put(rx_mod, q0);

		if (q1) {
			if (rx->rcb_destroy_cbfn)
				rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
			q1->rcb = NULL;
			q1->rxp = NULL;
			q1->rx = NULL;
			bna_rxq_put(rx_mod, q1);
		}
		rxp->rxq.slr.large = NULL;
		rxp->rxq.slr.small = NULL;

		if (rx->ccb_destroy_cbfn)
			rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
		rxp->cq.ccb = NULL;
		rxp->rx = NULL;
		bna_rxp_put(rx_mod, rxp);
	}

	list_for_each(qe, &rx_mod->rx_active_q) {
		if (qe == &rx->qe) {
			list_del(&rx->qe);
			bfa_q_qe_init(&rx->qe);
			break;
		}
	}

	rx_mod->rid_mask &= ~(1 << rx->rid);

	rx->bna = NULL;
	rx->priv = NULL;
	bna_rx_put(rx_mod, rx);
}

void
bna_rx_enable(struct bna_rx *rx)
{
	if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
		return;

	rx->rx_flags |= BNA_RX_F_ENABLED;
	if (rx->rx_flags & BNA_RX_F_ENET_STARTED)
		bfa_fsm_send_event(rx, RX_E_START);
}

void
bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
		void (*cbfn)(void *, struct bna_rx *))
{
	if (type == BNA_SOFT_CLEANUP) {
		/* h/w should not be accessed. Treat it as already stopped. */
		(*cbfn)(rx->bna->bnad, rx);
	} else {
		rx->stop_cbfn = cbfn;
		rx->stop_cbarg = rx->bna->bnad;

		rx->rx_flags &= ~BNA_RX_F_ENABLED;

		bfa_fsm_send_event(rx, RX_E_STOP);
	}
}

void
bna_rx_cleanup_complete(struct bna_rx *rx)
{
	bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE);
}

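/*
 * Change promiscuous/default/allmulti mode. Conflicting settings
 * (for example promiscuous and default mode enabled on different Rx
 * objects) are rejected; hardware is reconfigured only when a filter
 * actually changed.
 */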
enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
		enum bna_rxmode bitmask,
		void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_rxf *rxf = &rx->rxf;
	int need_hw_config = 0;

	/* Error checks */

	if (is_promisc_enable(new_mode, bitmask)) {
		/* If promisc mode is already enabled elsewhere in the system */
		if ((rx->bna->promisc_rid != BFI_INVALID_RID) &&
			(rx->bna->promisc_rid != rxf->rx->rid))
			goto err_return;

		/* If default mode is already enabled in the system */
		if (rx->bna->default_mode_rid != BFI_INVALID_RID)
			goto err_return;

		/* Trying to enable promiscuous and default mode together */
		if (is_default_enable(new_mode, bitmask))
			goto err_return;
	}

	if (is_default_enable(new_mode, bitmask)) {
		/* If default mode is already enabled elsewhere in the system */
		if ((rx->bna->default_mode_rid != BFI_INVALID_RID) &&
			(rx->bna->default_mode_rid != rxf->rx->rid)) {
			goto err_return;
		}

		/* If promiscuous mode is already enabled in the system */
		if (rx->bna->promisc_rid != BFI_INVALID_RID)
			goto err_return;
	}

	/* Process the commands */

	if (is_promisc_enable(new_mode, bitmask)) {
		if (bna_rxf_promisc_enable(rxf))
			need_hw_config = 1;
	} else if (is_promisc_disable(new_mode, bitmask)) {
		if (bna_rxf_promisc_disable(rxf))
			need_hw_config = 1;
	}

	if (is_allmulti_enable(new_mode, bitmask)) {
		if (bna_rxf_allmulti_enable(rxf))
			need_hw_config = 1;
	} else if (is_allmulti_disable(new_mode, bitmask)) {
		if (bna_rxf_allmulti_disable(rxf))
			need_hw_config = 1;
	}

	/* Trigger h/w if needed */

	if (need_hw_config) {
		rxf->cam_fltr_cbfn = cbfn;
		rxf->cam_fltr_cbarg = rx->bna->bnad;
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	} else if (cbfn)
		(*cbfn)(rx->bna->bnad, rx);

	return BNA_CB_SUCCESS;

err_return:
	return BNA_CB_FAIL;
}

void
bna_rx_vlanfilter_enable(struct bna_rx *rx)
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) {
		rxf->vlan_filter_status = BNA_STATUS_T_ENABLED;
		rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
}

void
bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
{
	struct bna_rxp *rxp;
	struct list_head *qe;

	list_for_each(qe, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe;
		rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
		bna_ib_coalescing_timeo_set(&rxp->cq.ib, coalescing_timeo);
	}
}

void
bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX])
{
	int i, j;

	for (i = 0; i < BNA_LOAD_T_MAX; i++)
		for (j = 0; j < BNA_BIAS_T_MAX; j++)
			bna->rx_mod.dim_vector[i][j] = vector[i][j];
}

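/*
 * Dynamic interrupt moderation: map the packet rate seen on a CCB to a
 * load level and a small/large-packet bias, then program the matching
 * coalescing timeout from the DIM vector table.
 */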
void
bna_rx_dim_update(struct bna_ccb *ccb)
{
	struct bna *bna = ccb->cq->rx->bna;
	u32 load, bias;
	u32 pkt_rt, small_rt, large_rt;
	u8 coalescing_timeo;

	if ((ccb->pkt_rate.small_pkt_cnt == 0) &&
		(ccb->pkt_rate.large_pkt_cnt == 0))
		return;

	/* Arrive at preconfigured coalescing timeo value based on pkt rate */

	small_rt = ccb->pkt_rate.small_pkt_cnt;
	large_rt = ccb->pkt_rate.large_pkt_cnt;

	pkt_rt = small_rt + large_rt;

	if (pkt_rt < BNA_PKT_RATE_10K)
		load = BNA_LOAD_T_LOW_4;
	else if (pkt_rt < BNA_PKT_RATE_20K)
		load = BNA_LOAD_T_LOW_3;
	else if (pkt_rt < BNA_PKT_RATE_30K)
		load = BNA_LOAD_T_LOW_2;
	else if (pkt_rt < BNA_PKT_RATE_40K)
		load = BNA_LOAD_T_LOW_1;
	else if (pkt_rt < BNA_PKT_RATE_50K)
		load = BNA_LOAD_T_HIGH_1;
	else if (pkt_rt < BNA_PKT_RATE_60K)
		load = BNA_LOAD_T_HIGH_2;
	else if (pkt_rt < BNA_PKT_RATE_80K)
		load = BNA_LOAD_T_HIGH_3;
	else
		load = BNA_LOAD_T_HIGH_4;

	if (small_rt > (large_rt << 1))
		bias = 0;
	else
		bias = 1;

	ccb->pkt_rate.small_pkt_cnt = 0;
	ccb->pkt_rate.large_pkt_cnt = 0;

	coalescing_timeo = bna->rx_mod.dim_vector[load][bias];
	ccb->rx_coalescing_timeo = coalescing_timeo;

	/* Set it to IB */
	bna_ib_coalescing_timeo_set(&ccb->cq->ib, coalescing_timeo);
}

const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
	{12, 12},
	{6, 10},
	{5, 10},
	{4, 8},
	{3, 6},
	{3, 6},
	{2, 4},
	{1, 2},
};

  2355. /* TX */
  2356. #define call_tx_stop_cbfn(tx) \
  2357. do { \
  2358. if ((tx)->stop_cbfn) { \
  2359. void (*cbfn)(void *, struct bna_tx *); \
  2360. void *cbarg; \
  2361. cbfn = (tx)->stop_cbfn; \
  2362. cbarg = (tx)->stop_cbarg; \
  2363. (tx)->stop_cbfn = NULL; \
  2364. (tx)->stop_cbarg = NULL; \
  2365. cbfn(cbarg, (tx)); \
  2366. } \
  2367. } while (0)
  2368. #define call_tx_prio_change_cbfn(tx) \
  2369. do { \
  2370. if ((tx)->prio_change_cbfn) { \
  2371. void (*cbfn)(struct bnad *, struct bna_tx *); \
  2372. cbfn = (tx)->prio_change_cbfn; \
  2373. (tx)->prio_change_cbfn = NULL; \
  2374. cbfn((tx)->bna->bnad, (tx)); \
  2375. } \
  2376. } while (0)
  2377. static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx);
  2378. static void bna_bfi_tx_enet_start(struct bna_tx *tx);
  2379. static void bna_tx_enet_stop(struct bna_tx *tx);
  2380. enum bna_tx_event {
  2381. TX_E_START = 1,
  2382. TX_E_STOP = 2,
  2383. TX_E_FAIL = 3,
  2384. TX_E_STARTED = 4,
  2385. TX_E_STOPPED = 5,
  2386. TX_E_PRIO_CHANGE = 6,
  2387. TX_E_CLEANUP_DONE = 7,
  2388. TX_E_BW_UPDATE = 8,
  2389. };
  2390. bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx, enum bna_tx_event);
  2391. bfa_fsm_state_decl(bna_tx, start_wait, struct bna_tx, enum bna_tx_event);
  2392. bfa_fsm_state_decl(bna_tx, started, struct bna_tx, enum bna_tx_event);
  2393. bfa_fsm_state_decl(bna_tx, stop_wait, struct bna_tx, enum bna_tx_event);
  2394. bfa_fsm_state_decl(bna_tx, cleanup_wait, struct bna_tx,
  2395. enum bna_tx_event);
  2396. bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
  2397. enum bna_tx_event);
  2398. bfa_fsm_state_decl(bna_tx, prio_cleanup_wait, struct bna_tx,
  2399. enum bna_tx_event);
  2400. bfa_fsm_state_decl(bna_tx, failed, struct bna_tx, enum bna_tx_event);
  2401. bfa_fsm_state_decl(bna_tx, quiesce_wait, struct bna_tx,
  2402. enum bna_tx_event);
  2403. static void
  2404. bna_tx_sm_stopped_entry(struct bna_tx *tx)
  2405. {
  2406. call_tx_stop_cbfn(tx);
  2407. }
  2408. static void
  2409. bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
  2410. {
  2411. switch (event) {
  2412. case TX_E_START:
  2413. bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
  2414. break;
  2415. case TX_E_STOP:
  2416. call_tx_stop_cbfn(tx);
  2417. break;
  2418. case TX_E_FAIL:
  2419. /* No-op */
  2420. break;
  2421. case TX_E_PRIO_CHANGE:
  2422. call_tx_prio_change_cbfn(tx);
  2423. break;
  2424. case TX_E_BW_UPDATE:
  2425. /* No-op */
  2426. break;
  2427. default:
  2428. bfa_sm_fault(event);
  2429. }
  2430. }
  2431. static void
  2432. bna_tx_sm_start_wait_entry(struct bna_tx *tx)
  2433. {
  2434. bna_bfi_tx_enet_start(tx);
  2435. }
  2436. static void
  2437. bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event)
  2438. {
  2439. switch (event) {
  2440. case TX_E_STOP:
  2441. tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
  2442. bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
  2443. break;
  2444. case TX_E_FAIL:
  2445. tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
  2446. bfa_fsm_set_state(tx, bna_tx_sm_stopped);
  2447. break;
  2448. case TX_E_STARTED:
  2449. if (tx->flags & (BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED)) {
  2450. tx->flags &= ~(BNA_TX_F_PRIO_CHANGED |
  2451. BNA_TX_F_BW_UPDATED);
  2452. bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
  2453. } else
  2454. bfa_fsm_set_state(tx, bna_tx_sm_started);
  2455. break;
  2456. case TX_E_PRIO_CHANGE:
  2457. tx->flags |= BNA_TX_F_PRIO_CHANGED;
  2458. break;
  2459. case TX_E_BW_UPDATE:
  2460. tx->flags |= BNA_TX_F_BW_UPDATED;
  2461. break;
  2462. default:
  2463. bfa_sm_fault(event);
  2464. }
  2465. }
  2466. static void
  2467. bna_tx_sm_started_entry(struct bna_tx *tx)
  2468. {
  2469. struct bna_txq *txq;
  2470. struct list_head *qe;
  2471. int is_regular = (tx->type == BNA_TX_T_REGULAR);
  2472. list_for_each(qe, &tx->txq_q) {
  2473. txq = (struct bna_txq *)qe;
  2474. txq->tcb->priority = txq->priority;
  2475. /* Start IB */
  2476. bna_ib_start(tx->bna, &txq->ib, is_regular);
  2477. }
  2478. tx->tx_resume_cbfn(tx->bna->bnad, tx);
  2479. }
  2480. static void
  2481. bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
  2482. {
  2483. switch (event) {
  2484. case TX_E_STOP:
  2485. bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
  2486. tx->tx_stall_cbfn(tx->bna->bnad, tx);
  2487. bna_tx_enet_stop(tx);
  2488. break;
  2489. case TX_E_FAIL:
  2490. bfa_fsm_set_state(tx, bna_tx_sm_failed);
  2491. tx->tx_stall_cbfn(tx->bna->bnad, tx);
  2492. tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
  2493. break;
  2494. case TX_E_PRIO_CHANGE:
  2495. case TX_E_BW_UPDATE:
  2496. bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
  2497. break;
  2498. default:
  2499. bfa_sm_fault(event);
  2500. }
  2501. }
  2502. static void
  2503. bna_tx_sm_stop_wait_entry(struct bna_tx *tx)
  2504. {
  2505. }
  2506. static void
  2507. bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
  2508. {
  2509. switch (event) {
  2510. case TX_E_FAIL:
  2511. case TX_E_STOPPED:
  2512. bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
  2513. tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
  2514. break;
  2515. case TX_E_STARTED:
  2516. /**
  2517. * We are here due to start_wait -> stop_wait transition on
  2518. * TX_E_STOP event
  2519. */
  2520. bna_tx_enet_stop(tx);
  2521. break;
  2522. case TX_E_PRIO_CHANGE:
  2523. case TX_E_BW_UPDATE:
  2524. /* No-op */
  2525. break;
  2526. default:
  2527. bfa_sm_fault(event);
  2528. }
  2529. }
  2530. static void
  2531. bna_tx_sm_cleanup_wait_entry(struct bna_tx *tx)
  2532. {
  2533. }
  2534. static void
  2535. bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
  2536. {
  2537. switch (event) {
  2538. case TX_E_FAIL:
  2539. case TX_E_PRIO_CHANGE:
  2540. case TX_E_BW_UPDATE:
  2541. /* No-op */
  2542. break;
  2543. case TX_E_CLEANUP_DONE:
  2544. bfa_fsm_set_state(tx, bna_tx_sm_stopped);
  2545. break;
  2546. default:
  2547. bfa_sm_fault(event);
  2548. }
  2549. }
  2550. static void
  2551. bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
  2552. {
  2553. tx->tx_stall_cbfn(tx->bna->bnad, tx);
  2554. bna_tx_enet_stop(tx);
  2555. }
  2556. static void
  2557. bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
  2558. {
  2559. switch (event) {
  2560. case TX_E_STOP:
  2561. bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
  2562. break;
  2563. case TX_E_FAIL:
  2564. bfa_fsm_set_state(tx, bna_tx_sm_failed);
  2565. call_tx_prio_change_cbfn(tx);
  2566. tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
  2567. break;
  2568. case TX_E_STOPPED:
  2569. bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait);
  2570. break;
  2571. case TX_E_PRIO_CHANGE:
  2572. case TX_E_BW_UPDATE:
  2573. /* No-op */
  2574. break;
  2575. default:
  2576. bfa_sm_fault(event);
  2577. }
  2578. }
  2579. static void
  2580. bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx)
  2581. {
  2582. call_tx_prio_change_cbfn(tx);
  2583. tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
  2584. }
  2585. static void
  2586. bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
  2587. {
  2588. switch (event) {
  2589. case TX_E_STOP:
  2590. bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
  2591. break;
  2592. case TX_E_FAIL:
  2593. bfa_fsm_set_state(tx, bna_tx_sm_failed);
  2594. break;
  2595. case TX_E_PRIO_CHANGE:
  2596. case TX_E_BW_UPDATE:
  2597. /* No-op */
  2598. break;
  2599. case TX_E_CLEANUP_DONE:
  2600. bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
  2601. break;
  2602. default:
  2603. bfa_sm_fault(event);
  2604. }
  2605. }
  2606. static void
  2607. bna_tx_sm_failed_entry(struct bna_tx *tx)
  2608. {
  2609. }
  2610. static void
  2611. bna_tx_sm_failed(struct bna_tx *tx, enum bna_tx_event event)
  2612. {
  2613. switch (event) {
  2614. case TX_E_START:
  2615. bfa_fsm_set_state(tx, bna_tx_sm_quiesce_wait);
  2616. break;
  2617. case TX_E_STOP:
  2618. bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
  2619. break;
  2620. case TX_E_FAIL:
  2621. /* No-op */
  2622. break;
  2623. case TX_E_CLEANUP_DONE:
  2624. bfa_fsm_set_state(tx, bna_tx_sm_stopped);
  2625. break;
  2626. default:
  2627. bfa_sm_fault(event);
  2628. }
  2629. }
  2630. static void
  2631. bna_tx_sm_quiesce_wait_entry(struct bna_tx *tx)
  2632. {
  2633. }
  2634. static void
  2635. bna_tx_sm_quiesce_wait(struct bna_tx *tx, enum bna_tx_event event)
  2636. {
  2637. switch (event) {
  2638. case TX_E_STOP:
  2639. bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
  2640. break;
  2641. case TX_E_FAIL:
  2642. bfa_fsm_set_state(tx, bna_tx_sm_failed);
  2643. break;
  2644. case TX_E_CLEANUP_DONE:
  2645. bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
  2646. break;
  2647. case TX_E_BW_UPDATE:
  2648. /* No-op */
  2649. break;
  2650. default:
  2651. bfa_sm_fault(event);
  2652. }
  2653. }
  2654. static void
  2655. bna_bfi_tx_enet_start(struct bna_tx *tx)
  2656. {
  2657. struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req;
  2658. struct bna_txq *txq = NULL;
  2659. struct list_head *qe;
  2660. int i;
  2661. bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
  2662. BFI_ENET_H2I_TX_CFG_SET_REQ, 0, tx->rid);
  2663. cfg_req->mh.num_entries = htons(
  2664. bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_tx_cfg_req)));
  2665. cfg_req->num_queues = tx->num_txq;
  2666. for (i = 0, qe = bfa_q_first(&tx->txq_q);
  2667. i < tx->num_txq;
  2668. i++, qe = bfa_q_next(qe)) {
  2669. txq = (struct bna_txq *)qe;
  2670. bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt);
  2671. cfg_req->q_cfg[i].q.priority = txq->priority;
  2672. cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
  2673. txq->ib.ib_seg_host_addr.lsb;
  2674. cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
  2675. txq->ib.ib_seg_host_addr.msb;
  2676. cfg_req->q_cfg[i].ib.intr.msix_index =
  2677. htons((u16)txq->ib.intr_vector);
  2678. }
  2679. cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_ENABLED;
  2680. cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
  2681. cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
  2682. cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_ENABLED;
  2683. cfg_req->ib_cfg.msix = (txq->ib.intr_type == BNA_INTR_T_MSIX)
  2684. ? BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
  2685. cfg_req->ib_cfg.coalescing_timeout =
  2686. htonl((u32)txq->ib.coalescing_timeo);
  2687. cfg_req->ib_cfg.inter_pkt_timeout =
  2688. htonl((u32)txq->ib.interpkt_timeo);
  2689. cfg_req->ib_cfg.inter_pkt_count = (u8)txq->ib.interpkt_count;
  2690. cfg_req->tx_cfg.vlan_mode = BFI_ENET_TX_VLAN_WI;
  2691. cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id);
  2692. cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_DISABLED;
  2693. cfg_req->tx_cfg.apply_vlan_filter = BNA_STATUS_T_DISABLED;
  2694. bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL,
  2695. sizeof(struct bfi_enet_tx_cfg_req), &cfg_req->mh);
  2696. bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
  2697. }
  2698. static void
  2699. bna_bfi_tx_enet_stop(struct bna_tx *tx)
  2700. {
  2701. struct bfi_enet_req *req = &tx->bfi_enet_cmd.req;
  2702. bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
  2703. BFI_ENET_H2I_TX_CFG_CLR_REQ, 0, tx->rid);
  2704. req->mh.num_entries = htons(
  2705. bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
  2706. bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
  2707. &req->mh);
  2708. bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
  2709. }
  2710. static void
  2711. bna_tx_enet_stop(struct bna_tx *tx)
  2712. {
  2713. struct bna_txq *txq;
  2714. struct list_head *qe;
  2715. /* Stop IB */
  2716. list_for_each(qe, &tx->txq_q) {
  2717. txq = (struct bna_txq *)qe;
  2718. bna_ib_stop(tx->bna, &txq->ib);
  2719. }
  2720. bna_bfi_tx_enet_stop(tx);
  2721. }
  2722. static void
  2723. bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
  2724. struct bna_mem_descr *qpt_mem,
  2725. struct bna_mem_descr *swqpt_mem,
  2726. struct bna_mem_descr *page_mem)
  2727. {
  2728. int i;
  2729. txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
  2730. txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
  2731. txq->qpt.kv_qpt_ptr = qpt_mem->kva;
  2732. txq->qpt.page_count = page_count;
  2733. txq->qpt.page_size = page_size;
  2734. txq->tcb->sw_qpt = (void **) swqpt_mem->kva;
  2735. for (i = 0; i < page_count; i++) {
  2736. txq->tcb->sw_qpt[i] = page_mem[i].kva;
  2737. ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
  2738. page_mem[i].dma.lsb;
  2739. ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
  2740. page_mem[i].dma.msb;
  2741. }
  2742. }
  2743. static struct bna_tx *
  2744. bna_tx_get(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
  2745. {
  2746. struct list_head *qe = NULL;
  2747. struct bna_tx *tx = NULL;
  2748. if (list_empty(&tx_mod->tx_free_q))
  2749. return NULL;
  2750. if (type == BNA_TX_T_REGULAR) {
  2751. bfa_q_deq(&tx_mod->tx_free_q, &qe);
  2752. } else {
  2753. bfa_q_deq_tail(&tx_mod->tx_free_q, &qe);
  2754. }
  2755. tx = (struct bna_tx *)qe;
  2756. bfa_q_qe_init(&tx->qe);
  2757. tx->type = type;
  2758. return tx;
  2759. }
  2760. static void
  2761. bna_tx_free(struct bna_tx *tx)
  2762. {
  2763. struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
  2764. struct bna_txq *txq;
  2765. struct list_head *prev_qe;
  2766. struct list_head *qe;
  2767. while (!list_empty(&tx->txq_q)) {
  2768. bfa_q_deq(&tx->txq_q, &txq);
  2769. bfa_q_qe_init(&txq->qe);
  2770. txq->tcb = NULL;
  2771. txq->tx = NULL;
  2772. list_add_tail(&txq->qe, &tx_mod->txq_free_q);
  2773. }
  2774. list_for_each(qe, &tx_mod->tx_active_q) {
  2775. if (qe == &tx->qe) {
  2776. list_del(&tx->qe);
  2777. bfa_q_qe_init(&tx->qe);
  2778. break;
  2779. }
  2780. }
  2781. tx->bna = NULL;
  2782. tx->priv = NULL;
  2783. prev_qe = NULL;
  2784. list_for_each(qe, &tx_mod->tx_free_q) {
  2785. if (((struct bna_tx *)qe)->rid < tx->rid)
  2786. prev_qe = qe;
  2787. else {
  2788. break;
  2789. }
  2790. }
  2791. if (prev_qe == NULL) {
  2792. /* This is the first entry */
  2793. bfa_q_enq_head(&tx_mod->tx_free_q, &tx->qe);
  2794. } else if (bfa_q_next(prev_qe) == &tx_mod->tx_free_q) {
  2795. /* This is the last entry */
  2796. list_add_tail(&tx->qe, &tx_mod->tx_free_q);
  2797. } else {
  2798. /* Somewhere in the middle */
  2799. bfa_q_next(&tx->qe) = bfa_q_next(prev_qe);
  2800. bfa_q_prev(&tx->qe) = prev_qe;
  2801. bfa_q_next(prev_qe) = &tx->qe;
  2802. bfa_q_prev(bfa_q_next(&tx->qe)) = &tx->qe;
  2803. }
  2804. }
  2805. static void
  2806. bna_tx_start(struct bna_tx *tx)
  2807. {
  2808. tx->flags |= BNA_TX_F_ENET_STARTED;
  2809. if (tx->flags & BNA_TX_F_ENABLED)
  2810. bfa_fsm_send_event(tx, TX_E_START);
  2811. }
  2812. static void
  2813. bna_tx_stop(struct bna_tx *tx)
  2814. {
  2815. tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
  2816. tx->stop_cbarg = &tx->bna->tx_mod;
  2817. tx->flags &= ~BNA_TX_F_ENET_STARTED;
  2818. bfa_fsm_send_event(tx, TX_E_STOP);
  2819. }
  2820. static void
  2821. bna_tx_fail(struct bna_tx *tx)
  2822. {
  2823. tx->flags &= ~BNA_TX_F_ENET_STARTED;
  2824. bfa_fsm_send_event(tx, TX_E_FAIL);
  2825. }
void
bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp;
	struct bna_txq *txq = NULL;
	struct list_head *qe;
	int i;

	bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp,
		sizeof(struct bfi_enet_tx_cfg_rsp));

	tx->hw_id = cfg_rsp->hw_id;

	for (i = 0, qe = bfa_q_first(&tx->txq_q);
		i < tx->num_txq; i++, qe = bfa_q_next(qe)) {
		txq = (struct bna_txq *)qe;

		/* Setup doorbells */
		txq->tcb->i_dbell->doorbell_addr =
			tx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].i_dbell);
		txq->tcb->q_dbell =
			tx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].q_dbell);
		txq->hw_id = cfg_rsp->q_handles[i].hw_qid;

		/* Initialize producer/consumer indexes */
		(*txq->tcb->hw_consumer_index) = 0;
		txq->tcb->producer_index = txq->tcb->consumer_index = 0;
	}

	bfa_fsm_send_event(tx, TX_E_STARTED);
}

void
bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(tx, TX_E_STOPPED);
}

void
bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod)
{
	struct bna_tx *tx;
	struct list_head *qe;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		bfa_fsm_send_event(tx, TX_E_BW_UPDATE);
	}
}
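
/*
 * Fill in the resource request table for a Tx path: kernel memory for one
 * TCB per TxQ, DMA memory for the queue page table (QPT) and the queue
 * pages themselves, kernel memory for the shadow QPT, DMA memory for the
 * IB index segment, and MSI-X interrupt resources for Tx completion (one
 * vector per TxQ). The page count is derived from the requested queue
 * depth rounded up to whole PAGE_SIZE pages.
 */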
void
bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
{
	u32 q_size;
	u32 page_count;
	struct bna_mem_info *mem_info;

	res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = sizeof(struct bna_tcb);
	mem_info->num = num_txq;

	q_size = txq_depth * BFI_TXQ_WI_SIZE;
	q_size = ALIGN(q_size, PAGE_SIZE);
	page_count = q_size >> PAGE_SHIFT;

	res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = page_count * sizeof(struct bna_dma_addr);
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = page_count * sizeof(void *);
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE;
	mem_info->num = num_txq * page_count;

	res_info[BNA_TX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = BFI_IBIDX_SIZE;
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR;
	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
			BNA_INTR_T_MSIX;
	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
}
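
/*
 * Build a Tx object from pre-allocated resources: take a Tx and its TxQs
 * from the free lists, wire up the driver callbacks, then for each TxQ
 * program the IB (interrupt block), the TCB, and the queue page tables.
 * Interrupt vectors come from res_info; with a single vector all TxQs
 * share it, otherwise each TxQ gets its own. Returns NULL if the vector
 * count is neither 1 nor num_txq, or if resources run out, in which case
 * everything already claimed is released via err_return.
 */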
struct bna_tx *
bna_tx_create(struct bna *bna, struct bnad *bnad,
		struct bna_tx_config *tx_cfg,
		const struct bna_tx_event_cbfn *tx_cbfn,
		struct bna_res_info *res_info, void *priv)
{
	struct bna_intr_info *intr_info;
	struct bna_tx_mod *tx_mod = &bna->tx_mod;
	struct bna_tx *tx;
	struct bna_txq *txq;
	struct list_head *qe;
	int page_count;
	int page_size;
	int page_idx;
	int i;

	intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
	page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.num) /
			tx_cfg->num_txq;
	page_size = res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len;

	/**
	 * Get resources
	 */

	if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
		return NULL;

	/* Tx */

	tx = bna_tx_get(tx_mod, tx_cfg->tx_type);
	if (!tx)
		return NULL;
	tx->bna = bna;
	tx->priv = priv;

	/* TxQs */

	INIT_LIST_HEAD(&tx->txq_q);
	for (i = 0; i < tx_cfg->num_txq; i++) {
		if (list_empty(&tx_mod->txq_free_q))
			goto err_return;

		bfa_q_deq(&tx_mod->txq_free_q, &txq);
		bfa_q_qe_init(&txq->qe);
		list_add_tail(&txq->qe, &tx->txq_q);
		txq->tx = tx;
	}

	/*
	 * Initialize
	 */

	/* Tx */

	tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
	tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
	/* Following callbacks are mandatory */
	tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
	tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
	tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;

	list_add_tail(&tx->qe, &tx_mod->tx_active_q);

	tx->num_txq = tx_cfg->num_txq;

	tx->flags = 0;
	if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_STARTED) {
		switch (tx->type) {
		case BNA_TX_T_REGULAR:
			if (!(tx->bna->tx_mod.flags &
				BNA_TX_MOD_F_ENET_LOOPBACK))
				tx->flags |= BNA_TX_F_ENET_STARTED;
			break;
		case BNA_TX_T_LOOPBACK:
			if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_LOOPBACK)
				tx->flags |= BNA_TX_F_ENET_STARTED;
			break;
		}
	}

	/* TxQ */

	i = 0;
	page_idx = 0;
	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		txq->tcb = (struct bna_tcb *)
		res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
		txq->tx_packets = 0;
		txq->tx_bytes = 0;

		/* IB */
		txq->ib.ib_seg_host_addr.lsb =
		res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
		txq->ib.ib_seg_host_addr.msb =
		res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
		txq->ib.ib_seg_host_addr_kva =
		res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
		txq->ib.intr_type = intr_info->intr_type;
		txq->ib.intr_vector = (intr_info->num == 1) ?
					intr_info->idl[0].vector :
					intr_info->idl[i].vector;
		if (intr_info->intr_type == BNA_INTR_T_INTX)
			txq->ib.intr_vector = (1 << txq->ib.intr_vector);
		txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo;
		txq->ib.interpkt_timeo = 0; /* Not used */
		txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT;

		/* TCB */

		txq->tcb->q_depth = tx_cfg->txq_depth;
		txq->tcb->unmap_q = (void *)
		res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva;
		txq->tcb->hw_consumer_index =
			(u32 *)txq->ib.ib_seg_host_addr_kva;
		txq->tcb->i_dbell = &txq->ib.door_bell;
		txq->tcb->intr_type = txq->ib.intr_type;
		txq->tcb->intr_vector = txq->ib.intr_vector;
		txq->tcb->txq = txq;
		txq->tcb->bnad = bnad;
		txq->tcb->id = i;

		/* QPT, SWQPT, Pages */
		bna_txq_qpt_setup(txq, page_count, page_size,
			&res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
			&res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
			&res_info[BNA_TX_RES_MEM_T_PAGE].
				res_u.mem_info.mdl[page_idx]);
		txq->tcb->page_idx = page_idx;
		txq->tcb->page_count = page_count;
		page_idx += page_count;

		/* Callback to bnad for setting up TCB */
		if (tx->tcb_setup_cbfn)
			(tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);

		if (tx_cfg->num_txq == BFI_TX_MAX_PRIO)
			txq->priority = txq->tcb->id;
		else
			txq->priority = tx_mod->default_prio;

		i++;
	}

	tx->txf_vlan_id = 0;

	bfa_fsm_set_state(tx, bna_tx_sm_stopped);

	tx_mod->rid_mask |= (1 << tx->rid);

	return tx;

err_return:
	bna_tx_free(tx);
	return NULL;
}
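
/*
 * Tear down a Tx object: let the driver destroy each TCB it set up, clear
 * the rid from the module's in-use mask, and return the Tx and its TxQs
 * to the free lists.
 */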
void
bna_tx_destroy(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head *qe;

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		if (tx->tcb_destroy_cbfn)
			(tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
	}

	tx->bna->tx_mod.rid_mask &= ~(1 << tx->rid);
	bna_tx_free(tx);
}

void
bna_tx_enable(struct bna_tx *tx)
{
	if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
		return;

	tx->flags |= BNA_TX_F_ENABLED;

	if (tx->flags & BNA_TX_F_ENET_STARTED)
		bfa_fsm_send_event(tx, TX_E_START);
}
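
/*
 * Disable a Tx object. A soft cleanup only invokes the caller's callback;
 * a hard cleanup registers the callback as the stop completion handler,
 * clears the enabled flag, and drives the FSM through TX_E_STOP.
 */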
void
bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
		void (*cbfn)(void *, struct bna_tx *))
{
	if (type == BNA_SOFT_CLEANUP) {
		(*cbfn)(tx->bna->bnad, tx);
		return;
	}

	tx->stop_cbfn = cbfn;
	tx->stop_cbarg = tx->bna->bnad;

	tx->flags &= ~BNA_TX_F_ENABLED;

	bfa_fsm_send_event(tx, TX_E_STOP);
}

void
bna_tx_cleanup_complete(struct bna_tx *tx)
{
	bfa_fsm_send_event(tx, TX_E_CLEANUP_DONE);
}

static void
bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx)
{
	struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;

	bfa_wc_down(&tx_mod->tx_stop_wc);
}

static void
bna_tx_mod_cb_tx_stopped_all(void *arg)
{
	struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;

	if (tx_mod->stop_cbfn)
		tx_mod->stop_cbfn(&tx_mod->bna->enet);
	tx_mod->stop_cbfn = NULL;
}
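
/*
 * Initialize the Tx module: point it at the pre-allocated Tx and TxQ
 * arrays, assign each Tx a resource id (rid), and place every Tx and TxQ
 * on its free list. Priority state starts as BFI_TX_PRIO_MAP_ALL with
 * default_prio 0 and iSCSI-over-CEE disabled.
 */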
void
bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
		struct bna_res_info *res_info)
{
	int i;

	tx_mod->bna = bna;
	tx_mod->flags = 0;

	tx_mod->tx = (struct bna_tx *)
		res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
	tx_mod->txq = (struct bna_txq *)
		res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&tx_mod->tx_free_q);
	INIT_LIST_HEAD(&tx_mod->tx_active_q);

	INIT_LIST_HEAD(&tx_mod->txq_free_q);

	for (i = 0; i < bna->ioceth.attr.num_txq; i++) {
		tx_mod->tx[i].rid = i;
		bfa_q_qe_init(&tx_mod->tx[i].qe);
		list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
		bfa_q_qe_init(&tx_mod->txq[i].qe);
		list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
	}

	tx_mod->prio_map = BFI_TX_PRIO_MAP_ALL;
	tx_mod->default_prio = 0;
	tx_mod->iscsi_over_cee = BNA_STATUS_T_DISABLED;
	tx_mod->iscsi_prio = -1;
}

void
bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
{
	struct list_head *qe;
	int i;

	i = 0;
	list_for_each(qe, &tx_mod->tx_free_q)
		i++;

	i = 0;
	list_for_each(qe, &tx_mod->txq_free_q)
		i++;

	tx_mod->bna = NULL;
}

void
bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags |= BNA_TX_MOD_F_ENET_STARTED;
	if (type == BNA_TX_T_LOOPBACK)
		tx_mod->flags |= BNA_TX_MOD_F_ENET_LOOPBACK;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		if (tx->type == type)
			bna_tx_start(tx);
	}
}
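
/*
 * Stop all active Tx objects of the given type. A wait counter (bfa_wc)
 * tracks the Tx objects still shutting down; when the last one reports
 * stopped, bna_tx_mod_cb_tx_stopped_all() fires the enet stop callback.
 */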
void
bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;

	tx_mod->stop_cbfn = bna_enet_cb_tx_stopped;

	bfa_wc_init(&tx_mod->tx_stop_wc, bna_tx_mod_cb_tx_stopped_all, tx_mod);

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		if (tx->type == type) {
			bfa_wc_up(&tx_mod->tx_stop_wc);
			bna_tx_stop(tx);
		}
	}

	bfa_wc_wait(&tx_mod->tx_stop_wc);
}

void
bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		bna_tx_fail(tx);
	}
}
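
/*
 * Apply a new interrupt coalescing timeout to every TxQ's interrupt block
 * on this Tx object.
 */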
void
bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
{
	struct bna_txq *txq;
	struct list_head *qe;

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		bna_ib_coalescing_timeo_set(&txq->ib, coalescing_timeo);
	}
}