/******************************************************************************
* This software may be used and distributed according to the terms of
* the GNU General Public License (GPL), incorporated herein by reference.
* Drivers based on or derived from this code fall under the GPL and must
* retain the authorship, copyright and license notice. This file is not
* a complete program and may only be used when the entire operating
* system is licensed under the GPL.
* See the file COPYING in this distribution for more information.
*
* vxge-main.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
*              Virtualized Server Adapter.
* Copyright(c) 2002-2010 Exar Corp.
*
* The loadable module parameters supported by the driver, with a brief
* explanation of each variable:
* vlan_tag_strip:
*	Strip VLAN Tag enable/disable. Instructs the device to remove
*	the VLAN tag from all received tagged frames that are not
*	replicated at the internal L2 switch.
*		0 - Do not strip the VLAN tag.
*		1 - Strip the VLAN tag.
*
* addr_learn_en:
*	Enable learning the mac address of the guest OS interface in
*	a virtualization environment.
*		0 - DISABLE
*		1 - ENABLE
*
* max_config_port:
*	Maximum number of ports to be supported.
*		MIN - 1 and MAX - 2
*
* max_config_vpath:
*	This configures the maximum number of VPATHs configured for each
*	device function.
*		MIN - 1 and MAX - 17
*
* max_config_dev:
*	This configures the maximum number of device functions to be enabled.
*		MIN - 1 and MAX - 17
*
******************************************************************************/
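/*
 * Illustrative load-time usage of the parameters documented above (a
 * hypothetical invocation, not taken from this file):
 *
 *	modprobe vxge vlan_tag_strip=1 addr_learn_en=0 max_config_vpath=4
 */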
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <net/ip.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#include "vxge-main.h"
#include "vxge-reg.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O "
	"Virtualized Server Adapter");

static DEFINE_PCI_DEVICE_TABLE(vxge_id_table) = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID,
	PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID,
	PCI_ANY_ID},
	{0}
};

MODULE_DEVICE_TABLE(pci, vxge_id_table);

VXGE_MODULE_PARAM_INT(vlan_tag_strip, VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE);
VXGE_MODULE_PARAM_INT(addr_learn_en, VXGE_HW_MAC_ADDR_LEARN_DEFAULT);
VXGE_MODULE_PARAM_INT(max_config_port, VXGE_MAX_CONFIG_PORT);
VXGE_MODULE_PARAM_INT(max_config_vpath, VXGE_USE_DEFAULT);
VXGE_MODULE_PARAM_INT(max_mac_vpath, VXGE_MAX_MAC_ADDR_COUNT);
VXGE_MODULE_PARAM_INT(max_config_dev, VXGE_MAX_CONFIG_DEV);

static u16 vpath_selector[VXGE_HW_MAX_VIRTUAL_PATHS] =
	{0, 1, 3, 3, 7, 7, 7, 7, 15, 15, 15, 15, 15, 15, 15, 15, 31};
static unsigned int bw_percentage[VXGE_HW_MAX_VIRTUAL_PATHS] =
	{[0 ...(VXGE_HW_MAX_VIRTUAL_PATHS - 1)] = 0xFF};
module_param_array(bw_percentage, uint, NULL, 0);

static struct vxge_drv_config *driver_config;

static inline int is_vxge_card_up(struct vxgedev *vdev)
{
	return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
}
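/*
 * Drain completed TxDs on one fifo. The tx queue lock is only tried, never
 * taken unconditionally: if another CPU holds it, this pass is skipped and
 * the hardware's "more" indication drives another iteration. Up to
 * NR_SKB_COMPLETED skbs are collected per pass and freed outside the lock.
 */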
static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
{
	struct sk_buff **skb_ptr = NULL;
	struct sk_buff **temp;
#define NR_SKB_COMPLETED 128
	struct sk_buff *completed[NR_SKB_COMPLETED];
	int more;

	do {
		more = 0;
		skb_ptr = completed;

		if (__netif_tx_trylock(fifo->txq)) {
			vxge_hw_vpath_poll_tx(fifo->handle, &skb_ptr,
					NR_SKB_COMPLETED, &more);
			__netif_tx_unlock(fifo->txq);
		}

		/* free SKBs */
		for (temp = completed; temp != skb_ptr; temp++)
			dev_kfree_skb_irq(*temp);
	} while (more);
}

static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev *vdev)
{
	int i;

	/* Complete all transmits */
	for (i = 0; i < vdev->no_of_vpath; i++)
		VXGE_COMPLETE_VPATH_TX(&vdev->vpaths[i].fifo);
}

static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
{
	int i;
	struct vxge_ring *ring;

	/* Complete all receives */
	for (i = 0; i < vdev->no_of_vpath; i++) {
		ring = &vdev->vpaths[i].ring;
		vxge_hw_vpath_poll_rx(ring->handle);
	}
}

/*
 * vxge_callback_link_up
 *
 * This function is called during interrupt context to notify link up state
 * change.
 */
void
vxge_callback_link_up(struct __vxge_hw_device *hldev)
{
	struct net_device *dev = hldev->ndev;
	struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		vdev->ndev->name, __func__, __LINE__);
	netdev_notice(vdev->ndev, "Link Up\n");
	vdev->stats.link_up++;

	netif_carrier_on(vdev->ndev);
	netif_tx_wake_all_queues(vdev->ndev);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
}

/*
 * vxge_callback_link_down
 *
 * This function is called during interrupt context to notify link down state
 * change.
 */
void
vxge_callback_link_down(struct __vxge_hw_device *hldev)
{
	struct net_device *dev = hldev->ndev;
	struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
	netdev_notice(vdev->ndev, "Link Down\n");

	vdev->stats.link_down++;
	netif_carrier_off(vdev->ndev);
	netif_tx_stop_all_queues(vdev->ndev);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
}

/*
 * vxge_rx_alloc
 *
 * Allocate SKB.
 */
static struct sk_buff*
vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size)
{
	struct net_device *dev;
	struct sk_buff *skb;
	struct vxge_rx_priv *rx_priv;

	dev = ring->ndev;
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);

	rx_priv = vxge_hw_ring_rxd_private_get(dtrh);

	/* try to allocate skb first. this one may fail */
	skb = netdev_alloc_skb(dev, skb_size +
		VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
	if (skb == NULL) {
		vxge_debug_mem(VXGE_ERR,
			"%s: out of memory to allocate SKB", dev->name);
		ring->stats.skb_alloc_fail++;
		return NULL;
	}

	vxge_debug_mem(VXGE_TRACE,
		"%s: %s:%d Skb : 0x%p", ring->ndev->name,
		__func__, __LINE__, skb);

	skb_reserve(skb, VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);

	rx_priv->skb = skb;
	rx_priv->skb_data = NULL;
	rx_priv->data_size = skb_size;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

	return skb;
}

/*
 * vxge_rx_map
 */
static int vxge_rx_map(void *dtrh, struct vxge_ring *ring)
{
	struct vxge_rx_priv *rx_priv;
	dma_addr_t dma_addr;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	rx_priv = vxge_hw_ring_rxd_private_get(dtrh);

	rx_priv->skb_data = rx_priv->skb->data;
	dma_addr = pci_map_single(ring->pdev, rx_priv->skb_data,
				rx_priv->data_size, PCI_DMA_FROMDEVICE);

	if (unlikely(pci_dma_mapping_error(ring->pdev, dma_addr))) {
		ring->stats.pci_map_fail++;
		return -EIO;
	}
	vxge_debug_mem(VXGE_TRACE,
		"%s: %s:%d 1 buffer mode dma_addr = 0x%llx",
		ring->ndev->name, __func__, __LINE__,
		(unsigned long long)dma_addr);
	vxge_hw_ring_rxd_1b_set(dtrh, dma_addr, rx_priv->data_size);

	rx_priv->data_dma = dma_addr;
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

	return 0;
}

/*
 * vxge_rx_initial_replenish
 * Allocation of RxD as an initial replenish procedure.
 */
static enum vxge_hw_status
vxge_rx_initial_replenish(void *dtrh, void *userdata)
{
	struct vxge_ring *ring = (struct vxge_ring *)userdata;
	struct vxge_rx_priv *rx_priv;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	if (vxge_rx_alloc(dtrh, ring,
		VXGE_LL_MAX_FRAME_SIZE(ring->ndev)) == NULL)
		return VXGE_HW_FAIL;

	if (vxge_rx_map(dtrh, ring)) {
		rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
		dev_kfree_skb(rx_priv->skb);

		return VXGE_HW_FAIL;
	}
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

	return VXGE_HW_OK;
}
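/*
 * vxge_rx_complete
 *
 * Hand a received frame up the stack: record the rx queue, resolve the
 * protocol, update ring statistics, then deliver through GRO (optionally
 * VLAN-accelerated) when enabled, or the plain netif receive path otherwise.
 */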
static inline void
vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
		 int pkt_length, struct vxge_hw_ring_rxd_info *ext_info)
{
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	skb_record_rx_queue(skb, ring->driver_id);
	skb->protocol = eth_type_trans(skb, ring->ndev);

	ring->stats.rx_frms++;
	ring->stats.rx_bytes += pkt_length;

	if (skb->pkt_type == PACKET_MULTICAST)
		ring->stats.rx_mcast++;

	vxge_debug_rx(VXGE_TRACE,
		"%s: %s:%d skb protocol = %d",
		ring->ndev->name, __func__, __LINE__, skb->protocol);

	if (ring->gro_enable) {
		if (ring->vlgrp && ext_info->vlan &&
			(ring->vlan_tag_strip ==
				VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE))
			vlan_gro_receive(ring->napi_p, ring->vlgrp,
					ext_info->vlan, skb);
		else
			napi_gro_receive(ring->napi_p, skb);
	} else {
		if (ring->vlgrp && vlan &&
			(ring->vlan_tag_strip ==
				VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE))
			vlan_hwaccel_receive_skb(skb, ring->vlgrp, vlan);
		else
			netif_receive_skb(skb);
	}
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
}

static inline void vxge_re_pre_post(void *dtr, struct vxge_ring *ring,
				    struct vxge_rx_priv *rx_priv)
{
	pci_dma_sync_single_for_device(ring->pdev,
		rx_priv->data_dma, rx_priv->data_size, PCI_DMA_FROMDEVICE);

	vxge_hw_ring_rxd_1b_set(dtr, rx_priv->data_dma, rx_priv->data_size);
	vxge_hw_ring_rxd_pre_post(ring->handle, dtr);
}
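/*
 * Post a descriptor back to the ring. Posts are batched: the
 * write-memory-barrier doorbell (post_post_wmb) is issued only once per
 * VXGE_HW_RXSYNC_FREQ_CNT descriptors, with *first_dtr carrying the
 * descriptor whose doorbell is still pending; the rest are posted with the
 * cheaper post_post variant.
 */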
static inline void vxge_post(int *dtr_cnt, void **first_dtr,
			     void *post_dtr, struct __vxge_hw_ring *ringh)
{
	int dtr_count = *dtr_cnt;
	if ((*dtr_cnt % VXGE_HW_RXSYNC_FREQ_CNT) == 0) {
		if (*first_dtr)
			vxge_hw_ring_rxd_post_post_wmb(ringh, *first_dtr);
		*first_dtr = post_dtr;
	} else
		vxge_hw_ring_rxd_post_post(ringh, post_dtr);
	dtr_count++;
	*dtr_cnt = dtr_count;
}

/*
 * vxge_rx_1b_compl
 *
 * If the interrupt is because of a received frame or if the receive ring
 * contains fresh as yet un-processed frames, this function is called.
 */
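/*
 * Note on buffer handling below: frames larger than
 * VXGE_LL_RX_COPY_THRESHOLD keep the original skb and a replacement buffer
 * is allocated for the descriptor; smaller frames are copied into a fresh
 * small skb so the existing DMA buffer can be recycled in place.
 */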
enum vxge_hw_status
vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
		 u8 t_code, void *userdata)
{
	struct vxge_ring *ring = (struct vxge_ring *)userdata;
	struct net_device *dev = ring->ndev;
	unsigned int dma_sizes;
	void *first_dtr = NULL;
	int dtr_cnt = 0;
	int data_size;
	dma_addr_t data_dma;
	int pkt_length;
	struct sk_buff *skb;
	struct vxge_rx_priv *rx_priv;
	struct vxge_hw_ring_rxd_info ext_info;
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	ring->pkts_processed = 0;

	vxge_hw_ring_replenish(ringh);

	do {
		prefetch((char *)dtr + L1_CACHE_BYTES);
		rx_priv = vxge_hw_ring_rxd_private_get(dtr);
		skb = rx_priv->skb;
		data_size = rx_priv->data_size;
		data_dma = rx_priv->data_dma;
		prefetch(rx_priv->skb_data);

		vxge_debug_rx(VXGE_TRACE,
			"%s: %s:%d skb = 0x%p",
			ring->ndev->name, __func__, __LINE__, skb);

		vxge_hw_ring_rxd_1b_get(ringh, dtr, &dma_sizes);
		pkt_length = dma_sizes;

		pkt_length -= ETH_FCS_LEN;

		vxge_debug_rx(VXGE_TRACE,
			"%s: %s:%d Packet Length = %d",
			ring->ndev->name, __func__, __LINE__, pkt_length);

		vxge_hw_ring_rxd_1b_info_get(ringh, dtr, &ext_info);

		/* check skb validity */
		vxge_assert(skb);

		prefetch((char *)skb + L1_CACHE_BYTES);
		if (unlikely(t_code)) {
			if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) !=
				VXGE_HW_OK) {

				ring->stats.rx_errors++;
				vxge_debug_rx(VXGE_TRACE,
					"%s: %s :%d Rx T_code is %d",
					ring->ndev->name, __func__,
					__LINE__, t_code);
				/* If the t_code is not supported and if the
				 * t_code is other than 0x5 (unparseable packet
				 * such as unknown IPv6 header), Drop it !!!
				 */
				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				ring->stats.rx_dropped++;
				continue;
			}
		}

		if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) {
			if (vxge_rx_alloc(dtr, ring, data_size) != NULL) {
				if (!vxge_rx_map(dtr, ring)) {
					skb_put(skb, pkt_length);

					pci_unmap_single(ring->pdev, data_dma,
						data_size, PCI_DMA_FROMDEVICE);

					vxge_hw_ring_rxd_pre_post(ringh, dtr);
					vxge_post(&dtr_cnt, &first_dtr, dtr,
						ringh);
				} else {
					dev_kfree_skb(rx_priv->skb);
					rx_priv->skb = skb;
					rx_priv->data_size = data_size;
					vxge_re_pre_post(dtr, ring, rx_priv);

					vxge_post(&dtr_cnt, &first_dtr, dtr,
						ringh);
					ring->stats.rx_dropped++;
					break;
				}
			} else {
				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				ring->stats.rx_dropped++;
				break;
			}
		} else {
			struct sk_buff *skb_up;

			skb_up = netdev_alloc_skb(dev, pkt_length +
				VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
			if (skb_up != NULL) {
				skb_reserve(skb_up,
				    VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);

				pci_dma_sync_single_for_cpu(ring->pdev,
					data_dma, data_size,
					PCI_DMA_FROMDEVICE);

				vxge_debug_mem(VXGE_TRACE,
					"%s: %s:%d skb_up = %p",
					ring->ndev->name, __func__,
					__LINE__, skb);
				memcpy(skb_up->data, skb->data, pkt_length);

				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr,
					ringh);
				/* will netif_rx small SKB instead */
				skb = skb_up;
				skb_put(skb, pkt_length);
			} else {
				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				vxge_debug_rx(VXGE_ERR,
					"%s: vxge_rx_1b_compl: out of "
					"memory", dev->name);
				ring->stats.skb_alloc_fail++;
				break;
			}
		}

		if ((ext_info.proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP) &&
		    !(ext_info.proto & VXGE_HW_FRAME_PROTO_IP_FRAG) &&
		    ring->rx_csum && /* Offload Rx side CSUM */
		    ext_info.l3_cksum == VXGE_HW_L3_CKSUM_OK &&
		    ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		vxge_rx_complete(ring, skb, ext_info.vlan,
			pkt_length, &ext_info);

		ring->budget--;
		ring->pkts_processed++;
		if (!ring->budget)
			break;

	} while (vxge_hw_ring_rxd_next_completed(ringh, &dtr,
		&t_code) == VXGE_HW_OK);

	if (first_dtr)
		vxge_hw_ring_rxd_post_post_wmb(ringh, first_dtr);

	vxge_debug_entryexit(VXGE_TRACE,
				"%s:%d Exiting...",
				__func__, __LINE__);
	return VXGE_HW_OK;
}
/*
 * vxge_xmit_compl
 *
 * If an interrupt was raised to indicate DMA complete of the Tx packet,
 * this function is called. It identifies the last TxD whose buffer was
 * freed and frees all skbs whose data has already been DMA'ed into the
 * NIC's internal memory.
 */
enum vxge_hw_status
vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
		enum vxge_hw_fifo_tcode t_code, void *userdata,
		struct sk_buff ***skb_ptr, int nr_skb, int *more)
{
	struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
	struct sk_buff *skb, **done_skb = *skb_ptr;
	int pkt_cnt = 0;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Entered....", __func__, __LINE__);

	do {
		int frg_cnt;
		skb_frag_t *frag;
		int i = 0, j;
		struct vxge_tx_priv *txd_priv =
			vxge_hw_fifo_txdl_private_get(dtr);

		skb = txd_priv->skb;
		frg_cnt = skb_shinfo(skb)->nr_frags;
		frag = &skb_shinfo(skb)->frags[0];

		vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d fifo_hw = %p dtr = %p "
			"tcode = 0x%x", fifo->ndev->name, __func__,
			__LINE__, fifo_hw, dtr, t_code);

		/* check skb validity */
		vxge_assert(skb);

		vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d skb = %p itxd_priv = %p frg_cnt = %d",
			fifo->ndev->name, __func__, __LINE__,
			skb, txd_priv, frg_cnt);

		if (unlikely(t_code)) {
			fifo->stats.tx_errors++;
			vxge_debug_tx(VXGE_ERR,
				"%s: tx: dtr %p completed due to "
				"error t_code %01x", fifo->ndev->name,
				dtr, t_code);
			vxge_hw_fifo_handle_tcode(fifo_hw, dtr, t_code);
		}

		/* for unfragmented skb */
		pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
				skb_headlen(skb), PCI_DMA_TODEVICE);

		for (j = 0; j < frg_cnt; j++) {
			pci_unmap_page(fifo->pdev,
					txd_priv->dma_buffers[i++],
					frag->size, PCI_DMA_TODEVICE);
			frag += 1;
		}

		vxge_hw_fifo_txdl_free(fifo_hw, dtr);

		/* Updating the statistics block */
		fifo->stats.tx_frms++;
		fifo->stats.tx_bytes += skb->len;

		*done_skb++ = skb;

		if (--nr_skb <= 0) {
			*more = 1;
			break;
		}

		pkt_cnt++;
		if (pkt_cnt > fifo->indicate_max_pkts)
			break;

	} while (vxge_hw_fifo_txdl_next_completed(fifo_hw,
				&dtr, &t_code) == VXGE_HW_OK);

	*skb_ptr = done_skb;
	if (netif_tx_queue_stopped(fifo->txq))
		netif_tx_wake_queue(fifo->txq);

	vxge_debug_entryexit(VXGE_TRACE,
				"%s: %s:%d Exiting...",
				fifo->ndev->name, __func__, __LINE__);
	return VXGE_HW_OK;
}

/* select a vpath to transmit the packet */
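/*
 * Illustrative example: with vdev->no_of_vpath == 4 the mask is
 * vpath_selector[3] == 3, so a non-fragmented TCP flow with source port
 * 1000 and destination port 80 hashes to (1000 + 80) & 3 == 0, i.e. the
 * packet is steered to fifo 0. Non-IP traffic always maps to vpath 0.
 */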
static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb)
{
	u16 queue_len, counter = 0;
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip;
		struct tcphdr *th;

		ip = ip_hdr(skb);

		if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) {
			th = (struct tcphdr *)(((unsigned char *)ip) +
					ip->ihl*4);

			queue_len = vdev->no_of_vpath;
			counter = (ntohs(th->source) +
				ntohs(th->dest)) &
				vdev->vpath_selector[queue_len - 1];
			if (counter >= queue_len)
				counter = queue_len - 1;
		}
	}
	return counter;
}

static enum vxge_hw_status vxge_search_mac_addr_in_list(
	struct vxge_vpath *vpath, u64 del_mac)
{
	struct list_head *entry, *next;
	list_for_each_safe(entry, next, &vpath->mac_addr_list) {
		if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac)
			return TRUE;
	}
	return FALSE;
}
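/*
 * vxge_learn_mac
 *
 * Learn the source MAC seen on transmit: return the vpath that already
 * lists it; otherwise add it to the DA table of the first vpath with room;
 * failing that, fall back to list-only storage on vpath 0, configuring it
 * as the catch-basin vpath if it is not one already.
 */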
static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
{
	struct macInfo mac_info;
	u8 *mac_address = NULL;
	u64 mac_addr = 0, vpath_vector = 0;
	int vpath_idx = 0;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath = NULL;
	struct __vxge_hw_device *hldev;

	hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);

	mac_address = (u8 *)&mac_addr;
	memcpy(mac_address, mac_header, ETH_ALEN);

	/* Is this mac address already in the list? */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		vpath = &vdev->vpaths[vpath_idx];
		if (vxge_search_mac_addr_in_list(vpath, mac_addr))
			return vpath_idx;
	}

	memset(&mac_info, 0, sizeof(struct macInfo));
	memcpy(mac_info.macaddr, mac_header, ETH_ALEN);

	/* Any vpath has room to add mac address to its da table? */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		vpath = &vdev->vpaths[vpath_idx];
		if (vpath->mac_addr_cnt < vpath->max_mac_addr_cnt) {
			/* Add this mac address to this vpath */
			mac_info.vpath_no = vpath_idx;
			mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
			status = vxge_add_mac_addr(vdev, &mac_info);
			if (status != VXGE_HW_OK)
				return -EPERM;
			return vpath_idx;
		}
	}

	mac_info.state = VXGE_LL_MAC_ADDR_IN_LIST;
	vpath_idx = 0;
	mac_info.vpath_no = vpath_idx;

	/* Is the first vpath already selected as catch-basin? */
	vpath = &vdev->vpaths[vpath_idx];
	if (vpath->mac_addr_cnt > vpath->max_mac_addr_cnt) {
		/* Add this mac address to this vpath */
		if (FALSE == vxge_mac_list_add(vpath, &mac_info))
			return -EPERM;
		return vpath_idx;
	}

	/* Select first vpath as catch-basin */
	vpath_vector = vxge_mBIT(vpath->device_id);
	status = vxge_hw_mgmt_reg_write(vpath->vdev->devh,
				vxge_hw_mgmt_reg_type_mrpcim,
				0,
				(ulong)offsetof(
					struct vxge_hw_mrpcim_reg,
					rts_mgr_cbasin_cfg),
				vpath_vector);
	if (status != VXGE_HW_OK) {
		vxge_debug_tx(VXGE_ERR,
			"%s: Unable to set the vpath-%d in catch-basin mode",
			VXGE_DRIVER_NAME, vpath->device_id);
		return -EPERM;
	}

	if (FALSE == vxge_mac_list_add(vpath, &mac_info))
		return -EPERM;

	return vpath_idx;
}

/**
 * vxge_xmit
 * @skb : the socket buffer containing the Tx data.
 * @dev : device pointer.
 *
 * This function is the Tx entry point of the driver. Neterion NIC supports
 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
 */
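/*
 * Note on queue flow control in vxge_xmit: the fifo's tx queue is stopped
 * when the last free TxD is about to be consumed (avail == 1) and on any
 * descriptor shortfall; vxge_xmit_compl wakes the queue again once
 * completions free descriptors.
 */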
static netdev_tx_t
vxge_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxge_fifo *fifo = NULL;
	void *dtr_priv;
	void *dtr = NULL;
	struct vxgedev *vdev = NULL;
	enum vxge_hw_status status;
	int frg_cnt, first_frg_len;
	skb_frag_t *frag;
	int i = 0, j = 0, avail;
	u64 dma_pointer;
	struct vxge_tx_priv *txdl_priv = NULL;
	struct __vxge_hw_fifo *fifo_hw;
	int offload_type;
	int vpath_no = 0;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
			dev->name, __func__, __LINE__);

	/* A buffer with no data will be dropped */
	if (unlikely(skb->len <= 0)) {
		vxge_debug_tx(VXGE_ERR,
			"%s: Buffer has no data", dev->name);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	vdev = (struct vxgedev *)netdev_priv(dev);

	if (unlikely(!is_vxge_card_up(vdev))) {
		vxge_debug_tx(VXGE_ERR,
			"%s: vdev not initialized", dev->name);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (vdev->config.addr_learn_en) {
		vpath_no = vxge_learn_mac(vdev, skb->data + ETH_ALEN);
		if (vpath_no == -EPERM) {
			vxge_debug_tx(VXGE_ERR,
				"%s: Failed to store the mac address",
				dev->name);
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	}

	if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
		vpath_no = skb_get_queue_mapping(skb);
	else if (vdev->config.tx_steering_type == TX_PORT_STEERING)
		vpath_no = vxge_get_vpath_no(vdev, skb);

	vxge_debug_tx(VXGE_TRACE, "%s: vpath_no= %d", dev->name, vpath_no);

	if (vpath_no >= vdev->no_of_vpath)
		vpath_no = 0;

	fifo = &vdev->vpaths[vpath_no].fifo;
	fifo_hw = fifo->handle;

	if (netif_tx_queue_stopped(fifo->txq))
		return NETDEV_TX_BUSY;

	avail = vxge_hw_fifo_free_txdl_count_get(fifo_hw);
	if (avail == 0) {
		vxge_debug_tx(VXGE_ERR,
			"%s: No free TXDs available", dev->name);
		fifo->stats.txd_not_free++;
		goto _exit0;
	}

	/* Last TXD? Stop tx queue to avoid dropping packets. TX
	 * completion will resume the queue.
	 */
	if (avail == 1)
		netif_tx_stop_queue(fifo->txq);

	status = vxge_hw_fifo_txdl_reserve(fifo_hw, &dtr, &dtr_priv);
	if (unlikely(status != VXGE_HW_OK)) {
		vxge_debug_tx(VXGE_ERR,
			"%s: Out of descriptors", dev->name);
		fifo->stats.txd_out_of_desc++;
		goto _exit0;
	}

	vxge_debug_tx(VXGE_TRACE,
		"%s: %s:%d fifo_hw = %p dtr = %p dtr_priv = %p",
		dev->name, __func__, __LINE__,
		fifo_hw, dtr, dtr_priv);

	if (vdev->vlgrp && vlan_tx_tag_present(skb)) {
		u16 vlan_tag = vlan_tx_tag_get(skb);
		vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag);
	}

	first_frg_len = skb_headlen(skb);

	dma_pointer = pci_map_single(fifo->pdev, skb->data, first_frg_len,
				PCI_DMA_TODEVICE);

	if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) {
		vxge_hw_fifo_txdl_free(fifo_hw, dtr);
		fifo->stats.pci_map_fail++;
		goto _exit0;
	}

	txdl_priv = vxge_hw_fifo_txdl_private_get(dtr);
	txdl_priv->skb = skb;
	txdl_priv->dma_buffers[j] = dma_pointer;

	frg_cnt = skb_shinfo(skb)->nr_frags;
	vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d skb = %p txdl_priv = %p "
			"frag_cnt = %d dma_pointer = 0x%llx", dev->name,
			__func__, __LINE__, skb, txdl_priv,
			frg_cnt, (unsigned long long)dma_pointer);

	vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
		first_frg_len);

	frag = &skb_shinfo(skb)->frags[0];
	for (i = 0; i < frg_cnt; i++) {
		/* ignore 0 length fragment */
		if (!frag->size)
			continue;

		dma_pointer = (u64) pci_map_page(fifo->pdev, frag->page,
				frag->page_offset, frag->size,
				PCI_DMA_TODEVICE);

		if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer)))
			goto _exit2;
		vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d frag = %d dma_pointer = 0x%llx",
				dev->name, __func__, __LINE__, i,
				(unsigned long long)dma_pointer);

		txdl_priv->dma_buffers[j] = dma_pointer;
		vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
					frag->size);
		frag += 1;
	}

	offload_type = vxge_offload_type(skb);

	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
		int mss = vxge_tcp_mss(skb);
		if (mss) {
			vxge_debug_tx(VXGE_TRACE, "%s: %s:%d mss = %d",
				dev->name, __func__, __LINE__, mss);
			vxge_hw_fifo_txdl_mss_set(dtr, mss);
		} else {
			vxge_assert(skb->len <=
				dev->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE);
			vxge_assert(0);
			goto _exit1;
		}
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		vxge_hw_fifo_txdl_cksum_set_bits(dtr,
					VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
					VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN |
					VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN);

	vxge_hw_fifo_txdl_post(fifo_hw, dtr);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
		dev->name, __func__, __LINE__);
	return NETDEV_TX_OK;

_exit2:
	vxge_debug_tx(VXGE_TRACE, "%s: pci_map_page failed", dev->name);
_exit1:
	j = 0;
	frag = &skb_shinfo(skb)->frags[0];

	pci_unmap_single(fifo->pdev, txdl_priv->dma_buffers[j++],
			skb_headlen(skb), PCI_DMA_TODEVICE);

	for (; j < i; j++) {
		pci_unmap_page(fifo->pdev, txdl_priv->dma_buffers[j],
			frag->size, PCI_DMA_TODEVICE);
		frag += 1;
	}

	vxge_hw_fifo_txdl_free(fifo_hw, dtr);
_exit0:
	netif_tx_stop_queue(fifo->txq);
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
/*
 * vxge_rx_term
 *
 * Function will be called by hw function to abort all outstanding receive
 * descriptors.
 */
static void
vxge_rx_term(void *dtrh, enum vxge_hw_rxd_state state, void *userdata)
{
	struct vxge_ring *ring = (struct vxge_ring *)userdata;
	struct vxge_rx_priv *rx_priv =
		vxge_hw_ring_rxd_private_get(dtrh);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	if (state != VXGE_HW_RXD_STATE_POSTED)
		return;

	pci_unmap_single(ring->pdev, rx_priv->data_dma,
		rx_priv->data_size, PCI_DMA_FROMDEVICE);

	dev_kfree_skb(rx_priv->skb);
	rx_priv->skb_data = NULL;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...",
		ring->ndev->name, __func__, __LINE__);
}

/*
 * vxge_tx_term
 *
 * Function will be called to abort all outstanding tx descriptors
 */
static void
vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
{
	struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
	skb_frag_t *frag;
	int i = 0, j, frg_cnt;
	struct vxge_tx_priv *txd_priv = vxge_hw_fifo_txdl_private_get(dtrh);
	struct sk_buff *skb = txd_priv->skb;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	if (state != VXGE_HW_TXDL_STATE_POSTED)
		return;

	/* check skb validity */
	vxge_assert(skb);

	frg_cnt = skb_shinfo(skb)->nr_frags;
	frag = &skb_shinfo(skb)->frags[0];

	/* for unfragmented skb */
	pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
		skb_headlen(skb), PCI_DMA_TODEVICE);

	for (j = 0; j < frg_cnt; j++) {
		pci_unmap_page(fifo->pdev, txd_priv->dma_buffers[i++],
			       frag->size, PCI_DMA_TODEVICE);
		frag += 1;
	}

	dev_kfree_skb(skb);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}
/**
 * vxge_set_multicast
 * @dev: pointer to the device structure
 *
 * Entry point for multicast address enable/disable
 * This function is a driver entry point which gets called by the kernel
 * whenever multicast addresses must be enabled/disabled. This also gets
 * called to set/reset promiscuous mode. Depending on the device flags, we
 * determine whether multicast addresses must be enabled or promiscuous mode
 * is to be disabled, etc.
 */
static void vxge_set_multicast(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct vxgedev *vdev;
	int i, mcast_cnt = 0;
	struct __vxge_hw_device *hldev;
	struct vxge_vpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct macInfo mac_info;
	int vpath_idx = 0;
	struct vxge_mac_addrs *mac_entry;
	struct list_head *list_head;
	struct list_head *entry, *next;
	u8 *mac_address = NULL;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d", __func__, __LINE__);

	vdev = (struct vxgedev *)netdev_priv(dev);
	hldev = (struct __vxge_hw_device *)vdev->devh;

	if (unlikely(!is_vxge_card_up(vdev)))
		return;

	if ((dev->flags & IFF_ALLMULTI) && (!vdev->all_multi_flg)) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vxge_assert(vpath->is_open);
			status = vxge_hw_vpath_mcast_enable(vpath->handle);
			if (status != VXGE_HW_OK)
				vxge_debug_init(VXGE_ERR, "failed to enable "
						"multicast, status %d", status);
			vdev->all_multi_flg = 1;
		}
	} else if (!(dev->flags & IFF_ALLMULTI) && (vdev->all_multi_flg)) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vxge_assert(vpath->is_open);
			status = vxge_hw_vpath_mcast_disable(vpath->handle);
			if (status != VXGE_HW_OK)
				vxge_debug_init(VXGE_ERR, "failed to disable "
						"multicast, status %d", status);
			vdev->all_multi_flg = 0;
		}
	}

	if (!vdev->config.addr_learn_en) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vxge_assert(vpath->is_open);

			if (dev->flags & IFF_PROMISC)
				status = vxge_hw_vpath_promisc_enable(
					vpath->handle);
			else
				status = vxge_hw_vpath_promisc_disable(
					vpath->handle);
			if (status != VXGE_HW_OK)
				vxge_debug_init(VXGE_ERR, "failed to %s promisc"
					", status %d", dev->flags&IFF_PROMISC ?
					"enable" : "disable", status);
		}
	}

	memset(&mac_info, 0, sizeof(struct macInfo));

	/* Update individual M_CAST address list */
	if ((!vdev->all_multi_flg) && netdev_mc_count(dev)) {
		mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
		list_head = &vdev->vpaths[0].mac_addr_list;
		if ((netdev_mc_count(dev) +
			(vdev->vpaths[0].mac_addr_cnt - mcast_cnt)) >
				vdev->vpaths[0].max_mac_addr_cnt)
			goto _set_all_mcast;

		/* Delete previous MC's */
		for (i = 0; i < mcast_cnt; i++) {
			list_for_each_safe(entry, next, list_head) {
				mac_entry = (struct vxge_mac_addrs *)entry;
				/* Copy the mac address to delete */
				mac_address = (u8 *)&mac_entry->macaddr;
				memcpy(mac_info.macaddr, mac_address, ETH_ALEN);

				/* Is this a multicast address */
				if (0x01 & mac_info.macaddr[0]) {
					for (vpath_idx = 0; vpath_idx <
						vdev->no_of_vpath;
						vpath_idx++) {
						mac_info.vpath_no = vpath_idx;
						status = vxge_del_mac_addr(
								vdev,
								&mac_info);
					}
				}
			}
		}

		/* Add new ones */
		netdev_for_each_mc_addr(ha, dev) {
			memcpy(mac_info.macaddr, ha->addr, ETH_ALEN);
			for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
					vpath_idx++) {
				mac_info.vpath_no = vpath_idx;
				mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
				status = vxge_add_mac_addr(vdev, &mac_info);
				if (status != VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
						"%s:%d Setting individual "
						"multicast address failed",
						__func__, __LINE__);
					goto _set_all_mcast;
				}
			}
		}

		return;
_set_all_mcast:
		mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
		/* Delete previous MC's */
		for (i = 0; i < mcast_cnt; i++) {
			list_for_each_safe(entry, next, list_head) {
				mac_entry = (struct vxge_mac_addrs *)entry;
				/* Copy the mac address to delete */
				mac_address = (u8 *)&mac_entry->macaddr;
				memcpy(mac_info.macaddr, mac_address, ETH_ALEN);

				/* Is this a multicast address */
				if (0x01 & mac_info.macaddr[0])
					break;
			}

			for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
					vpath_idx++) {
				mac_info.vpath_no = vpath_idx;
				status = vxge_del_mac_addr(vdev, &mac_info);
			}
		}

		/* Enable all multicast */
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vxge_assert(vpath->is_open);

			status = vxge_hw_vpath_mcast_enable(vpath->handle);
			if (status != VXGE_HW_OK) {
				vxge_debug_init(VXGE_ERR,
					"%s:%d Enabling all multicasts failed",
					__func__, __LINE__);
			}
			vdev->all_multi_flg = 1;
		}
		dev->flags |= IFF_ALLMULTI;
	}

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}
/**
 * vxge_set_mac_addr
 * @dev: pointer to the device structure
 *
 * Update entry "0" (default MAC addr)
 */
static int vxge_set_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct vxgedev *vdev;
	struct __vxge_hw_device *hldev;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct macInfo mac_info_new, mac_info_old;
	int vpath_idx = 0;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	vdev = (struct vxgedev *)netdev_priv(dev);
	hldev = vdev->devh;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memset(&mac_info_new, 0, sizeof(struct macInfo));
	memset(&mac_info_old, 0, sizeof(struct macInfo));

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d Exiting...",
		__func__, __LINE__);

	/* Get the old address */
	memcpy(mac_info_old.macaddr, dev->dev_addr, dev->addr_len);

	/* Copy the new address */
	memcpy(mac_info_new.macaddr, addr->sa_data, dev->addr_len);

	/* First delete the old mac address from all the vpaths
	as we can't specify the index while adding new mac address */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		struct vxge_vpath *vpath = &vdev->vpaths[vpath_idx];
		if (!vpath->is_open) {
			/* This can happen when this interface is added/removed
			to the bonding interface. Delete this station address
			from the linked list */
			vxge_mac_list_del(vpath, &mac_info_old);

			/* Add this new address to the linked list
			for later restoring */
			vxge_mac_list_add(vpath, &mac_info_new);

			continue;
		}
		/* Delete the station address */
		mac_info_old.vpath_no = vpath_idx;
		status = vxge_del_mac_addr(vdev, &mac_info_old);
	}

	if (unlikely(!is_vxge_card_up(vdev))) {
		memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
		return VXGE_HW_OK;
	}

	/* Set this mac address to all the vpaths */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		mac_info_new.vpath_no = vpath_idx;
		mac_info_new.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
		status = vxge_add_mac_addr(vdev, &mac_info_new);
		if (status != VXGE_HW_OK)
			return -EINVAL;
	}

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	return status;
}
/*
 * vxge_vpath_intr_enable
 * @vdev: pointer to vdev
 * @vp_id: vpath for which to enable the interrupts
 *
 * Enables the interrupts for the vpath
 */
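/*
 * MSI-X layout assumed by the code below: each vpath owns a block of
 * VXGE_HW_VPATH_MSIX_ACTIVE vectors starting at
 * device_id * VXGE_HW_VPATH_MSIX_ACTIVE, with vector 0 mapped to tx and
 * vector 1 to rx (tim_msix_id = {0, 1, 0, 0}); the alarm vector
 * (VXGE_ALARM_MSIX_ID) lives in the first vpath's block.
 */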
  1050. void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
  1051. {
  1052. struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
  1053. int msix_id = 0;
  1054. int tim_msix_id[4] = {0, 1, 0, 0};
  1055. int alarm_msix_id = VXGE_ALARM_MSIX_ID;
  1056. vxge_hw_vpath_intr_enable(vpath->handle);
  1057. if (vdev->config.intr_type == INTA)
  1058. vxge_hw_vpath_inta_unmask_tx_rx(vpath->handle);
  1059. else {
  1060. vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
  1061. alarm_msix_id);
  1062. msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
  1063. vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
  1064. vxge_hw_vpath_msix_unmask(vpath->handle, msix_id + 1);
  1065. /* enable the alarm vector */
  1066. msix_id = (vpath->handle->vpath->hldev->first_vp_id *
  1067. VXGE_HW_VPATH_MSIX_ACTIVE) + alarm_msix_id;
  1068. vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
  1069. }
  1070. }
/*
 * vxge_vpath_intr_disable
 * @vdev: pointer to vdev
 * @vp_id: vpath for which to disable the interrupts
 *
 * Disables the interrupts for the vpath
 */
void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
{
	struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
	int msix_id;

	vxge_hw_vpath_intr_disable(vpath->handle);

	if (vdev->config.intr_type == INTA)
		vxge_hw_vpath_inta_mask_tx_rx(vpath->handle);
	else {
		msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
		vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
		vxge_hw_vpath_msix_mask(vpath->handle, msix_id + 1);

		/* disable the alarm vector */
		msix_id = (vpath->handle->vpath->hldev->first_vp_id *
			VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
		vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
	}
}
/*
 * vxge_reset_vpath
 * @vdev: pointer to vdev
 * @vp_id: vpath to reset
 *
 * Resets the vpath
 */
static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
	int ret = 0;

	/* check if device is down already */
	if (unlikely(!is_vxge_card_up(vdev)))
		return 0;

	/* is device reset already scheduled */
	if (test_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
		return 0;

	if (vpath->handle) {
		if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
			if (is_vxge_card_up(vdev) &&
			    vxge_hw_vpath_recover_from_reset(vpath->handle)
					!= VXGE_HW_OK) {
				vxge_debug_init(VXGE_ERR,
					"vxge_hw_vpath_recover_from_reset "
					"failed for vpath:%d", vp_id);
				return VXGE_HW_FAIL;
			}
		} else {
			vxge_debug_init(VXGE_ERR,
				"vxge_hw_vpath_reset failed for "
				"vpath:%d", vp_id);
			return VXGE_HW_FAIL;
		}
	} else
		return VXGE_HW_FAIL;

	vxge_restore_vpath_mac_addr(vpath);
	vxge_restore_vpath_vid_table(vpath);

	/* Enable all broadcast */
	vxge_hw_vpath_bcast_enable(vpath->handle);

	/* Enable all multicast */
	if (vdev->all_multi_flg) {
		status = vxge_hw_vpath_mcast_enable(vpath->handle);
		if (status != VXGE_HW_OK)
			vxge_debug_init(VXGE_ERR,
				"%s:%d Enabling multicast failed",
				__func__, __LINE__);
	}

	/* Enable the interrupts */
	vxge_vpath_intr_enable(vdev, vp_id);

	smp_wmb();

	/* Enable the flow of traffic through the vpath */
	vxge_hw_vpath_enable(vpath->handle);

	smp_wmb();
	vxge_hw_vpath_rx_doorbell_init(vpath->handle);
	vpath->ring.last_status = VXGE_HW_OK;

	/* Vpath reset done */
	clear_bit(vp_id, &vdev->vp_reset);

	/* Start the vpath queue */
	if (netif_tx_queue_stopped(vpath->fifo.txq))
		netif_tx_wake_queue(vpath->fifo.txq);

	return ret;
}
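
/*
 * A sketch of how a per-vpath reset is driven, based on the timer
 * handlers later in this file: a detector sets the vpath's bit in
 * vdev->vp_reset and quiesces the vpath,
 *
 *	if (!test_and_set_bit(i, &vdev->vp_reset)) {
 *		vxge_vpath_intr_disable(vdev, i);
 *		netif_tx_stop_queue(vdev->vpaths[i].fifo.txq);
 *	}
 *
 * and vxge_poll_vp_reset() later finds the bit and calls
 * vxge_reset_vpath(), which clears it once the vpath is back up.
 */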
static int do_vxge_reset(struct vxgedev *vdev, int event)
{
	enum vxge_hw_status status;
	int ret = 0, vp_id, i;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET)) {
		/* check if device is down already */
		if (unlikely(!is_vxge_card_up(vdev)))
			return 0;

		/* is reset already scheduled */
		if (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
			return 0;
	}

	if (event == VXGE_LL_FULL_RESET) {
		/* wait for all the vpath reset to complete */
		for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
			while (test_bit(vp_id, &vdev->vp_reset))
				msleep(50);
		}

		/* if execution mode is set to debug, don't reset the adapter */
		if (unlikely(vdev->exec_mode)) {
			vxge_debug_init(VXGE_ERR,
				"%s: execution mode is debug, returning..",
				vdev->ndev->name);
			clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
			netif_tx_stop_all_queues(vdev->ndev);
			return 0;
		}
	}

	if (event == VXGE_LL_FULL_RESET) {
		vxge_hw_device_intr_disable(vdev->devh);

		switch (vdev->cric_err_event) {
		case VXGE_HW_EVENT_UNKNOWN:
			netif_tx_stop_all_queues(vdev->ndev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"unknown error",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_RESET_START:
			break;
		case VXGE_HW_EVENT_RESET_COMPLETE:
		case VXGE_HW_EVENT_LINK_DOWN:
		case VXGE_HW_EVENT_LINK_UP:
		case VXGE_HW_EVENT_ALARM_CLEARED:
		case VXGE_HW_EVENT_ECCERR:
		case VXGE_HW_EVENT_MRPCIM_ECCERR:
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_FIFO_ERR:
		case VXGE_HW_EVENT_VPATH_ERR:
			break;
		case VXGE_HW_EVENT_CRITICAL_ERR:
			netif_tx_stop_all_queues(vdev->ndev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"serious error",
				vdev->ndev->name);
			/* SOP or device reset required */
			/* This event is not currently used */
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_SERR:
			netif_tx_stop_all_queues(vdev->ndev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"serious error",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_SRPCIM_SERR:
		case VXGE_HW_EVENT_MRPCIM_SERR:
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_SLOT_FREEZE:
			netif_tx_stop_all_queues(vdev->ndev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"slot freeze",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		default:
			break;
		}
	}

	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET))
		netif_tx_stop_all_queues(vdev->ndev);

	if (event == VXGE_LL_FULL_RESET) {
		status = vxge_reset_all_vpaths(vdev);
		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: can not reset vpaths",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		}
	}

	if (event == VXGE_LL_COMPL_RESET) {
		for (i = 0; i < vdev->no_of_vpath; i++)
			if (vdev->vpaths[i].handle) {
				if (vxge_hw_vpath_recover_from_reset(
					vdev->vpaths[i].handle)
						!= VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
						"vxge_hw_vpath_recover_"
						"from_reset failed for vpath: "
						"%d", i);
					ret = -EPERM;
					goto out;
				}
			} else {
				vxge_debug_init(VXGE_ERR,
					"vxge_hw_vpath_reset failed for "
					"vpath:%d", i);
				ret = -EPERM;
				goto out;
			}
	}

	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET)) {
		/* Reprogram the DA table with populated mac addresses */
		for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
			vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]);
			vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]);
		}

		/* enable vpath interrupts */
		for (i = 0; i < vdev->no_of_vpath; i++)
			vxge_vpath_intr_enable(vdev, i);

		vxge_hw_device_intr_enable(vdev->devh);

		smp_wmb();

		/* Indicate card up */
		set_bit(__VXGE_STATE_CARD_UP, &vdev->state);

		/* Get the traffic to flow through the vpaths */
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vxge_hw_vpath_enable(vdev->vpaths[i].handle);
			smp_wmb();
			vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle);
		}

		netif_tx_wake_all_queues(vdev->ndev);
	}

out:
	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);

	/* Indicate reset done */
	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET))
		clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);

	return ret;
}
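
/*
 * Summary of the reset events handled above:
 *
 *	VXGE_LL_FULL_RESET  - quiesce, reset and recover every vpath,
 *			      reprogram the DA/VID tables and restart traffic.
 *	VXGE_LL_START_RESET - first half only: mark the card as resetting
 *			      and stop the Tx queues.
 *	VXGE_LL_COMPL_RESET - second half: recover the vpaths, reprogram
 *			      the tables and bring traffic back up.
 */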
/*
 * vxge_reset
 * @vdev: pointer to ll device
 *
 * driver may reset the chip on events of serr, eccerr, etc
 */
int vxge_reset(struct vxgedev *vdev)
{
	return do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
}
/**
 * vxge_poll - Receive handler when Receive Polling is used.
 * @dev: pointer to the device structure.
 * @budget: Number of packets budgeted to be processed in this iteration.
 *
 * This function comes into the picture only if Receive side is being handled
 * through polling (called NAPI in linux). It mostly does what the normal
 * Rx interrupt handler does in terms of descriptor and packet processing
 * but not in an interrupt context. Also it will process a specified number
 * of packets at most in one iteration. This value is passed down by the
 * kernel as the function argument 'budget'.
 */
static int vxge_poll_msix(struct napi_struct *napi, int budget)
{
	struct vxge_ring *ring =
		container_of(napi, struct vxge_ring, napi);
	int budget_org = budget;
	ring->budget = budget;

	vxge_hw_vpath_poll_rx(ring->handle);

	if (ring->pkts_processed < budget_org) {
		napi_complete(napi);
		/* Re-enable the Rx interrupts for the vpath */
		vxge_hw_channel_msix_unmask(
			(struct __vxge_hw_channel *)ring->handle,
			ring->rx_vector_no);
	}
	return ring->pkts_processed;
}
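
/*
 * The NAPI contract honoured above: return the number of packets actually
 * processed, and call napi_complete() and re-arm the interrupt only when
 * the ring was drained below the budget; otherwise the kernel polls again.
 */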
static int vxge_poll_inta(struct napi_struct *napi, int budget)
{
	struct vxgedev *vdev = container_of(napi, struct vxgedev, napi);
	int pkts_processed = 0;
	int i;
	int budget_org = budget;
	struct vxge_ring *ring;

	struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)
		pci_get_drvdata(vdev->pdev);

	for (i = 0; i < vdev->no_of_vpath; i++) {
		ring = &vdev->vpaths[i].ring;
		ring->budget = budget;
		vxge_hw_vpath_poll_rx(ring->handle);
		pkts_processed += ring->pkts_processed;
		budget -= ring->pkts_processed;
		if (budget <= 0)
			break;
	}

	VXGE_COMPLETE_ALL_TX(vdev);

	if (pkts_processed < budget_org) {
		napi_complete(napi);
		/* Re-enable the Rx interrupts for the ring */
		vxge_hw_device_unmask_all(hldev);
		vxge_hw_device_flush_io(hldev);
	}

	return pkts_processed;
}
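
/*
 * In INTA mode a single napi context serves every vpath, so the budget is
 * consumed ring by ring: each ring may spend whatever budget remains, and
 * polling stops early once the budget is exhausted.
 */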
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * vxge_netpoll - netpoll event handler entry point
 * @dev : pointer to the device structure.
 * Description:
 * This function will be called by upper layer to check for events on the
 * interface in situations where interrupts are disabled. It is used for
 * specific in-kernel networking tasks, such as remote consoles and kernel
 * debugging over the network (example netdump in RedHat).
 */
static void vxge_netpoll(struct net_device *dev)
{
	struct __vxge_hw_device *hldev;
	struct vxgedev *vdev;

	vdev = (struct vxgedev *)netdev_priv(dev);
	hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	if (pci_channel_offline(vdev->pdev))
		return;

	disable_irq(dev->irq);
	vxge_hw_device_clear_tx_rx(hldev);

	VXGE_COMPLETE_ALL_RX(vdev);
	VXGE_COMPLETE_ALL_TX(vdev);

	enable_irq(dev->irq);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}
#endif
/* RTH configuration */
static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_rth_hash_types hash_types;
	u8 itable[256] = {0}; /* indirection table */
	u8 mtable[256] = {0}; /* CPU to vpath mapping */
	int index;

	/*
	 * Filling
	 *	- itable with bucket numbers
	 *	- mtable with bucket-to-vpath mapping
	 */
	for (index = 0; index < (1 << vdev->config.rth_bkt_sz); index++) {
		itable[index] = index;
		mtable[index] = index % vdev->no_of_vpath;
	}

	/* Fill RTH hash types */
	hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4;
	hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4;
	hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6;
	hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6;
	hash_types.hash_type_tcpipv6ex_en =
					vdev->config.rth_hash_type_tcpipv6ex;
	hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex;

	/* set indirection table, bucket-to-vpath mapping */
	status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles,
						vdev->no_of_vpath,
						mtable, itable,
						vdev->config.rth_bkt_sz);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"RTH indirection table configuration failed "
			"for vpath:%d", vdev->vpaths[0].device_id);
		return status;
	}

	/*
	 * Because the itable_set() method uses the active_table field
	 * for the target virtual path the RTH config should be updated
	 * for all VPATHs. The h/w only uses the lowest numbered VPATH
	 * when steering frames.
	 */
	for (index = 0; index < vdev->no_of_vpath; index++) {
		status = vxge_hw_vpath_rts_rth_set(
				vdev->vpaths[index].handle,
				vdev->config.rth_algorithm,
				&hash_types,
				vdev->config.rth_bkt_sz);

		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR,
				"RTH configuration failed for vpath:%d",
				vdev->vpaths[index].device_id);
			return status;
		}
	}

	return status;
}
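
/*
 * Worked example of the table fill above, assuming rth_bkt_sz == 2 and
 * two vpaths: 1 << 2 == 4 buckets are programmed as
 *
 *	itable[] = { 0, 1, 2, 3 };	bucket numbers
 *	mtable[] = { 0, 1, 0, 1 };	bucket % no_of_vpath
 *
 * so hashed flows alternate between the two vpaths.
 */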
int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
{
	struct vxge_mac_addrs *new_mac_entry;
	u8 *mac_address = NULL;

	if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT)
		return TRUE;

	new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC);
	if (!new_mac_entry) {
		vxge_debug_mem(VXGE_ERR,
			"%s: memory allocation failed",
			VXGE_DRIVER_NAME);
		return FALSE;
	}

	list_add(&new_mac_entry->item, &vpath->mac_addr_list);

	/* Copy the new mac address to the list */
	mac_address = (u8 *)&new_mac_entry->macaddr;
	memcpy(mac_address, mac->macaddr, ETH_ALEN);

	new_mac_entry->state = mac->state;
	vpath->mac_addr_cnt++;

	/* Is this a multicast address */
	if (0x01 & mac->macaddr[0])
		vpath->mcast_addr_cnt++;

	return TRUE;
}
/* Add a mac address to DA table */
enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath;
	enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;

	if (0x01 & mac->macaddr[0]) /* multicast address */
		duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
	else
		duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;

	vpath = &vdev->vpaths[mac->vpath_no];
	status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr,
						mac->macmask, duplicate_mode);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"DA config add entry failed for vpath:%d",
			vpath->device_id);
	} else
		if (FALSE == vxge_mac_list_add(vpath, mac))
			status = -EPERM;

	return status;
}
int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
{
	struct list_head *entry, *next;
	u64 del_mac = 0;
	u8 *mac_address = (u8 *)(&del_mac);

	/* Copy the mac address to delete from the list */
	memcpy(mac_address, mac->macaddr, ETH_ALEN);

	list_for_each_safe(entry, next, &vpath->mac_addr_list) {
		if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
			list_del(entry);
			kfree((struct vxge_mac_addrs *)entry);
			vpath->mac_addr_cnt--;

			/* Is this a multicast address */
			if (0x01 & mac->macaddr[0])
				vpath->mcast_addr_cnt--;
			return TRUE;
		}
	}

	return FALSE;
}
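
/*
 * Note on the comparison above: each list entry stores the mac address in
 * a u64, so the six address octets are copied into del_mac and the two
 * u64 values are compared in one shot instead of memcmp()ing the entries.
 */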
/* delete a mac address from DA table */
enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath;

	vpath = &vdev->vpaths[mac->vpath_no];
	status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr,
						mac->macmask);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"DA config delete entry failed for vpath:%d",
			vpath->device_id);
	} else
		vxge_mac_list_del(vpath, mac);
	return status;
}
/* Search for a mac address in the DA table */
static enum vxge_hw_status
vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath,
					struct macInfo *mac)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	unsigned char macmask[ETH_ALEN];
	unsigned char macaddr[ETH_ALEN];

	status = vxge_hw_vpath_mac_addr_get(vpath->handle,
				macaddr, macmask);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"DA config list entry failed for vpath:%d",
			vpath->device_id);
		return status;
	}

	while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) {
		status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
				macaddr, macmask);
		if (status != VXGE_HW_OK)
			break;
	}

	return status;
}
/* Store all vlan ids from the list to the vid table */
enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxgedev *vdev = vpath->vdev;
	u16 vid;

	if (vdev->vlgrp && vpath->is_open) {

		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!vlan_group_get_device(vdev->vlgrp, vid))
				continue;
			/* Add this vlan to the vid table */
			status = vxge_hw_vpath_vid_add(vpath->handle, vid);
		}
	}

	return status;
}
/* Store all mac addresses from the list to the DA table */
enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct macInfo mac_info;
	u8 *mac_address = NULL;
	struct list_head *entry, *next;

	memset(&mac_info, 0, sizeof(struct macInfo));

	if (vpath->is_open) {

		list_for_each_safe(entry, next, &vpath->mac_addr_list) {
			mac_address =
				(u8 *)&
				((struct vxge_mac_addrs *)entry)->macaddr;
			memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
			((struct vxge_mac_addrs *)entry)->state =
				VXGE_LL_MAC_ADDR_IN_DA_TABLE;
			/* does this mac address already exist in da table? */
			status = vxge_search_mac_addr_in_da_table(vpath,
				&mac_info);
			if (status != VXGE_HW_OK) {
				/* Add this mac address to the DA table */
				status = vxge_hw_vpath_mac_addr_add(
					vpath->handle, mac_info.macaddr,
					mac_info.macmask,
					VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
				if (status != VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
					    "DA add entry failed for vpath:%d",
					    vpath->device_id);
					((struct vxge_mac_addrs *)entry)->state
						= VXGE_LL_MAC_ADDR_IN_LIST;
				}
			}
		}
	}

	return status;
}
/* reset vpaths */
enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath;
	int i;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		vpath = &vdev->vpaths[i];
		if (vpath->handle) {
			if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
				if (is_vxge_card_up(vdev) &&
					vxge_hw_vpath_recover_from_reset(
						vpath->handle) != VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
						"vxge_hw_vpath_recover_"
						"from_reset failed for vpath: "
						"%d", i);
					return VXGE_HW_FAIL;
				}
			} else {
				vxge_debug_init(VXGE_ERR,
					"vxge_hw_vpath_reset failed for "
					"vpath:%d", i);
				return VXGE_HW_FAIL;
			}
		}
	}

	return status;
}
/* close vpaths */
void vxge_close_vpaths(struct vxgedev *vdev, int index)
{
	struct vxge_vpath *vpath;
	int i;

	for (i = index; i < vdev->no_of_vpath; i++) {
		vpath = &vdev->vpaths[i];

		if (vpath->handle && vpath->is_open) {
			vxge_hw_vpath_close(vpath->handle);
			vdev->stats.vpaths_open--;
		}
		vpath->is_open = 0;
		vpath->handle = NULL;
	}
}
/* open vpaths */
int vxge_open_vpaths(struct vxgedev *vdev)
{
	struct vxge_hw_vpath_attr attr;
	enum vxge_hw_status status;
	struct vxge_vpath *vpath;
	u32 vp_id = 0;
	int i;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		vpath = &vdev->vpaths[i];

		vxge_assert(vpath->is_configured);
		attr.vp_id = vpath->device_id;
		attr.fifo_attr.callback = vxge_xmit_compl;
		attr.fifo_attr.txdl_term = vxge_tx_term;
		attr.fifo_attr.per_txdl_space = sizeof(struct vxge_tx_priv);
		attr.fifo_attr.userdata = &vpath->fifo;

		attr.ring_attr.callback = vxge_rx_1b_compl;
		attr.ring_attr.rxd_init = vxge_rx_initial_replenish;
		attr.ring_attr.rxd_term = vxge_rx_term;
		attr.ring_attr.per_rxd_space = sizeof(struct vxge_rx_priv);
		attr.ring_attr.userdata = &vpath->ring;

		vpath->ring.ndev = vdev->ndev;
		vpath->ring.pdev = vdev->pdev;
		status = vxge_hw_vpath_open(vdev->devh, &attr, &vpath->handle);
		if (status == VXGE_HW_OK) {
			vpath->fifo.handle =
			    (struct __vxge_hw_fifo *)attr.fifo_attr.userdata;
			vpath->ring.handle =
			    (struct __vxge_hw_ring *)attr.ring_attr.userdata;
			vpath->fifo.tx_steering_type =
				vdev->config.tx_steering_type;
			vpath->fifo.ndev = vdev->ndev;
			vpath->fifo.pdev = vdev->pdev;
			if (vdev->config.tx_steering_type)
				vpath->fifo.txq =
					netdev_get_tx_queue(vdev->ndev, i);
			else
				vpath->fifo.txq =
					netdev_get_tx_queue(vdev->ndev, 0);
			vpath->fifo.indicate_max_pkts =
				vdev->config.fifo_indicate_max_pkts;
			vpath->ring.rx_vector_no = 0;
			vpath->ring.rx_csum = vdev->rx_csum;
			vpath->is_open = 1;
			vdev->vp_handles[i] = vpath->handle;
			vpath->ring.gro_enable = vdev->config.gro_enable;
			vpath->ring.vlan_tag_strip = vdev->vlan_tag_strip;
			vdev->stats.vpaths_open++;
		} else {
			vdev->stats.vpath_open_fail++;
			vxge_debug_init(VXGE_ERR,
				"%s: vpath: %d failed to open "
				"with status: %d",
				vdev->ndev->name, vpath->device_id,
				status);
			vxge_close_vpaths(vdev, 0);
			return -EPERM;
		}

		vp_id = vpath->handle->vpath->vp_id;
		vdev->vpaths_deployed |= vxge_mBIT(vp_id);
	}
	return VXGE_HW_OK;
}
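
/*
 * Tx queue mapping established above: with tx_steering_type set, each
 * vpath's fifo drives its own netdev Tx queue (queue i for vpath i);
 * without steering, every fifo shares Tx queue 0.
 */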
/*
 * vxge_isr_napi
 * @irq: the irq of the device.
 * @dev_id: a void pointer to the hldev structure of the Titan device
 * @ptregs: pointer to the registers pushed on the stack.
 *
 * This function is the ISR handler of the device when napi is enabled. It
 * identifies the reason for the interrupt and calls the relevant service
 * routines.
 */
static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
{
	struct net_device *dev;
	struct __vxge_hw_device *hldev;
	u64 reason;
	enum vxge_hw_status status;
	struct vxgedev *vdev = (struct vxgedev *)dev_id;

	vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	dev = vdev->ndev;
	hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);

	if (pci_channel_offline(vdev->pdev))
		return IRQ_NONE;

	if (unlikely(!is_vxge_card_up(vdev)))
		return IRQ_NONE;

	status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode,
			&reason);
	if (status == VXGE_HW_OK) {
		vxge_hw_device_mask_all(hldev);

		if (reason &
			VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(
			vdev->vpaths_deployed >>
			(64 - VXGE_HW_MAX_VIRTUAL_PATHS))) {

			vxge_hw_device_clear_tx_rx(hldev);
			napi_schedule(&vdev->napi);
			vxge_debug_intr(VXGE_TRACE,
				"%s:%d Exiting...", __func__, __LINE__);
			return IRQ_HANDLED;
		} else
			vxge_hw_device_unmask_all(hldev);
	} else if (unlikely((status == VXGE_HW_ERR_VPATH) ||
		(status == VXGE_HW_ERR_CRITICAL) ||
		(status == VXGE_HW_ERR_FIFO))) {
		vxge_hw_device_mask_all(hldev);
		vxge_hw_device_flush_io(hldev);
		return IRQ_HANDLED;
	} else if (unlikely(status == VXGE_HW_ERR_SLOT_FREEZE))
		return IRQ_HANDLED;

	vxge_debug_intr(VXGE_TRACE, "%s:%d Exiting...", __func__, __LINE__);
	return IRQ_NONE;
}
#ifdef CONFIG_PCI_MSI

static irqreturn_t
vxge_tx_msix_handle(int irq, void *dev_id)
{
	struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;

	VXGE_COMPLETE_VPATH_TX(fifo);

	return IRQ_HANDLED;
}

static irqreturn_t
vxge_rx_msix_napi_handle(int irq, void *dev_id)
{
	struct vxge_ring *ring = (struct vxge_ring *)dev_id;

	/* MSIX_IDX for Rx is 1 */
	vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle,
					ring->rx_vector_no);

	napi_schedule(&ring->napi);
	return IRQ_HANDLED;
}

static irqreturn_t
vxge_alarm_msix_handle(int irq, void *dev_id)
{
	int i;
	enum vxge_hw_status status;
	struct vxge_vpath *vpath = (struct vxge_vpath *)dev_id;
	struct vxgedev *vdev = vpath->vdev;
	int msix_id = (vpath->handle->vpath->vp_id *
		VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);

		status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
			vdev->exec_mode);
		if (status == VXGE_HW_OK) {
			vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
					msix_id);
			continue;
		}
		vxge_debug_intr(VXGE_ERR,
			"%s: vxge_hw_vpath_alarm_process failed %x ",
			VXGE_DRIVER_NAME, status);
	}
	return IRQ_HANDLED;
}
static int vxge_alloc_msix(struct vxgedev *vdev)
{
	int j, i, ret = 0;
	int msix_intr_vect = 0, temp;
	vdev->intr_cnt = 0;

start:
	/* Tx/Rx MSIX Vectors count */
	vdev->intr_cnt = vdev->no_of_vpath * 2;

	/* Alarm MSIX Vectors count */
	vdev->intr_cnt++;

	vdev->entries = kzalloc(vdev->intr_cnt * sizeof(struct msix_entry),
						GFP_KERNEL);
	if (!vdev->entries) {
		vxge_debug_init(VXGE_ERR,
			"%s: memory allocation failed",
			VXGE_DRIVER_NAME);
		ret = -ENOMEM;
		goto alloc_entries_failed;
	}

	vdev->vxge_entries =
		kzalloc(vdev->intr_cnt * sizeof(struct vxge_msix_entry),
				GFP_KERNEL);
	if (!vdev->vxge_entries) {
		vxge_debug_init(VXGE_ERR, "%s: memory allocation failed",
			VXGE_DRIVER_NAME);
		ret = -ENOMEM;
		goto alloc_vxge_entries_failed;
	}

	for (i = 0, j = 0; i < vdev->no_of_vpath; i++) {
		msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;

		/* Initialize the fifo vector */
		vdev->entries[j].entry = msix_intr_vect;
		vdev->vxge_entries[j].entry = msix_intr_vect;
		vdev->vxge_entries[j].in_use = 0;
		j++;

		/* Initialize the ring vector */
		vdev->entries[j].entry = msix_intr_vect + 1;
		vdev->vxge_entries[j].entry = msix_intr_vect + 1;
		vdev->vxge_entries[j].in_use = 0;
		j++;
	}

	/* Initialize the alarm vector */
	vdev->entries[j].entry = VXGE_ALARM_MSIX_ID;
	vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID;
	vdev->vxge_entries[j].in_use = 0;

	ret = pci_enable_msix(vdev->pdev, vdev->entries, vdev->intr_cnt);
	if (ret > 0) {
		vxge_debug_init(VXGE_ERR,
			"%s: MSI-X enable failed for %d vectors, ret: %d",
			VXGE_DRIVER_NAME, vdev->intr_cnt, ret);
		if ((max_config_vpath != VXGE_USE_DEFAULT) || (ret < 3)) {
			ret = -ENODEV;
			goto enable_msix_failed;
		}

		kfree(vdev->entries);
		kfree(vdev->vxge_entries);
		vdev->entries = NULL;
		vdev->vxge_entries = NULL;

		/* Retry with fewer vectors by reducing the vpath count */
		temp = (ret - 1)/2;
		vxge_close_vpaths(vdev, temp);
		vdev->no_of_vpath = temp;
		goto start;
	} else if (ret < 0) {
		ret = -ENODEV;
		goto enable_msix_failed;
	}
	return 0;

enable_msix_failed:
	kfree(vdev->vxge_entries);
alloc_vxge_entries_failed:
	kfree(vdev->entries);
alloc_entries_failed:
	return ret;
}
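
/*
 * Fallback arithmetic, for reference: a positive return from
 * pci_enable_msix() is the number of vectors that could have been
 * allocated. Each vpath needs two vectors plus one shared alarm vector,
 * so the retry above keeps temp = (ret - 1) / 2 vpaths; e.g. ret == 5
 * retries with two vpaths (2 * 2 + 1 == 5 vectors).
 */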
static int vxge_enable_msix(struct vxgedev *vdev)
{
	int i, ret = 0;
	/* 0 - Tx, 1 - Rx */
	int tim_msix_id[4] = {0, 1, 0, 0};

	vdev->intr_cnt = 0;

	/* allocate msix vectors */
	ret = vxge_alloc_msix(vdev);
	if (!ret) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			struct vxge_vpath *vpath = &vdev->vpaths[i];

			/* If fifo or ring are not enabled, the MSIX vector for
			 * it should be set to 0.
			 */
			vpath->ring.rx_vector_no = (vpath->device_id *
						VXGE_HW_VPATH_MSIX_ACTIVE) + 1;

			vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
					       VXGE_ALARM_MSIX_ID);
		}
	}

	return ret;
}
static void vxge_rem_msix_isr(struct vxgedev *vdev)
{
	int intr_cnt;

	for (intr_cnt = 0; intr_cnt < (vdev->no_of_vpath * 2 + 1);
		intr_cnt++) {
		if (vdev->vxge_entries[intr_cnt].in_use) {
			synchronize_irq(vdev->entries[intr_cnt].vector);
			free_irq(vdev->entries[intr_cnt].vector,
				vdev->vxge_entries[intr_cnt].arg);
			vdev->vxge_entries[intr_cnt].in_use = 0;
		}
	}

	kfree(vdev->entries);
	kfree(vdev->vxge_entries);
	vdev->entries = NULL;
	vdev->vxge_entries = NULL;

	if (vdev->config.intr_type == MSI_X)
		pci_disable_msix(vdev->pdev);
}
#endif
static void vxge_rem_isr(struct vxgedev *vdev)
{
	struct __vxge_hw_device *hldev;
	hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);

#ifdef CONFIG_PCI_MSI
	if (vdev->config.intr_type == MSI_X) {
		vxge_rem_msix_isr(vdev);
	} else
#endif
	if (vdev->config.intr_type == INTA) {
		synchronize_irq(vdev->pdev->irq);
		free_irq(vdev->pdev->irq, vdev);
	}
}
static int vxge_add_isr(struct vxgedev *vdev)
{
	int ret = 0;
#ifdef CONFIG_PCI_MSI
	int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0;
	int pci_fun = PCI_FUNC(vdev->pdev->devfn);

	if (vdev->config.intr_type == MSI_X)
		ret = vxge_enable_msix(vdev);

	if (ret) {
		vxge_debug_init(VXGE_ERR,
			"%s: Enabling MSI-X Failed", VXGE_DRIVER_NAME);
		vxge_debug_init(VXGE_ERR,
			"%s: Defaulting to INTA", VXGE_DRIVER_NAME);
		vdev->config.intr_type = INTA;
	}

	if (vdev->config.intr_type == MSI_X) {
		for (intr_idx = 0;
		     intr_idx < (vdev->no_of_vpath *
			VXGE_HW_VPATH_MSIX_ACTIVE); intr_idx++) {

			msix_idx = intr_idx % VXGE_HW_VPATH_MSIX_ACTIVE;
			irq_req = 0;

			switch (msix_idx) {
			case 0:
				snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
					"%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d",
					vdev->ndev->name,
					vdev->entries[intr_cnt].entry,
					pci_fun, vp_idx);
				ret = request_irq(
					vdev->entries[intr_cnt].vector,
					vxge_tx_msix_handle, 0,
					vdev->desc[intr_cnt],
					&vdev->vpaths[vp_idx].fifo);
				vdev->vxge_entries[intr_cnt].arg =
					&vdev->vpaths[vp_idx].fifo;
				irq_req = 1;
				break;
			case 1:
				snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
					"%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d",
					vdev->ndev->name,
					vdev->entries[intr_cnt].entry,
					pci_fun, vp_idx);
				ret = request_irq(
					vdev->entries[intr_cnt].vector,
					vxge_rx_msix_napi_handle,
					0,
					vdev->desc[intr_cnt],
					&vdev->vpaths[vp_idx].ring);
				vdev->vxge_entries[intr_cnt].arg =
					&vdev->vpaths[vp_idx].ring;
				irq_req = 1;
				break;
			}

			if (ret) {
				vxge_debug_init(VXGE_ERR,
					"%s: MSIX - %d Registration failed",
					vdev->ndev->name, intr_cnt);
				vxge_rem_msix_isr(vdev);
				vdev->config.intr_type = INTA;
				vxge_debug_init(VXGE_ERR,
					"%s: Defaulting to INTA",
					vdev->ndev->name);
				goto INTA_MODE;
			}

			if (irq_req) {
				/* We requested this msix interrupt */
				vdev->vxge_entries[intr_cnt].in_use = 1;
				msix_idx += vdev->vpaths[vp_idx].device_id *
					VXGE_HW_VPATH_MSIX_ACTIVE;
				vxge_hw_vpath_msix_unmask(
					vdev->vpaths[vp_idx].handle,
					msix_idx);
				intr_cnt++;
			}

			/* Point to next vpath handler */
			if (((intr_idx + 1) % VXGE_HW_VPATH_MSIX_ACTIVE == 0) &&
			    (vp_idx < (vdev->no_of_vpath - 1)))
				vp_idx++;
		}

		intr_cnt = vdev->no_of_vpath * 2;
		snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
			"%s:vxge:MSI-X %d - Alarm - fn:%d",
			vdev->ndev->name,
			vdev->entries[intr_cnt].entry,
			pci_fun);
		/* For Alarm interrupts */
		ret = request_irq(vdev->entries[intr_cnt].vector,
					vxge_alarm_msix_handle, 0,
					vdev->desc[intr_cnt],
					&vdev->vpaths[0]);
		if (ret) {
			vxge_debug_init(VXGE_ERR,
				"%s: MSIX - %d Registration failed",
				vdev->ndev->name, intr_cnt);
			vxge_rem_msix_isr(vdev);
			vdev->config.intr_type = INTA;
			vxge_debug_init(VXGE_ERR,
				"%s: Defaulting to INTA",
				vdev->ndev->name);
			goto INTA_MODE;
		}

		msix_idx = (vdev->vpaths[0].handle->vpath->vp_id *
			VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
		vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle,
					msix_idx);
		vdev->vxge_entries[intr_cnt].in_use = 1;
		vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0];
	}
INTA_MODE:
#endif

	if (vdev->config.intr_type == INTA) {
		snprintf(vdev->desc[0], VXGE_INTR_STRLEN,
			"%s:vxge:INTA", vdev->ndev->name);
		vxge_hw_device_set_intr_type(vdev->devh,
			VXGE_HW_INTR_MODE_IRQLINE);
		vxge_hw_vpath_tti_ci_set(vdev->devh,
			vdev->vpaths[0].device_id);
		ret = request_irq((int) vdev->pdev->irq,
			vxge_isr_napi,
			IRQF_SHARED, vdev->desc[0], vdev);
		if (ret) {
			vxge_debug_init(VXGE_ERR,
				"%s %s-%d: ISR registration failed",
				VXGE_DRIVER_NAME, "IRQ", vdev->pdev->irq);
			return -ENODEV;
		}
		vxge_debug_init(VXGE_TRACE,
			"new %s-%d line allocated",
			"IRQ", vdev->pdev->irq);
	}

	return VXGE_HW_OK;
}
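
/*
 * The snprintf() formats above make the vector assignment visible in
 * /proc/interrupts: each requested vector shows up as
 * "<ifname>:vxge:MSI-X <n> - Tx|Rx - fn:<fn> vpath:<id>", plus one
 * "... - Alarm - fn:<fn>" entry for the shared alarm vector.
 */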
static void vxge_poll_vp_reset(unsigned long data)
{
	struct vxgedev *vdev = (struct vxgedev *)data;
	int i, j = 0;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		if (test_bit(i, &vdev->vp_reset)) {
			vxge_reset_vpath(vdev, i);
			j++;
		}
	}
	if (j && (vdev->config.intr_type != MSI_X)) {
		vxge_hw_device_unmask_all(vdev->devh);
		vxge_hw_device_flush_io(vdev->devh);
	}

	mod_timer(&vdev->vp_reset_timer, jiffies + HZ / 2);
}
static void vxge_poll_vp_lockup(unsigned long data)
{
	struct vxgedev *vdev = (struct vxgedev *)data;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath;
	struct vxge_ring *ring;
	int i;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		ring = &vdev->vpaths[i].ring;
		/* Has this vpath received any packets since the last poll? */
		if (ring->stats.prev_rx_frms == ring->stats.rx_frms) {
			status = vxge_hw_vpath_check_leak(ring->handle);

			/* Did the leak check also fail last time? */
			if ((VXGE_HW_FAIL == status) &&
				(VXGE_HW_FAIL == ring->last_status)) {

				/* schedule vpath reset */
				if (!test_and_set_bit(i, &vdev->vp_reset)) {
					vpath = &vdev->vpaths[i];

					/* disable interrupts for this vpath */
					vxge_vpath_intr_disable(vdev, i);

					/* stop the queue for this vpath */
					netif_tx_stop_queue(vpath->fifo.txq);
					continue;
				}
			}
		}
		ring->stats.prev_rx_frms = ring->stats.rx_frms;
		ring->last_status = status;
	}

	/* check again in 1 millisecond (HZ / 1000 jiffies) */
	mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000);
}
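
/*
 * The lockup detector above is deliberately two-strike: a vpath is only
 * scheduled for reset when its rx frame counter has not moved AND the
 * hardware leak check has failed on two consecutive polls, so an idle
 * vpath is never reset just for being quiet.
 */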
/**
 * vxge_open
 * @dev: pointer to the device structure.
 *
 * This function is the open entry point of the driver. It mainly calls a
 * function to allocate Rx buffers and inserts them into the buffer
 * descriptors and then enables the Rx part of the NIC.
 * Return value: '0' on success and an appropriate negative errno value
 * as defined in errno.h on failure.
 */
int
vxge_open(struct net_device *dev)
{
	enum vxge_hw_status status;
	struct vxgedev *vdev;
	struct __vxge_hw_device *hldev;
	struct vxge_vpath *vpath;
	int ret = 0;
	int i;
	u64 val64, function_mode;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d", dev->name, __func__, __LINE__);

	vdev = (struct vxgedev *)netdev_priv(dev);
	hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
	function_mode = vdev->config.device_hw_info.function_mode;

	/* make sure you have link off by default every time Nic is
	 * initialized */
	netif_carrier_off(dev);

	/* Open VPATHs */
	status = vxge_open_vpaths(vdev);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"%s: fatal: Vpath open failed", vdev->ndev->name);
		ret = -EPERM;
		goto out0;
	}

	vdev->mtu = dev->mtu;

	status = vxge_add_isr(vdev);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"%s: fatal: ISR add failed", dev->name);
		ret = -EPERM;
		goto out1;
	}

	if (vdev->config.intr_type != MSI_X) {
		netif_napi_add(dev, &vdev->napi, vxge_poll_inta,
			vdev->config.napi_weight);
		napi_enable(&vdev->napi);
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vpath->ring.napi_p = &vdev->napi;
		}
	} else {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			netif_napi_add(dev, &vpath->ring.napi,
			    vxge_poll_msix, vdev->config.napi_weight);
			napi_enable(&vpath->ring.napi);
			vpath->ring.napi_p = &vpath->ring.napi;
		}
	}

	/* configure RTH */
	if (vdev->config.rth_steering) {
		status = vxge_rth_configure(vdev);
		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR,
				"%s: fatal: RTH configuration failed",
				dev->name);
			ret = -EPERM;
			goto out2;
		}
	}

	for (i = 0; i < vdev->no_of_vpath; i++) {
		vpath = &vdev->vpaths[i];

		/* set initial mtu before enabling the device */
		status = vxge_hw_vpath_mtu_set(vpath->handle, vdev->mtu);
		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR,
				"%s: fatal: can not set new MTU", dev->name);
			ret = -EPERM;
			goto out2;
		}
	}

	VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_TRACE, VXGE_COMPONENT_LL, vdev);
	vxge_debug_init(vdev->level_trace,
		"%s: MTU is %d", vdev->ndev->name, vdev->mtu);
	VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_ERR, VXGE_COMPONENT_LL, vdev);

	/* Restore the DA, VID table and also multicast and promiscuous mode
	 * states
	 */
	if (vdev->all_multi_flg) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vxge_restore_vpath_mac_addr(vpath);
			vxge_restore_vpath_vid_table(vpath);

			status = vxge_hw_vpath_mcast_enable(vpath->handle);
			if (status != VXGE_HW_OK)
				vxge_debug_init(VXGE_ERR,
					"%s:%d Enabling multicast failed",
					__func__, __LINE__);
		}
	}

	/* Enable vpath to sniff all unicast/multicast traffic that is not
	 * addressed to them. We allow promiscuous mode for PF only
	 */
	val64 = 0;
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
		val64 |= VXGE_HW_RXMAC_AUTHORIZE_ALL_ADDR_VP(i);

	vxge_hw_mgmt_reg_write(vdev->devh,
		vxge_hw_mgmt_reg_type_mrpcim,
		0,
		(ulong)offsetof(struct vxge_hw_mrpcim_reg,
			rxmac_authorize_all_addr),
		val64);

	vxge_hw_mgmt_reg_write(vdev->devh,
		vxge_hw_mgmt_reg_type_mrpcim,
		0,
		(ulong)offsetof(struct vxge_hw_mrpcim_reg,
			rxmac_authorize_all_vid),
		val64);

	vxge_set_multicast(dev);

	/* Enabling bcast and mcast for all vpaths */
	for (i = 0; i < vdev->no_of_vpath; i++) {
		vpath = &vdev->vpaths[i];

		status = vxge_hw_vpath_bcast_enable(vpath->handle);
		if (status != VXGE_HW_OK)
			vxge_debug_init(VXGE_ERR,
				"%s : Can not enable bcast for vpath "
				"id %d", dev->name, i);
		if (vdev->config.addr_learn_en) {
			status = vxge_hw_vpath_mcast_enable(vpath->handle);
			if (status != VXGE_HW_OK)
				vxge_debug_init(VXGE_ERR,
					"%s : Can not enable mcast for vpath "
					"id %d", dev->name, i);
		}
	}

	vxge_hw_device_setpause_data(vdev->devh, 0,
		vdev->config.tx_pause_enable,
		vdev->config.rx_pause_enable);

	if (vdev->vp_reset_timer.function == NULL)
		vxge_os_timer(vdev->vp_reset_timer,
			vxge_poll_vp_reset, vdev, (HZ/2));

	if (vdev->vp_lockup_timer.function == NULL)
		vxge_os_timer(vdev->vp_lockup_timer,
			vxge_poll_vp_lockup, vdev, (HZ/2));

	set_bit(__VXGE_STATE_CARD_UP, &vdev->state);

	smp_wmb();

	if (vxge_hw_device_link_state_get(vdev->devh) == VXGE_HW_LINK_UP) {
		netif_carrier_on(vdev->ndev);
		netdev_notice(vdev->ndev, "Link Up\n");
		vdev->stats.link_up++;
	}

	vxge_hw_device_intr_enable(vdev->devh);

	smp_wmb();

	for (i = 0; i < vdev->no_of_vpath; i++) {
		vpath = &vdev->vpaths[i];

		vxge_hw_vpath_enable(vpath->handle);
		smp_wmb();
		vxge_hw_vpath_rx_doorbell_init(vpath->handle);
	}

	netif_tx_start_all_queues(vdev->ndev);
	goto out0;

out2:
	vxge_rem_isr(vdev);

	/* Disable napi */
	if (vdev->config.intr_type != MSI_X)
		napi_disable(&vdev->napi);
	else {
		for (i = 0; i < vdev->no_of_vpath; i++)
			napi_disable(&vdev->vpaths[i].ring.napi);
	}

out1:
	vxge_close_vpaths(vdev, 0);
out0:
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...",
		dev->name, __func__, __LINE__);
	return ret;
}
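
/*
 * The error labels above unwind in reverse order of setup: out2 removes
 * the ISR and disables the napi contexts added after the vpaths were
 * opened, out1 closes the vpaths, and out0 is the common exit path.
 */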
/* Loop through the mac address list and delete all the entries */
void vxge_free_mac_add_list(struct vxge_vpath *vpath)
{
	struct list_head *entry, *next;
	if (list_empty(&vpath->mac_addr_list))
		return;

	list_for_each_safe(entry, next, &vpath->mac_addr_list) {
		list_del(entry);
		kfree((struct vxge_mac_addrs *)entry);
	}
}
static void vxge_napi_del_all(struct vxgedev *vdev)
{
	int i;
	if (vdev->config.intr_type != MSI_X)
		netif_napi_del(&vdev->napi);
	else {
		for (i = 0; i < vdev->no_of_vpath; i++)
			netif_napi_del(&vdev->vpaths[i].ring.napi);
	}
}
int do_vxge_close(struct net_device *dev, int do_io)
{
	enum vxge_hw_status status;
	struct vxgedev *vdev;
	struct __vxge_hw_device *hldev;
	int i;
	u64 val64, vpath_vector;
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		dev->name, __func__, __LINE__);

	vdev = (struct vxgedev *)netdev_priv(dev);
	hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);

	if (unlikely(!is_vxge_card_up(vdev)))
		return 0;

	/* If vxge_handle_crit_err task is executing,
	 * wait till it completes. */
	while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
		msleep(50);

	clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
	if (do_io) {
		/* Put the vpath back in normal mode */
		vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id);
		status = vxge_hw_mgmt_reg_read(vdev->devh,
				vxge_hw_mgmt_reg_type_mrpcim,
				0,
				(ulong)offsetof(
					struct vxge_hw_mrpcim_reg,
					rts_mgr_cbasin_cfg),
				&val64);

		if (status == VXGE_HW_OK) {
			val64 &= ~vpath_vector;
			status = vxge_hw_mgmt_reg_write(vdev->devh,
					vxge_hw_mgmt_reg_type_mrpcim,
					0,
					(ulong)offsetof(
						struct vxge_hw_mrpcim_reg,
						rts_mgr_cbasin_cfg),
					val64);
		}

		/* Remove the function 0 from promiscuous mode */
		vxge_hw_mgmt_reg_write(vdev->devh,
			vxge_hw_mgmt_reg_type_mrpcim,
			0,
			(ulong)offsetof(struct vxge_hw_mrpcim_reg,
				rxmac_authorize_all_addr),
			0);

		vxge_hw_mgmt_reg_write(vdev->devh,
			vxge_hw_mgmt_reg_type_mrpcim,
			0,
			(ulong)offsetof(struct vxge_hw_mrpcim_reg,
				rxmac_authorize_all_vid),
			0);

		smp_wmb();
	}
	del_timer_sync(&vdev->vp_lockup_timer);

	del_timer_sync(&vdev->vp_reset_timer);

	/* Disable napi */
	if (vdev->config.intr_type != MSI_X)
		napi_disable(&vdev->napi);
	else {
		for (i = 0; i < vdev->no_of_vpath; i++)
			napi_disable(&vdev->vpaths[i].ring.napi);
	}

	netif_carrier_off(vdev->ndev);
	netdev_notice(vdev->ndev, "Link Down\n");
	netif_tx_stop_all_queues(vdev->ndev);

	/* Note that at this point xmit() is stopped by upper layer */
	if (do_io)
		vxge_hw_device_intr_disable(vdev->devh);

	mdelay(1000);

	vxge_rem_isr(vdev);

	vxge_napi_del_all(vdev);

	if (do_io)
		vxge_reset_all_vpaths(vdev);

	vxge_close_vpaths(vdev, 0);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", dev->name, __func__, __LINE__);

	clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);

	return 0;
}
/**
 * vxge_close
 * @dev: device pointer.
 *
 * This is the stop entry point of the driver. It needs to undo exactly
 * whatever was done by the open entry point, thus it's usually referred to
 * as the close function. Among other things this function mainly stops the
 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
 * Return value: '0' on success and an appropriate negative errno value
 * as defined in errno.h on failure.
 */
int
vxge_close(struct net_device *dev)
{
	do_vxge_close(dev, 1);
	return 0;
}
/**
 * vxge_change_mtu
 * @dev: net device pointer.
 * @new_mtu: the new MTU size for the device.
 *
 * A driver entry point to change MTU size for the device. Before changing
 * the MTU the device must be stopped.
 */
static int vxge_change_mtu(struct net_device *dev, int new_mtu)
{
	struct vxgedev *vdev = netdev_priv(dev);

	vxge_debug_entryexit(vdev->level_trace,
		"%s:%d", __func__, __LINE__);
	if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > VXGE_HW_MAX_MTU)) {
		vxge_debug_init(vdev->level_err,
			"%s: mtu size is invalid", dev->name);
		return -EPERM;
	}

	/* check if device is down already */
	if (unlikely(!is_vxge_card_up(vdev))) {
		/* just store new value, will use later on open() */
		dev->mtu = new_mtu;
		vxge_debug_init(vdev->level_err,
			"%s", "device is down on MTU change");
		return 0;
	}

	vxge_debug_init(vdev->level_trace,
		"trying to apply new MTU %d", new_mtu);

	if (vxge_close(dev))
		return -EIO;

	dev->mtu = new_mtu;
	vdev->mtu = new_mtu;

	if (vxge_open(dev))
		return -EIO;

	vxge_debug_init(vdev->level_trace,
		"%s: MTU changed to %d", vdev->ndev->name, new_mtu);

	vxge_debug_entryexit(vdev->level_trace,
		"%s:%d Exiting...", __func__, __LINE__);

	return 0;
}
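
/*
 * An MTU change on a running interface bounces the link: the device is
 * closed, the new value is stored, and the subsequent open programs it
 * into every vpath via vxge_hw_vpath_mtu_set() before traffic resumes.
 */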
/**
 * vxge_get_stats
 * @dev: pointer to the device structure
 *
 * Updates the device statistics structure. This function updates the device
 * statistics structure in the net_device structure and returns a pointer
 * to the same.
 */
static struct net_device_stats *
vxge_get_stats(struct net_device *dev)
{
	struct vxgedev *vdev;
	struct net_device_stats *net_stats;
	int k;

	vdev = netdev_priv(dev);

	net_stats = &vdev->stats.net_stats;

	memset(net_stats, 0, sizeof(struct net_device_stats));

	for (k = 0; k < vdev->no_of_vpath; k++) {
		net_stats->rx_packets += vdev->vpaths[k].ring.stats.rx_frms;
		net_stats->rx_bytes += vdev->vpaths[k].ring.stats.rx_bytes;
		net_stats->rx_errors += vdev->vpaths[k].ring.stats.rx_errors;
		net_stats->multicast += vdev->vpaths[k].ring.stats.rx_mcast;
		net_stats->rx_dropped +=
			vdev->vpaths[k].ring.stats.rx_dropped;

		net_stats->tx_packets += vdev->vpaths[k].fifo.stats.tx_frms;
		net_stats->tx_bytes += vdev->vpaths[k].fifo.stats.tx_bytes;
		net_stats->tx_errors += vdev->vpaths[k].fifo.stats.tx_errors;
	}

	return net_stats;
}
/**
 * vxge_ioctl
 * @dev: Device pointer.
 * @rq: An IOCTL specific structure, that can contain a pointer to
 *      a proprietary structure used to pass information to the driver.
 * @cmd: This is used to distinguish between the different commands that
 *      can be passed to the IOCTL functions.
 *
 * Entry point for the Ioctl.
 */
static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	return -EOPNOTSUPP;
}
/**
 * vxge_tx_watchdog
 * @dev: pointer to net device structure
 *
 * Watchdog for transmit side.
 * This function is triggered if the Tx Queue is stopped
 * for a pre-defined amount of time when the Interface is still up.
 */
static void
vxge_tx_watchdog(struct net_device *dev)
{
	struct vxgedev *vdev;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	vdev = (struct vxgedev *)netdev_priv(dev);

	vdev->cric_err_event = VXGE_HW_EVENT_RESET_START;

	vxge_reset(vdev);
	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}
/**
 * vxge_vlan_rx_register
 * @dev: net device pointer.
 * @grp: vlan group
 *
 * Vlan group registration
 */
static void
vxge_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct vxgedev *vdev;
	struct vxge_vpath *vpath;
	int vp;
	u64 vid;
	enum vxge_hw_status status;
	int i;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	vdev = (struct vxgedev *)netdev_priv(dev);

	vpath = &vdev->vpaths[0];
	if ((NULL == grp) && (vpath->is_open)) {
		/* Get the first vlan */
		status = vxge_hw_vpath_vid_get(vpath->handle, &vid);

		while (status == VXGE_HW_OK) {

			/* Delete this vlan from the vid table */
			for (vp = 0; vp < vdev->no_of_vpath; vp++) {
				vpath = &vdev->vpaths[vp];
				if (!vpath->is_open)
					continue;

				vxge_hw_vpath_vid_delete(vpath->handle, vid);
			}

			/* Get the next vlan to be deleted */
			vpath = &vdev->vpaths[0];
			status = vxge_hw_vpath_vid_get(vpath->handle, &vid);
		}
	}

	vdev->vlgrp = grp;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		if (vdev->vpaths[i].is_configured)
			vdev->vpaths[i].ring.vlgrp = grp;
	}

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}
/**
 * vxge_vlan_rx_add_vid
 * @dev: net device pointer.
 * @vid: vid
 *
 * Add the vlan id to the device's vlan id table
 */
static void
vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct vxgedev *vdev;
	struct vxge_vpath *vpath;
	int vp_id;

	vdev = (struct vxgedev *)netdev_priv(dev);

	/* Add this vlan to the vid table */
	for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
		vpath = &vdev->vpaths[vp_id];
		if (!vpath->is_open)
			continue;
		vxge_hw_vpath_vid_add(vpath->handle, vid);
	}
}
/**
 * vxge_vlan_rx_kill_vid
 * @dev: net device pointer.
 * @vid: vid
 *
 * Remove the vlan id from the device's vlan id table
 */
static void
vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct vxgedev *vdev;
	struct vxge_vpath *vpath;
	int vp_id;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	vdev = (struct vxgedev *)netdev_priv(dev);

	vlan_group_set_device(vdev->vlgrp, vid, NULL);

	/* Delete this vlan from the vid table */
	for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
		vpath = &vdev->vpaths[vp_id];
		if (!vpath->is_open)
			continue;
		vxge_hw_vpath_vid_delete(vpath->handle, vid);
	}
	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}
static const struct net_device_ops vxge_netdev_ops = {
	.ndo_open               = vxge_open,
	.ndo_stop               = vxge_close,
	.ndo_get_stats          = vxge_get_stats,
	.ndo_start_xmit         = vxge_xmit,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_multicast_list = vxge_set_multicast,
	.ndo_do_ioctl           = vxge_ioctl,
	.ndo_set_mac_address    = vxge_set_mac_addr,
	.ndo_change_mtu         = vxge_change_mtu,
	.ndo_vlan_rx_register   = vxge_vlan_rx_register,
	.ndo_vlan_rx_kill_vid   = vxge_vlan_rx_kill_vid,
	.ndo_vlan_rx_add_vid    = vxge_vlan_rx_add_vid,
	.ndo_tx_timeout         = vxge_tx_watchdog,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller    = vxge_netpoll,
#endif
};
int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
                                   struct vxge_config *config,
                                   int high_dma, int no_of_vpath,
                                   struct vxgedev **vdev_out)
{
    struct net_device *ndev;
    enum vxge_hw_status status = VXGE_HW_OK;
    struct vxgedev *vdev;
    int ret = 0, no_of_queue = 1;
    u64 stat;

    *vdev_out = NULL;
    if (config->tx_steering_type)
        no_of_queue = no_of_vpath;

    ndev = alloc_etherdev_mq(sizeof(struct vxgedev), no_of_queue);
    if (ndev == NULL) {
        vxge_debug_init(
            vxge_hw_device_trace_level_get(hldev),
            "%s : device allocation failed", __func__);
        ret = -ENODEV;
        goto _out0;
    }

    vxge_debug_entryexit(
        vxge_hw_device_trace_level_get(hldev),
        "%s: %s:%d Entering...",
        ndev->name, __func__, __LINE__);

    vdev = netdev_priv(ndev);
    memset(vdev, 0, sizeof(struct vxgedev));

    vdev->ndev = ndev;
    vdev->devh = hldev;
    vdev->pdev = hldev->pdev;
    memcpy(&vdev->config, config, sizeof(struct vxge_config));
    vdev->rx_csum = 1;    /* Enable Rx CSUM by default. */

    SET_NETDEV_DEV(ndev, &vdev->pdev->dev);

    ndev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
                NETIF_F_HW_VLAN_FILTER;
    /* Driver entry points */
    ndev->irq = vdev->pdev->irq;
    ndev->base_addr = (unsigned long) hldev->bar0;

    ndev->netdev_ops = &vxge_netdev_ops;

    ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT;

    initialize_ethtool_ops(ndev);

    /* Allocate memory for vpaths */
    vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
                no_of_vpath, GFP_KERNEL);
    if (!vdev->vpaths) {
        vxge_debug_init(VXGE_ERR,
            "%s: vpath memory allocation failed",
            vdev->ndev->name);
        ret = -ENODEV;
        goto _out1;
    }
    ndev->features |= NETIF_F_SG;

    ndev->features |= NETIF_F_HW_CSUM;
    vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
        "%s : checksumming enabled", __func__);

    if (high_dma) {
        ndev->features |= NETIF_F_HIGHDMA;
        vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
            "%s : using High DMA", __func__);
    }

    ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;

    if (vdev->config.gro_enable)
        ndev->features |= NETIF_F_GRO;

    if (register_netdev(ndev)) {
        vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
            "%s: %s : device registration failed!",
            ndev->name, __func__);
        ret = -ENODEV;
        goto _out2;
    }

    /* Set the factory defined MAC address initially */
    ndev->addr_len = ETH_ALEN;

    /* Leave the link state off at this point; when the link-change
     * interrupt arrives, the state will be updated to the right
     * value automatically.
     */
    netif_carrier_off(ndev);

    vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
        "%s: Ethernet device registered",
        ndev->name);

    *vdev_out = vdev;

    /* Reset the device stats */
    status = vxge_hw_mrpcim_stats_access(
        hldev,
        VXGE_HW_STATS_OP_CLEAR_ALL_STATS,
        0,
        0,
        &stat);

    if (status == VXGE_HW_ERR_PRIVILAGED_OPEARATION)
        vxge_debug_init(
            vxge_hw_device_trace_level_get(hldev),
            "%s: device stats clear returns "
            "VXGE_HW_ERR_PRIVILAGED_OPEARATION", ndev->name);

    vxge_debug_entryexit(vxge_hw_device_trace_level_get(hldev),
        "%s: %s:%d Exiting...",
        ndev->name, __func__, __LINE__);

    return ret;

_out2:
    kfree(vdev->vpaths);
_out1:
    free_netdev(ndev);
_out0:
    return ret;
}
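
/*
 * Note the unwind convention used above and again in vxge_probe()
 * further down: numbered labels (_out2/_out1/_out0, _exit0.._exit5)
 * release resources in the reverse order of acquisition, so a failure
 * at any step falls through exactly the cleanups that are needed and
 * no others.
 */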
/*
 * vxge_device_unregister
 *
 * This function will unregister and free the network device.
 */
void
vxge_device_unregister(struct __vxge_hw_device *hldev)
{
    struct vxgedev *vdev;
    struct net_device *dev;
    char buf[IFNAMSIZ];
#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
    (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
    u32 level_trace;
#endif

    dev = hldev->ndev;
    vdev = netdev_priv(dev);
#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
    (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
    level_trace = vdev->level_trace;
#endif
    vxge_debug_entryexit(level_trace,
        "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);

    memcpy(buf, vdev->ndev->name, IFNAMSIZ);

    /* unregister_netdev() will call ndo_stop() if the device is up */
    unregister_netdev(dev);

    flush_scheduled_work();

    vxge_debug_init(level_trace, "%s: ethernet device unregistered", buf);
    vxge_debug_entryexit(level_trace,
        "%s: %s:%d Exiting...", buf, __func__, __LINE__);
}
/*
 * vxge_callback_crit_err
 *
 * This function is called by the alarm handler in interrupt context.
 * Driver must analyze it based on the event type.
 */
static void
vxge_callback_crit_err(struct __vxge_hw_device *hldev,
            enum vxge_hw_event type, u64 vp_id)
{
    struct net_device *dev = hldev->ndev;
    struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
    struct vxge_vpath *vpath = NULL;
    int vpath_idx;

    vxge_debug_entryexit(vdev->level_trace,
        "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);

    /* Note: This event type should be used for device wide
     * indications only - Serious errors, Slot freeze and critical errors
     */
    vdev->cric_err_event = type;

    for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
        vpath = &vdev->vpaths[vpath_idx];
        if (vpath->device_id == vp_id)
            break;
    }

    if (!test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) {
        if (type == VXGE_HW_EVENT_SLOT_FREEZE) {
            vxge_debug_init(VXGE_ERR,
                "%s: Slot is frozen", vdev->ndev->name);
        } else if (type == VXGE_HW_EVENT_SERR) {
            vxge_debug_init(VXGE_ERR,
                "%s: Encountered Serious Error",
                vdev->ndev->name);
        } else if (type == VXGE_HW_EVENT_CRITICAL_ERR)
            vxge_debug_init(VXGE_ERR,
                "%s: Encountered Critical Error",
                vdev->ndev->name);
    }

    if ((type == VXGE_HW_EVENT_SERR) ||
        (type == VXGE_HW_EVENT_SLOT_FREEZE)) {
        if (unlikely(vdev->exec_mode))
            clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
    } else if (type == VXGE_HW_EVENT_CRITICAL_ERR) {
        vxge_hw_device_mask_all(hldev);
        if (unlikely(vdev->exec_mode))
            clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
    } else if ((type == VXGE_HW_EVENT_FIFO_ERR) ||
           (type == VXGE_HW_EVENT_VPATH_ERR)) {
        if (unlikely(vdev->exec_mode))
            clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
        else {
            /* check if this vpath is already set for reset */
            if (!test_and_set_bit(vpath_idx, &vdev->vp_reset)) {
                /* disable interrupts for this vpath */
                vxge_vpath_intr_disable(vdev, vpath_idx);

                /* stop the queue for this vpath */
                netif_tx_stop_queue(vpath->fifo.txq);
            }
        }
    }

    vxge_debug_entryexit(vdev->level_trace,
        "%s: %s:%d Exiting...",
        vdev->ndev->name, __func__, __LINE__);
}
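
/*
 * Summary of how vxge_callback_crit_err() above disposes of each event
 * type (a reading of the code above, not additional behavior):
 *
 *   SERR / SLOT_FREEZE   - device-wide; the card is marked down only
 *                          in debug exec_mode.
 *   CRITICAL_ERR         - device-wide; all device interrupts masked.
 *   FIFO_ERR / VPATH_ERR - per-vpath; the vpath is flagged in
 *                          vdev->vp_reset, its interrupts are disabled
 *                          and its Tx queue stopped until it is reset.
 */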
static void verify_bandwidth(void)
{
    int i, band_width, total = 0, equal_priority = 0;

    /* 1. If user enters 0 for some fifo, give equal priority to all */
    for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
        if (bw_percentage[i] == 0) {
            equal_priority = 1;
            break;
        }
    }

    if (!equal_priority) {
        /* 2. If sum exceeds 100, give equal priority to all */
        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
            if (bw_percentage[i] == 0xFF)
                break;

            total += bw_percentage[i];
            if (total > VXGE_HW_VPATH_BANDWIDTH_MAX) {
                equal_priority = 1;
                break;
            }
        }
    }

    if (!equal_priority) {
        /* Is all the bandwidth consumed? */
        if (total < VXGE_HW_VPATH_BANDWIDTH_MAX) {
            if (i < VXGE_HW_MAX_VIRTUAL_PATHS) {
                /* Split rest of bw equally among next VPs */
                band_width =
                    (VXGE_HW_VPATH_BANDWIDTH_MAX - total) /
                    (VXGE_HW_MAX_VIRTUAL_PATHS - i);
                if (band_width < 2) /* min of 2% */
                    equal_priority = 1;
                else {
                    for (; i < VXGE_HW_MAX_VIRTUAL_PATHS;
                        i++)
                        bw_percentage[i] =
                            band_width;
                }
            }
        } else if (i < VXGE_HW_MAX_VIRTUAL_PATHS)
            equal_priority = 1;
    }

    if (equal_priority) {
        vxge_debug_init(VXGE_ERR,
            "%s: Assigning equal bandwidth to all the vpaths",
            VXGE_DRIVER_NAME);
        bw_percentage[0] = VXGE_HW_VPATH_BANDWIDTH_MAX /
                    VXGE_HW_MAX_VIRTUAL_PATHS;
        for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
            bw_percentage[i] = bw_percentage[0];
    }
}
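
/*
 * Worked example for verify_bandwidth(), assuming the usual values
 * VXGE_HW_MAX_VIRTUAL_PATHS == 17 and VXGE_HW_VPATH_BANDWIDTH_MAX == 100
 * (an assumption; the real constants live in the HW headers):
 *
 *   bw_percentage = { 40, 30, 0xFF, ... } => total = 70, i = 2;
 *   the remainder 100 - 70 = 30 is split over 17 - 2 = 15 vpaths,
 *   i.e. 2% each, which meets the 2% minimum, so vpaths 2..16 get 2.
 *
 *   bw_percentage = { 50, 40, 0xFF, ... } => the remainder 10 over 15
 *   vpaths is 0% after integer division, below the 2% minimum, so the
 *   equal-priority fallback assigns 100 / 17 = 5 to every vpath.
 */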
/*
 * Vpath configuration
 */
static int __devinit vxge_config_vpaths(
            struct vxge_hw_device_config *device_config,
            u64 vpath_mask, struct vxge_config *config_param)
{
    int i, no_of_vpaths = 0, default_no_vpath = 0, temp;
    u32 txdl_size, txdl_per_memblock;

    temp = driver_config->vpath_per_dev;
    if ((driver_config->vpath_per_dev == VXGE_USE_DEFAULT) &&
        (max_config_dev == VXGE_MAX_CONFIG_DEV)) {
        /* No CPUs left to assign; report zero vpaths. */
        if (driver_config->g_no_cpus == -1)
            return 0;

        if (!driver_config->g_no_cpus)
            driver_config->g_no_cpus = num_online_cpus();

        driver_config->vpath_per_dev = driver_config->g_no_cpus >> 1;
        if (!driver_config->vpath_per_dev)
            driver_config->vpath_per_dev = 1;

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
            if (!vxge_bVALn(vpath_mask, i, 1))
                continue;
            else
                default_no_vpath++;

        if (default_no_vpath < driver_config->vpath_per_dev)
            driver_config->vpath_per_dev = default_no_vpath;

        driver_config->g_no_cpus = driver_config->g_no_cpus -
                (driver_config->vpath_per_dev * 2);
        if (driver_config->g_no_cpus <= 0)
            driver_config->g_no_cpus = -1;
    }

    if (driver_config->vpath_per_dev == 1) {
        vxge_debug_ll_config(VXGE_TRACE,
            "%s: Disable tx and rx steering, "
            "as single vpath is configured", VXGE_DRIVER_NAME);
        config_param->rth_steering = NO_STEERING;
        config_param->tx_steering_type = NO_STEERING;
        device_config->rth_en = 0;
    }
    /* configure bandwidth */
    for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
        device_config->vp_config[i].min_bandwidth = bw_percentage[i];

    for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
        device_config->vp_config[i].vp_id = i;
        device_config->vp_config[i].mtu = VXGE_HW_DEFAULT_MTU;
        if (no_of_vpaths < driver_config->vpath_per_dev) {
            if (!vxge_bVALn(vpath_mask, i, 1)) {
                vxge_debug_ll_config(VXGE_TRACE,
                    "%s: vpath: %d is not available",
                    VXGE_DRIVER_NAME, i);
                continue;
            } else {
                vxge_debug_ll_config(VXGE_TRACE,
                    "%s: vpath: %d available",
                    VXGE_DRIVER_NAME, i);
                no_of_vpaths++;
            }
        } else {
            vxge_debug_ll_config(VXGE_TRACE,
                "%s: vpath: %d is not configured, "
                "max_config_vpath exceeded",
                VXGE_DRIVER_NAME, i);
            break;
        }

        /* Configure Tx fifos */
        device_config->vp_config[i].fifo.enable =
                        VXGE_HW_FIFO_ENABLE;
        device_config->vp_config[i].fifo.max_frags =
                        MAX_SKB_FRAGS + 1;
        device_config->vp_config[i].fifo.memblock_size =
                        VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE;

        txdl_size = device_config->vp_config[i].fifo.max_frags *
                sizeof(struct vxge_hw_fifo_txd);
        txdl_per_memblock = VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE / txdl_size;

        device_config->vp_config[i].fifo.fifo_blocks =
            ((VXGE_DEF_FIFO_LENGTH - 1) / txdl_per_memblock) + 1;

        device_config->vp_config[i].fifo.intr =
                VXGE_HW_FIFO_QUEUE_INTR_DISABLE;

        /* Configure tti properties */
        device_config->vp_config[i].tti.intr_enable =
                    VXGE_HW_TIM_INTR_ENABLE;

        device_config->vp_config[i].tti.btimer_val =
            (VXGE_TTI_BTIMER_VAL * 1000) / 272;

        device_config->vp_config[i].tti.timer_ac_en =
                VXGE_HW_TIM_TIMER_AC_ENABLE;

        /* For MSI-X with NAPI (each vector has a handler of its
         * own), set CI to OFF for all vpaths.
         */
        device_config->vp_config[i].tti.timer_ci_en =
                VXGE_HW_TIM_TIMER_CI_DISABLE;

        device_config->vp_config[i].tti.timer_ri_en =
                VXGE_HW_TIM_TIMER_RI_DISABLE;

        device_config->vp_config[i].tti.util_sel =
                VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL;

        device_config->vp_config[i].tti.ltimer_val =
            (VXGE_TTI_LTIMER_VAL * 1000) / 272;

        device_config->vp_config[i].tti.rtimer_val =
            (VXGE_TTI_RTIMER_VAL * 1000) / 272;

        device_config->vp_config[i].tti.urange_a = TTI_TX_URANGE_A;
        device_config->vp_config[i].tti.urange_b = TTI_TX_URANGE_B;
        device_config->vp_config[i].tti.urange_c = TTI_TX_URANGE_C;
        device_config->vp_config[i].tti.uec_a = TTI_TX_UFC_A;
        device_config->vp_config[i].tti.uec_b = TTI_TX_UFC_B;
        device_config->vp_config[i].tti.uec_c = TTI_TX_UFC_C;
        device_config->vp_config[i].tti.uec_d = TTI_TX_UFC_D;

        /* Configure Rx rings */
        device_config->vp_config[i].ring.enable =
                        VXGE_HW_RING_ENABLE;

        device_config->vp_config[i].ring.ring_blocks =
                        VXGE_HW_DEF_RING_BLOCKS;
        device_config->vp_config[i].ring.buffer_mode =
            VXGE_HW_RING_RXD_BUFFER_MODE_1;
        device_config->vp_config[i].ring.rxds_limit =
                VXGE_HW_DEF_RING_RXDS_LIMIT;
        device_config->vp_config[i].ring.scatter_mode =
                    VXGE_HW_RING_SCATTER_MODE_A;

        /* Configure rti properties */
        device_config->vp_config[i].rti.intr_enable =
                    VXGE_HW_TIM_INTR_ENABLE;
        device_config->vp_config[i].rti.btimer_val =
            (VXGE_RTI_BTIMER_VAL * 1000) / 272;
        device_config->vp_config[i].rti.timer_ac_en =
                        VXGE_HW_TIM_TIMER_AC_ENABLE;
        device_config->vp_config[i].rti.timer_ci_en =
                        VXGE_HW_TIM_TIMER_CI_DISABLE;
        device_config->vp_config[i].rti.timer_ri_en =
                        VXGE_HW_TIM_TIMER_RI_DISABLE;
        device_config->vp_config[i].rti.util_sel =
                VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL;
        device_config->vp_config[i].rti.urange_a = RTI_RX_URANGE_A;
        device_config->vp_config[i].rti.urange_b = RTI_RX_URANGE_B;
        device_config->vp_config[i].rti.urange_c = RTI_RX_URANGE_C;
        device_config->vp_config[i].rti.uec_a = RTI_RX_UFC_A;
        device_config->vp_config[i].rti.uec_b = RTI_RX_UFC_B;
        device_config->vp_config[i].rti.uec_c = RTI_RX_UFC_C;
        device_config->vp_config[i].rti.uec_d = RTI_RX_UFC_D;
        device_config->vp_config[i].rti.rtimer_val =
            (VXGE_RTI_RTIMER_VAL * 1000) / 272;
        device_config->vp_config[i].rti.ltimer_val =
            (VXGE_RTI_LTIMER_VAL * 1000) / 272;

        device_config->vp_config[i].rpa_strip_vlan_tag =
            vlan_tag_strip;
    }

    driver_config->vpath_per_dev = temp;
    return no_of_vpaths;
}
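
/*
 * Sketch of the TxD sizing arithmetic in vxge_config_vpaths() above,
 * under assumed values (MAX_SKB_FRAGS == 18, a 32-byte
 * struct vxge_hw_fifo_txd, a 4 KB VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE and
 * VXGE_DEF_FIFO_LENGTH == 84; check the headers for the real ones):
 *
 *   txdl_size         = (18 + 1) * 32    = 608 bytes per TxD list
 *   txdl_per_memblock = 4096 / 608       = 6 lists per memory block
 *   fifo_blocks       = (84 - 1) / 6 + 1 = 14 blocks
 *
 * i.e. fifo_blocks is just the ceiling of the fifo length divided by
 * the number of TxD lists that fit in one memory block.
 */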
/* Initialize device configuration parameters */
static void __devinit vxge_device_config_init(
                struct vxge_hw_device_config *device_config,
                int *intr_type)
{
    /* Used for CQRQ/SRQ. */
    device_config->dma_blockpool_initial =
            VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;

    device_config->dma_blockpool_max =
            VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;

    if (max_mac_vpath > VXGE_MAX_MAC_ADDR_COUNT)
        max_mac_vpath = VXGE_MAX_MAC_ADDR_COUNT;

#ifndef CONFIG_PCI_MSI
    vxge_debug_init(VXGE_ERR,
        "%s: This Kernel does not support "
        "MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME);
    *intr_type = INTA;
#endif

    /* Configure whether MSI-X or IRQ-line (INTA) interrupts are used. */
    switch (*intr_type) {
    case INTA:
        device_config->intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
        break;

    case MSI_X:
        device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX;
        break;
    }

    /* Timer period between device poll */
    device_config->device_poll_millis = VXGE_TIMER_DELAY;

    /* Configure mac based steering. */
    device_config->rts_mac_en = addr_learn_en;

    /* Configure Vpaths */
    device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_MULTI_IT;

    vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ",
            __func__);
    vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_initial : %d",
            device_config->dma_blockpool_initial);
    vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_max : %d",
            device_config->dma_blockpool_max);
    vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d",
            device_config->intr_mode);
    vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d",
            device_config->device_poll_millis);
    vxge_debug_ll_config(VXGE_TRACE, "rts_mac_en : %d",
            device_config->rts_mac_en);
    vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d",
            device_config->rth_en);
    vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d",
            device_config->rth_it_type);
}
static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
{
    int i;

    vxge_debug_init(VXGE_TRACE,
        "%s: %d Vpath(s) opened",
        vdev->ndev->name, vdev->no_of_vpath);

    switch (vdev->config.intr_type) {
    case INTA:
        vxge_debug_init(VXGE_TRACE,
            "%s: Interrupt type INTA", vdev->ndev->name);
        break;

    case MSI_X:
        vxge_debug_init(VXGE_TRACE,
            "%s: Interrupt type MSI-X", vdev->ndev->name);
        break;
    }

    if (vdev->config.rth_steering) {
        vxge_debug_init(VXGE_TRACE,
            "%s: RTH steering enabled for TCP_IPV4",
            vdev->ndev->name);
    } else {
        vxge_debug_init(VXGE_TRACE,
            "%s: RTH steering disabled", vdev->ndev->name);
    }

    switch (vdev->config.tx_steering_type) {
    case NO_STEERING:
        vxge_debug_init(VXGE_TRACE,
            "%s: Tx steering disabled", vdev->ndev->name);
        break;
    case TX_PRIORITY_STEERING:
        vxge_debug_init(VXGE_TRACE,
            "%s: Unsupported tx steering option",
            vdev->ndev->name);
        vxge_debug_init(VXGE_TRACE,
            "%s: Tx steering disabled", vdev->ndev->name);
        vdev->config.tx_steering_type = 0;
        break;
    case TX_VLAN_STEERING:
        vxge_debug_init(VXGE_TRACE,
            "%s: Unsupported tx steering option",
            vdev->ndev->name);
        vxge_debug_init(VXGE_TRACE,
            "%s: Tx steering disabled", vdev->ndev->name);
        vdev->config.tx_steering_type = 0;
        break;
    case TX_MULTIQ_STEERING:
        vxge_debug_init(VXGE_TRACE,
            "%s: Tx multiqueue steering enabled",
            vdev->ndev->name);
        break;
    case TX_PORT_STEERING:
        vxge_debug_init(VXGE_TRACE,
            "%s: Tx port steering enabled",
            vdev->ndev->name);
        break;
    default:
        vxge_debug_init(VXGE_ERR,
            "%s: Unsupported tx steering type",
            vdev->ndev->name);
        vxge_debug_init(VXGE_TRACE,
            "%s: Tx steering disabled", vdev->ndev->name);
        vdev->config.tx_steering_type = 0;
    }

    if (vdev->config.gro_enable) {
        vxge_debug_init(VXGE_TRACE,
            "%s: Generic receive offload enabled",
            vdev->ndev->name);
    } else
        vxge_debug_init(VXGE_TRACE,
            "%s: Generic receive offload disabled",
            vdev->ndev->name);

    if (vdev->config.addr_learn_en)
        vxge_debug_init(VXGE_TRACE,
            "%s: MAC Address learning enabled", vdev->ndev->name);

    vxge_debug_init(VXGE_TRACE,
        "%s: Rx doorbell mode enabled", vdev->ndev->name);

    for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
        if (!vxge_bVALn(vpath_mask, i, 1))
            continue;
        vxge_debug_ll_config(VXGE_TRACE,
            "%s: MTU size - %d", vdev->ndev->name,
            ((struct __vxge_hw_device *)(vdev->devh))->
                config.vp_config[i].mtu);
        vxge_debug_init(VXGE_TRACE,
            "%s: VLAN tag stripping %s", vdev->ndev->name,
            ((struct __vxge_hw_device *)(vdev->devh))->
                config.vp_config[i].rpa_strip_vlan_tag
            ? "Enabled" : "Disabled");
        vxge_debug_init(VXGE_TRACE,
            "%s: Ring blocks : %d", vdev->ndev->name,
            ((struct __vxge_hw_device *)(vdev->devh))->
                config.vp_config[i].ring.ring_blocks);
        vxge_debug_init(VXGE_TRACE,
            "%s: Fifo blocks : %d", vdev->ndev->name,
            ((struct __vxge_hw_device *)(vdev->devh))->
                config.vp_config[i].fifo.fifo_blocks);
        vxge_debug_ll_config(VXGE_TRACE,
            "%s: Max frags : %d", vdev->ndev->name,
            ((struct __vxge_hw_device *)(vdev->devh))->
                config.vp_config[i].fifo.max_frags);
        break;
    }
}
#ifdef CONFIG_PM
/**
 * vxge_pm_suspend - vxge power management suspend entry point
 *
 * Not implemented; returns -ENOSYS.
 */
static int vxge_pm_suspend(struct pci_dev *pdev, pm_message_t state)
{
    return -ENOSYS;
}
/**
 * vxge_pm_resume - vxge power management resume entry point
 *
 * Not implemented; returns -ENOSYS.
 */
static int vxge_pm_resume(struct pci_dev *pdev)
{
    return -ENOSYS;
}

#endif
/**
 * vxge_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
                        pci_channel_state_t state)
{
    struct __vxge_hw_device *hldev =
        (struct __vxge_hw_device *) pci_get_drvdata(pdev);
    struct net_device *netdev = hldev->ndev;

    netif_device_detach(netdev);

    if (state == pci_channel_io_perm_failure)
        return PCI_ERS_RESULT_DISCONNECT;

    if (netif_running(netdev)) {
        /* Bring down the card, while avoiding PCI I/O */
        do_vxge_close(netdev, 0);
    }

    pci_disable_device(pdev);

    return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * vxge_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
{
    struct __vxge_hw_device *hldev =
        (struct __vxge_hw_device *) pci_get_drvdata(pdev);
    struct net_device *netdev = hldev->ndev;

    struct vxgedev *vdev = netdev_priv(netdev);

    if (pci_enable_device(pdev)) {
        netdev_err(netdev, "Cannot re-enable device after reset\n");
        return PCI_ERS_RESULT_DISCONNECT;
    }

    pci_set_master(pdev);
    vxge_reset(vdev);

    return PCI_ERS_RESULT_RECOVERED;
}
/**
 * vxge_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void vxge_io_resume(struct pci_dev *pdev)
{
    struct __vxge_hw_device *hldev =
        (struct __vxge_hw_device *) pci_get_drvdata(pdev);
    struct net_device *netdev = hldev->ndev;

    if (netif_running(netdev)) {
        if (vxge_open(netdev)) {
            netdev_err(netdev,
                "Can't bring device back up after reset\n");
            return;
        }
    }

    netif_device_attach(netdev);
}
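
/*
 * The three handlers above implement the standard PCI AER recovery
 * sequence and are wired into vxge_err_handler at the bottom of this
 * file: error_detected() detaches and quiesces the device,
 * slot_reset() re-enables and resets it after the bus reset, and
 * resume() reopens the interface and reattaches it.
 */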
static inline u32 vxge_get_num_vfs(u64 function_mode)
{
    u32 num_functions = 0;

    switch (function_mode) {
    case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
    case VXGE_HW_FUNCTION_MODE_SRIOV_8:
        num_functions = 8;
        break;
    case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
        num_functions = 1;
        break;
    case VXGE_HW_FUNCTION_MODE_SRIOV:
    case VXGE_HW_FUNCTION_MODE_MRIOV:
    case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17:
        num_functions = 17;
        break;
    case VXGE_HW_FUNCTION_MODE_SRIOV_4:
        num_functions = 4;
        break;
    case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2:
        num_functions = 2;
        break;
    case VXGE_HW_FUNCTION_MODE_MRIOV_8:
        num_functions = 8; /* TODO */
        break;
    }
    return num_functions;
}
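
/*
 * Example of how vxge_get_num_vfs() feeds pci_enable_sriov() in
 * vxge_probe() below: in SRIOV_8 mode the function count is 8, so
 * num_vfs = 8 - 1 = 7 (the PF itself is excluded), and the probe
 * enables min(max_config_dev - 1, 7) virtual functions.
 */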
/**
 * vxge_probe
 * @pdev : structure containing the PCI related information of the device.
 * @pre: the matching entry in vxge_id_table, the list of PCI devices
 * supported by the driver.
 * Description:
 * This function is called when a new PCI device gets detected and initializes
 * it.
 * Return value:
 * returns 0 on success and negative on failure.
 *
 */
static int __devinit
vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
{
    struct __vxge_hw_device *hldev;
    enum vxge_hw_status status;
    int ret;
    int high_dma = 0;
    u64 vpath_mask = 0;
    struct vxgedev *vdev;
    struct vxge_config *ll_config = NULL;
    struct vxge_hw_device_config *device_config = NULL;
    struct vxge_hw_device_attr attr;
    int i, j, no_of_vpath = 0, max_vpath_supported = 0;
    u8 *macaddr;
    struct vxge_mac_addrs *entry;
    static int bus = -1, device = -1;
    u32 host_type;
    u8 new_device = 0;
    enum vxge_hw_status is_privileged;
    u32 function_mode;
    u32 num_vfs = 0;

    vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
    attr.pdev = pdev;

    /* In SRIOV-17 mode, functions of the same adapter
     * can be deployed on different buses
     */
    if ((!pdev->is_virtfn) && ((bus != pdev->bus->number) ||
        (device != PCI_SLOT(pdev->devfn))))
        new_device = 1;

    bus = pdev->bus->number;
    device = PCI_SLOT(pdev->devfn);

    if (new_device) {
        if (driver_config->config_dev_cnt &&
           (driver_config->config_dev_cnt !=
            driver_config->total_dev_cnt))
            vxge_debug_init(VXGE_ERR,
                "%s: Configured %d of %d devices",
                VXGE_DRIVER_NAME,
                driver_config->config_dev_cnt,
                driver_config->total_dev_cnt);
        driver_config->config_dev_cnt = 0;
        driver_config->total_dev_cnt = 0;
    }

    /* The CPU-based vpath count calculation now applies to
     * individual functions as well.
     */
    driver_config->g_no_cpus = 0;
    driver_config->vpath_per_dev = max_config_vpath;

    driver_config->total_dev_cnt++;
    if (++driver_config->config_dev_cnt > max_config_dev) {
        ret = 0;
        goto _exit0;
    }

    device_config = kzalloc(sizeof(struct vxge_hw_device_config),
        GFP_KERNEL);
    if (!device_config) {
        ret = -ENOMEM;
        vxge_debug_init(VXGE_ERR,
            "device_config : malloc failed %s %d",
            __FILE__, __LINE__);
        goto _exit0;
    }

    ll_config = kzalloc(sizeof(*ll_config), GFP_KERNEL);
    if (!ll_config) {
        ret = -ENOMEM;
        vxge_debug_init(VXGE_ERR,
            "ll_config : malloc failed %s %d",
            __FILE__, __LINE__);
        goto _exit0;
    }
    ll_config->tx_steering_type = TX_MULTIQ_STEERING;
    ll_config->intr_type = MSI_X;
    ll_config->napi_weight = NEW_NAPI_WEIGHT;
    ll_config->rth_steering = RTH_STEERING;

    /* get the default configuration parameters */
    vxge_hw_device_config_default_get(device_config);

    /* initialize configuration parameters */
    vxge_device_config_init(device_config, &ll_config->intr_type);

    ret = pci_enable_device(pdev);
    if (ret) {
        vxge_debug_init(VXGE_ERR,
            "%s : cannot enable PCI device", __func__);
        goto _exit0;
    }
    if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
        vxge_debug_ll_config(VXGE_TRACE,
            "%s : using 64bit DMA", __func__);

        high_dma = 1;

        if (pci_set_consistent_dma_mask(pdev,
                        DMA_BIT_MASK(64))) {
            vxge_debug_init(VXGE_ERR,
                "%s : unable to obtain 64bit DMA for "
                "consistent allocations", __func__);
            ret = -ENOMEM;
            goto _exit1;
        }
    } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
        vxge_debug_ll_config(VXGE_TRACE,
            "%s : using 32bit DMA", __func__);
    } else {
        ret = -ENOMEM;
        goto _exit1;
    }

    if (pci_request_regions(pdev, VXGE_DRIVER_NAME)) {
        vxge_debug_init(VXGE_ERR,
            "%s : request regions failed", __func__);
        ret = -ENODEV;
        goto _exit1;
    }

    pci_set_master(pdev);

    attr.bar0 = pci_ioremap_bar(pdev, 0);
    if (!attr.bar0) {
        vxge_debug_init(VXGE_ERR,
            "%s : cannot remap io memory bar0", __func__);
        ret = -ENODEV;
        goto _exit2;
    }
    vxge_debug_ll_config(VXGE_TRACE,
        "pci ioremap bar0: %p:0x%llx",
        attr.bar0,
        (unsigned long long)pci_resource_start(pdev, 0));

    status = vxge_hw_device_hw_info_get(attr.bar0,
            &ll_config->device_hw_info);
    if (status != VXGE_HW_OK) {
        vxge_debug_init(VXGE_ERR,
            "%s: Reading of hardware info failed. "
            "Please try upgrading the firmware.", VXGE_DRIVER_NAME);
        ret = -EINVAL;
        goto _exit3;
    }
    if (ll_config->device_hw_info.fw_version.major !=
        VXGE_DRIVER_FW_VERSION_MAJOR) {
        vxge_debug_init(VXGE_ERR,
            "%s: Incorrect firmware version. "
            "Please upgrade the firmware to version 1.x.x",
            VXGE_DRIVER_NAME);
        ret = -EINVAL;
        goto _exit3;
    }

    vpath_mask = ll_config->device_hw_info.vpath_mask;
    if (vpath_mask == 0) {
        vxge_debug_ll_config(VXGE_TRACE,
            "%s: No vpaths available in device", VXGE_DRIVER_NAME);
        ret = -EINVAL;
        goto _exit3;
    }

    vxge_debug_ll_config(VXGE_TRACE,
        "%s:%d Vpath mask = %llx", __func__, __LINE__,
        (unsigned long long)vpath_mask);

    function_mode = ll_config->device_hw_info.function_mode;
    host_type = ll_config->device_hw_info.host_type;
    is_privileged = __vxge_hw_device_is_privilaged(host_type,
        ll_config->device_hw_info.func_id);

    /* Check how many vpaths are available */
    for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
        if (!((vpath_mask) & vxge_mBIT(i)))
            continue;
        max_vpath_supported++;
    }

    if (new_device)
        num_vfs = vxge_get_num_vfs(function_mode) - 1;

    /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */
    if (is_sriov(function_mode) && (max_config_dev > 1) &&
        (ll_config->intr_type != INTA) &&
        (is_privileged == VXGE_HW_OK)) {
        ret = pci_enable_sriov(pdev, ((max_config_dev - 1) < num_vfs)
            ? (max_config_dev - 1) : num_vfs);
        if (ret)
            vxge_debug_ll_config(VXGE_ERR,
                "Failed in enabling SRIOV mode: %d\n", ret);
    }

    /*
     * Configure vpaths and get driver configured number of vpaths
     * which is less than or equal to the maximum vpaths per function.
     */
    no_of_vpath = vxge_config_vpaths(device_config, vpath_mask, ll_config);
    if (!no_of_vpath) {
        vxge_debug_ll_config(VXGE_ERR,
            "%s: No more vpaths to configure", VXGE_DRIVER_NAME);
        ret = 0;
        goto _exit3;
    }
    /* Setting driver callbacks */
    attr.uld_callbacks.link_up = vxge_callback_link_up;
    attr.uld_callbacks.link_down = vxge_callback_link_down;
    attr.uld_callbacks.crit_err = vxge_callback_crit_err;

    status = vxge_hw_device_initialize(&hldev, &attr, device_config);
    if (status != VXGE_HW_OK) {
        vxge_debug_init(VXGE_ERR,
            "Failed to initialize device (%d)", status);
        ret = -EINVAL;
        goto _exit3;
    }

    /* if FCS stripping is not disabled in the MAC, fail the driver load */
    if (vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask) != VXGE_HW_OK) {
        vxge_debug_init(VXGE_ERR,
            "%s: FCS stripping is not disabled in MAC, "
            "failing driver load", VXGE_DRIVER_NAME);
        ret = -EINVAL;
        goto _exit4;
    }

    vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);

    /* set private device info */
    pci_set_drvdata(pdev, hldev);

    ll_config->gro_enable = VXGE_GRO_ALWAYS_AGGREGATE;
    ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
    ll_config->addr_learn_en = addr_learn_en;
    ll_config->rth_algorithm = RTH_ALG_JENKINS;
    ll_config->rth_hash_type_tcpipv4 = VXGE_HW_RING_HASH_TYPE_TCP_IPV4;
    ll_config->rth_hash_type_ipv4 = VXGE_HW_RING_HASH_TYPE_NONE;
    ll_config->rth_hash_type_tcpipv6 = VXGE_HW_RING_HASH_TYPE_NONE;
    ll_config->rth_hash_type_ipv6 = VXGE_HW_RING_HASH_TYPE_NONE;
    ll_config->rth_hash_type_tcpipv6ex = VXGE_HW_RING_HASH_TYPE_NONE;
    ll_config->rth_hash_type_ipv6ex = VXGE_HW_RING_HASH_TYPE_NONE;
    ll_config->rth_bkt_sz = RTH_BUCKET_SIZE;
    ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
    ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;

    if (vxge_device_register(hldev, ll_config, high_dma, no_of_vpath,
        &vdev)) {
        ret = -EINVAL;
        goto _exit4;
    }
    vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL);
    VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
        vxge_hw_device_trace_level_get(hldev));

    /* set private HW device info */
    hldev->ndev = vdev->ndev;
    vdev->mtu = VXGE_HW_DEFAULT_MTU;
    vdev->bar0 = attr.bar0;
    vdev->max_vpath_supported = max_vpath_supported;
    vdev->no_of_vpath = no_of_vpath;

    /* Virtual Path count */
    for (i = 0, j = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
        if (!vxge_bVALn(vpath_mask, i, 1))
            continue;
        if (j >= vdev->no_of_vpath)
            break;

        vdev->vpaths[j].is_configured = 1;
        vdev->vpaths[j].device_id = i;
        vdev->vpaths[j].ring.driver_id = j;
        vdev->vpaths[j].vdev = vdev;
        vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath;
        memcpy((u8 *)vdev->vpaths[j].macaddr,
                ll_config->device_hw_info.mac_addrs[i],
                ETH_ALEN);

        /* Initialize the mac address list header */
        INIT_LIST_HEAD(&vdev->vpaths[j].mac_addr_list);

        vdev->vpaths[j].mac_addr_cnt = 0;
        vdev->vpaths[j].mcast_addr_cnt = 0;

        j++;
    }
    vdev->exec_mode = VXGE_EXEC_MODE_DISABLE;
    vdev->max_config_port = max_config_port;

    vdev->vlan_tag_strip = vlan_tag_strip;

    /* map the hashing selector table to the configured vpaths */
    for (i = 0; i < vdev->no_of_vpath; i++)
        vdev->vpath_selector[i] = vpath_selector[i];

    macaddr = (u8 *)vdev->vpaths[0].macaddr;

    ll_config->device_hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0';
    ll_config->device_hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0';
    ll_config->device_hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0';

    vxge_debug_init(VXGE_TRACE, "%s: SERIAL NUMBER: %s",
        vdev->ndev->name, ll_config->device_hw_info.serial_number);

    vxge_debug_init(VXGE_TRACE, "%s: PART NUMBER: %s",
        vdev->ndev->name, ll_config->device_hw_info.part_number);

    vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter",
        vdev->ndev->name, ll_config->device_hw_info.product_desc);

    vxge_debug_init(VXGE_TRACE, "%s: MAC ADDR: %pM",
        vdev->ndev->name, macaddr);

    vxge_debug_init(VXGE_TRACE, "%s: Link Width x%d",
        vdev->ndev->name, vxge_hw_device_link_width_get(hldev));

    vxge_debug_init(VXGE_TRACE,
        "%s: Firmware version : %s Date : %s", vdev->ndev->name,
        ll_config->device_hw_info.fw_version.version,
        ll_config->device_hw_info.fw_date.date);
    if (new_device) {
        switch (ll_config->device_hw_info.function_mode) {
        case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
            vxge_debug_init(VXGE_TRACE,
                "%s: Single Function Mode Enabled",
                vdev->ndev->name);
            break;
        case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
            vxge_debug_init(VXGE_TRACE,
                "%s: Multi Function Mode Enabled",
                vdev->ndev->name);
            break;
        case VXGE_HW_FUNCTION_MODE_SRIOV:
            vxge_debug_init(VXGE_TRACE,
                "%s: Single Root IOV Mode Enabled",
                vdev->ndev->name);
            break;
        case VXGE_HW_FUNCTION_MODE_MRIOV:
            vxge_debug_init(VXGE_TRACE,
                "%s: Multi Root IOV Mode Enabled",
                vdev->ndev->name);
            break;
        }
    }

    vxge_print_parm(vdev, vpath_mask);

    /* Store the fw version for the ethtool option */
    strcpy(vdev->fw_version, ll_config->device_hw_info.fw_version.version);
    memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN);
    memcpy(vdev->ndev->perm_addr, vdev->ndev->dev_addr, ETH_ALEN);

    /* Copy the station mac address to the list */
    for (i = 0; i < vdev->no_of_vpath; i++) {
        entry = (struct vxge_mac_addrs *)
                kzalloc(sizeof(struct vxge_mac_addrs),
                    GFP_KERNEL);
        if (NULL == entry) {
            vxge_debug_init(VXGE_ERR,
                "%s: mac_addr_list : memory allocation failed",
                vdev->ndev->name);
            ret = -EPERM;
            goto _exit5;
        }
        macaddr = (u8 *)&entry->macaddr;
        memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN);
        list_add(&entry->item, &vdev->vpaths[i].mac_addr_list);
        vdev->vpaths[i].mac_addr_cnt = 1;
    }
    kfree(device_config);

    /*
     * INTA is shared in multi-function mode. This is unlike the INTA
     * implementation in MR mode, where each VH has its own INTA message.
     * - INTA is masked (disabled) as long as at least one function sets
     * its TITAN_MASK_ALL_INT.ALARM bit.
     * - INTA is unmasked (enabled) when all enabled functions have cleared
     * their own TITAN_MASK_ALL_INT.ALARM bit.
     * The TITAN_MASK_ALL_INT ALARM & TRAFFIC bits are cleared on power up.
     * Though this driver leaves the top level interrupts unmasked while
     * leaving the required module interrupt bits masked on exit, there
     * could be a rogue driver around that does not follow this procedure,
     * resulting in a failure to generate interrupts. The following code is
     * present to prevent such a failure.
     */
    if (ll_config->device_hw_info.function_mode ==
        VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION)
        if (vdev->config.intr_type == INTA)
            vxge_hw_device_unmask_all(hldev);

    vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
        vdev->ndev->name, __func__, __LINE__);

    vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
    VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
        vxge_hw_device_trace_level_get(hldev));

    kfree(ll_config);
    return 0;

_exit5:
    for (i = 0; i < vdev->no_of_vpath; i++)
        vxge_free_mac_add_list(&vdev->vpaths[i]);

    vxge_device_unregister(hldev);
_exit4:
    pci_disable_sriov(pdev);
    vxge_hw_device_terminate(hldev);
_exit3:
    iounmap(attr.bar0);
_exit2:
    pci_release_regions(pdev);
_exit1:
    pci_disable_device(pdev);
_exit0:
    kfree(ll_config);
    kfree(device_config);
    driver_config->config_dev_cnt--;
    pci_set_drvdata(pdev, NULL);
    return ret;
}
/**
 * vxge_remove - Free the PCI device
 * @pdev: structure containing the PCI related information of the device.
 * Description: This function is called by the PCI subsystem to release a
 * PCI device and free all resources held by the device.
 */
static void __devexit
vxge_remove(struct pci_dev *pdev)
{
    struct __vxge_hw_device *hldev;
    struct vxgedev *vdev = NULL;
    struct net_device *dev;
    int i = 0;
#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
    (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
    u32 level_trace;
#endif

    hldev = (struct __vxge_hw_device *) pci_get_drvdata(pdev);

    if (hldev == NULL)
        return;
    dev = hldev->ndev;
    vdev = netdev_priv(dev);

#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
    (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
    level_trace = vdev->level_trace;
#endif
    vxge_debug_entryexit(level_trace,
        "%s:%d", __func__, __LINE__);

    vxge_debug_init(level_trace,
        "%s : removing PCI device...", __func__);
    vxge_device_unregister(hldev);

    for (i = 0; i < vdev->no_of_vpath; i++) {
        vxge_free_mac_add_list(&vdev->vpaths[i]);
        vdev->vpaths[i].mcast_addr_cnt = 0;
        vdev->vpaths[i].mac_addr_cnt = 0;
    }

    kfree(vdev->vpaths);

    iounmap(vdev->bar0);

    pci_disable_sriov(pdev);

    /* we are safe to free the netdev now */
    free_netdev(dev);

    vxge_debug_init(level_trace,
        "%s:%d Device unregistered", __func__, __LINE__);

    vxge_hw_device_terminate(hldev);

    pci_disable_device(pdev);
    pci_release_regions(pdev);
    pci_set_drvdata(pdev, NULL);
    vxge_debug_entryexit(level_trace,
        "%s:%d Exiting...", __func__, __LINE__);
}
static struct pci_error_handlers vxge_err_handler = {
    .error_detected = vxge_io_error_detected,
    .slot_reset = vxge_io_slot_reset,
    .resume = vxge_io_resume,
};

static struct pci_driver vxge_driver = {
    .name = VXGE_DRIVER_NAME,
    .id_table = vxge_id_table,
    .probe = vxge_probe,
    .remove = __devexit_p(vxge_remove),
#ifdef CONFIG_PM
    .suspend = vxge_pm_suspend,
    .resume = vxge_pm_resume,
#endif
    .err_handler = &vxge_err_handler,
};
static int __init
vxge_starter(void)
{
    int ret = 0;

    pr_info("Copyright(c) 2002-2010 Exar Corp.\n");
    pr_info("Driver version: %s\n", DRV_VERSION);

    verify_bandwidth();

    driver_config = kzalloc(sizeof(struct vxge_drv_config), GFP_KERNEL);
    if (!driver_config)
        return -ENOMEM;

    ret = pci_register_driver(&vxge_driver);

    if (driver_config->config_dev_cnt &&
       (driver_config->config_dev_cnt != driver_config->total_dev_cnt))
        vxge_debug_init(VXGE_ERR,
            "%s: Configured %d of %d devices",
            VXGE_DRIVER_NAME, driver_config->config_dev_cnt,
            driver_config->total_dev_cnt);

    if (ret)
        kfree(driver_config);

    return ret;
}

static void __exit
vxge_closer(void)
{
    pci_unregister_driver(&vxge_driver);
    kfree(driver_config);
}
module_init(vxge_starter);
module_exit(vxge_closer);