vxge-main.c
/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice. This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-main.c: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
 *              Virtualized Server Adapter.
 * Copyright(c) 2002-2009 Neterion Inc.
 *
 * The module loadable parameters that are supported by the driver and a brief
 * explanation of all the variables:
 * vlan_tag_strip:
 *        Strip VLAN Tag enable/disable. Instructs the device to remove
 *        the VLAN tag from all received tagged frames that are not
 *        replicated at the internal L2 switch.
 *                0 - Do not strip the VLAN tag.
 *                1 - Strip the VLAN tag.
 *
 * addr_learn_en:
 *        Enable learning the MAC address of the guest OS interface in
 *        a virtualization environment.
 *                0 - DISABLE
 *                1 - ENABLE
 *
 * max_config_port:
 *        Maximum number of ports to be supported.
 *        MIN - 1 and MAX - 2
 *
 * max_config_vpath:
 *        Maximum number of VPATHs configured for each device function.
 *        MIN - 1 and MAX - 17
 *
 * max_config_dev:
 *        Maximum number of device functions to be enabled.
 *        MIN - 1 and MAX - 17
 ******************************************************************************/
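/*
 * Example (illustrative only; parameter names as documented above):
 *
 *        modprobe vxge vlan_tag_strip=1 addr_learn_en=0 max_config_vpath=4
 *
 * loads the driver with VLAN tag stripping enabled, address learning
 * disabled and up to four virtual paths per device function.
 */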
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <net/ip.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include "vxge-main.h"
#include "vxge-reg.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O "
        "Virtualized Server Adapter");
static DEFINE_PCI_DEVICE_TABLE(vxge_id_table) = {
        {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID,
        PCI_ANY_ID},
        {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID,
        PCI_ANY_ID},
        {0}
};

MODULE_DEVICE_TABLE(pci, vxge_id_table);
VXGE_MODULE_PARAM_INT(vlan_tag_strip, VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE);
VXGE_MODULE_PARAM_INT(addr_learn_en, VXGE_HW_MAC_ADDR_LEARN_DEFAULT);
VXGE_MODULE_PARAM_INT(max_config_port, VXGE_MAX_CONFIG_PORT);
VXGE_MODULE_PARAM_INT(max_config_vpath, VXGE_USE_DEFAULT);
VXGE_MODULE_PARAM_INT(max_mac_vpath, VXGE_MAX_MAC_ADDR_COUNT);
VXGE_MODULE_PARAM_INT(max_config_dev, VXGE_MAX_CONFIG_DEV);
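/*
 * vpath_selector[n - 1] is the port-hash mask used when n vpaths are
 * configured: the smallest (2^k - 1) value covering all n indices. The
 * masked TCP/UDP port sum computed in vxge_get_vpath_no() is clamped
 * there if it still exceeds the vpath count. bw_percentage defaults
 * every vpath's entry to 0xFF.
 */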
static u16 vpath_selector[VXGE_HW_MAX_VIRTUAL_PATHS] =
        {0, 1, 3, 3, 7, 7, 7, 7, 15, 15, 15, 15, 15, 15, 15, 15, 31};

static unsigned int bw_percentage[VXGE_HW_MAX_VIRTUAL_PATHS] =
        {[0 ...(VXGE_HW_MAX_VIRTUAL_PATHS - 1)] = 0xFF};
module_param_array(bw_percentage, uint, NULL, 0);

static struct vxge_drv_config *driver_config;
static inline int is_vxge_card_up(struct vxgedev *vdev)
{
        return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
}

static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
{
        unsigned long flags = 0;
        struct sk_buff **skb_ptr = NULL;
        struct sk_buff **temp;
#define NR_SKB_COMPLETED 128
        struct sk_buff *completed[NR_SKB_COMPLETED];
        int more;
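        /*
         * Reap completed Tx descriptors in batches of NR_SKB_COMPLETED.
         * The trylock avoids spinning against the hot vxge_xmit() path;
         * on contention the skbs are simply reaped on a later pass (the
         * loop repeats while the poll reports "more").
         */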
        do {
                more = 0;
                skb_ptr = completed;

                if (spin_trylock_irqsave(&fifo->tx_lock, flags)) {
                        vxge_hw_vpath_poll_tx(fifo->handle, &skb_ptr,
                                        NR_SKB_COMPLETED, &more);
                        spin_unlock_irqrestore(&fifo->tx_lock, flags);
                }
                /* free SKBs */
                for (temp = completed; temp != skb_ptr; temp++)
                        dev_kfree_skb_irq(*temp);
        } while (more);
}
static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev *vdev)
{
        int i;
        /* Complete all transmits */
        for (i = 0; i < vdev->no_of_vpath; i++)
                VXGE_COMPLETE_VPATH_TX(&vdev->vpaths[i].fifo);
}

static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
{
        int i;
        struct vxge_ring *ring;

        /* Complete all receives */
        for (i = 0; i < vdev->no_of_vpath; i++) {
                ring = &vdev->vpaths[i].ring;
                vxge_hw_vpath_poll_rx(ring->handle);
        }
}

/*
 * MultiQ manipulation helper functions
 */
void vxge_stop_all_tx_queue(struct vxgedev *vdev)
{
        int i;
        struct net_device *dev = vdev->ndev;

        if (vdev->config.tx_steering_type != TX_MULTIQ_STEERING) {
                for (i = 0; i < vdev->no_of_vpath; i++)
                        vdev->vpaths[i].fifo.queue_state = VPATH_QUEUE_STOP;
        }
        netif_tx_stop_all_queues(dev);
}

void vxge_stop_tx_queue(struct vxge_fifo *fifo)
{
        struct net_device *dev = fifo->ndev;
        struct netdev_queue *txq = NULL;

        if (fifo->tx_steering_type == TX_MULTIQ_STEERING)
                txq = netdev_get_tx_queue(dev, fifo->driver_id);
        else {
                txq = netdev_get_tx_queue(dev, 0);
                fifo->queue_state = VPATH_QUEUE_STOP;
        }
        netif_tx_stop_queue(txq);
}

void vxge_start_all_tx_queue(struct vxgedev *vdev)
{
        int i;
        struct net_device *dev = vdev->ndev;

        if (vdev->config.tx_steering_type != TX_MULTIQ_STEERING) {
                for (i = 0; i < vdev->no_of_vpath; i++)
                        vdev->vpaths[i].fifo.queue_state = VPATH_QUEUE_START;
        }
        netif_tx_start_all_queues(dev);
}

static void vxge_wake_all_tx_queue(struct vxgedev *vdev)
{
        int i;
        struct net_device *dev = vdev->ndev;

        if (vdev->config.tx_steering_type != TX_MULTIQ_STEERING) {
                for (i = 0; i < vdev->no_of_vpath; i++)
                        vdev->vpaths[i].fifo.queue_state = VPATH_QUEUE_START;
        }
        netif_tx_wake_all_queues(dev);
}

void vxge_wake_tx_queue(struct vxge_fifo *fifo, struct sk_buff *skb)
{
        struct net_device *dev = fifo->ndev;
        int vpath_no = fifo->driver_id;
        struct netdev_queue *txq = NULL;

        if (fifo->tx_steering_type == TX_MULTIQ_STEERING) {
                txq = netdev_get_tx_queue(dev, vpath_no);
                if (netif_tx_queue_stopped(txq))
                        netif_tx_wake_queue(txq);
        } else {
                txq = netdev_get_tx_queue(dev, 0);
                if (fifo->queue_state == VPATH_QUEUE_STOP)
                        if (netif_tx_queue_stopped(txq)) {
                                fifo->queue_state = VPATH_QUEUE_START;
                                netif_tx_wake_queue(txq);
                        }
        }
}
/*
 * vxge_callback_link_up
 *
 * This function is called during interrupt context to notify link up state
 * change.
 */
void
vxge_callback_link_up(struct __vxge_hw_device *hldev)
{
        struct net_device *dev = hldev->ndev;
        struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);

        vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
                vdev->ndev->name, __func__, __LINE__);
        printk(KERN_NOTICE "%s: Link Up\n", vdev->ndev->name);
        vdev->stats.link_up++;

        netif_carrier_on(vdev->ndev);
        vxge_wake_all_tx_queue(vdev);

        vxge_debug_entryexit(VXGE_TRACE,
                "%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
}

/*
 * vxge_callback_link_down
 *
 * This function is called during interrupt context to notify link down state
 * change.
 */
void
vxge_callback_link_down(struct __vxge_hw_device *hldev)
{
        struct net_device *dev = hldev->ndev;
        struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);

        vxge_debug_entryexit(VXGE_TRACE,
                "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
        printk(KERN_NOTICE "%s: Link Down\n", vdev->ndev->name);

        vdev->stats.link_down++;
        netif_carrier_off(vdev->ndev);
        vxge_stop_all_tx_queue(vdev);

        vxge_debug_entryexit(VXGE_TRACE,
                "%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
}
/*
 * vxge_rx_alloc
 *
 * Allocate SKB.
 */
static struct sk_buff *
vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size)
{
        struct net_device *dev;
        struct sk_buff *skb;
        struct vxge_rx_priv *rx_priv;

        dev = ring->ndev;
        vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
                ring->ndev->name, __func__, __LINE__);

        rx_priv = vxge_hw_ring_rxd_private_get(dtrh);

        /* try to allocate skb first. this one may fail */
        skb = netdev_alloc_skb(dev, skb_size +
                VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
        if (skb == NULL) {
                vxge_debug_mem(VXGE_ERR,
                        "%s: out of memory to allocate SKB", dev->name);
                ring->stats.skb_alloc_fail++;
                return NULL;
        }

        vxge_debug_mem(VXGE_TRACE,
                "%s: %s:%d Skb : 0x%p", ring->ndev->name,
                __func__, __LINE__, skb);

        skb_reserve(skb, VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);

        rx_priv->skb = skb;
        rx_priv->skb_data = NULL;
        rx_priv->data_size = skb_size;

        vxge_debug_entryexit(VXGE_TRACE,
                "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

        return skb;
}
/*
 * vxge_rx_map
 */
static int vxge_rx_map(void *dtrh, struct vxge_ring *ring)
{
        struct vxge_rx_priv *rx_priv;
        dma_addr_t dma_addr;

        vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
                ring->ndev->name, __func__, __LINE__);
        rx_priv = vxge_hw_ring_rxd_private_get(dtrh);

        rx_priv->skb_data = rx_priv->skb->data;
        dma_addr = pci_map_single(ring->pdev, rx_priv->skb_data,
                                rx_priv->data_size, PCI_DMA_FROMDEVICE);

        if (unlikely(pci_dma_mapping_error(ring->pdev, dma_addr))) {
                ring->stats.pci_map_fail++;
                return -EIO;
        }
        vxge_debug_mem(VXGE_TRACE,
                "%s: %s:%d 1 buffer mode dma_addr = 0x%llx",
                ring->ndev->name, __func__, __LINE__,
                (unsigned long long)dma_addr);
        vxge_hw_ring_rxd_1b_set(dtrh, dma_addr, rx_priv->data_size);
        rx_priv->data_dma = dma_addr;

        vxge_debug_entryexit(VXGE_TRACE,
                "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

        return 0;
}
/*
 * vxge_rx_initial_replenish
 * Allocation of RxD as an initial replenish procedure.
 */
static enum vxge_hw_status
vxge_rx_initial_replenish(void *dtrh, void *userdata)
{
        struct vxge_ring *ring = (struct vxge_ring *)userdata;
        struct vxge_rx_priv *rx_priv;

        vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
                ring->ndev->name, __func__, __LINE__);
        if (vxge_rx_alloc(dtrh, ring,
                        VXGE_LL_MAX_FRAME_SIZE(ring->ndev)) == NULL)
                return VXGE_HW_FAIL;

        if (vxge_rx_map(dtrh, ring)) {
                rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
                dev_kfree_skb(rx_priv->skb);

                return VXGE_HW_FAIL;
        }
        vxge_debug_entryexit(VXGE_TRACE,
                "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

        return VXGE_HW_OK;
}
static inline void
vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
                 int pkt_length, struct vxge_hw_ring_rxd_info *ext_info)
{
        vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
                ring->ndev->name, __func__, __LINE__);
        skb_record_rx_queue(skb, ring->driver_id);
        skb->protocol = eth_type_trans(skb, ring->ndev);

        ring->stats.rx_frms++;
        ring->stats.rx_bytes += pkt_length;

        if (skb->pkt_type == PACKET_MULTICAST)
                ring->stats.rx_mcast++;

        vxge_debug_rx(VXGE_TRACE,
                "%s: %s:%d skb protocol = %d",
                ring->ndev->name, __func__, __LINE__, skb->protocol);

        if (ring->gro_enable) {
                if (ring->vlgrp && ext_info->vlan &&
                        (ring->vlan_tag_strip ==
                                VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE))
                        vlan_gro_receive(ring->napi_p, ring->vlgrp,
                                        ext_info->vlan, skb);
                else
                        napi_gro_receive(ring->napi_p, skb);
        } else {
                if (ring->vlgrp && vlan &&
                        (ring->vlan_tag_strip ==
                                VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE))
                        vlan_hwaccel_receive_skb(skb, ring->vlgrp, vlan);
                else
                        netif_receive_skb(skb);
        }
        vxge_debug_entryexit(VXGE_TRACE,
                "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
}
static inline void vxge_re_pre_post(void *dtr, struct vxge_ring *ring,
                                    struct vxge_rx_priv *rx_priv)
{
        pci_dma_sync_single_for_device(ring->pdev,
                rx_priv->data_dma, rx_priv->data_size, PCI_DMA_FROMDEVICE);

        vxge_hw_ring_rxd_1b_set(dtr, rx_priv->data_dma, rx_priv->data_size);
        vxge_hw_ring_rxd_pre_post(ring->handle, dtr);
}
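/*
 * Re-post RxDs in batches: every VXGE_HW_RXSYNC_FREQ_CNT descriptors the
 * previously saved batch head is made visible to the hardware with a
 * single write-barriered post (post_post_wmb) rather than a barrier per
 * descriptor; the final batch head is flushed at the end of
 * vxge_rx_1b_compl().
 */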
static inline void vxge_post(int *dtr_cnt, void **first_dtr,
                             void *post_dtr, struct __vxge_hw_ring *ringh)
{
        int dtr_count = *dtr_cnt;
        if ((*dtr_cnt % VXGE_HW_RXSYNC_FREQ_CNT) == 0) {
                if (*first_dtr)
                        vxge_hw_ring_rxd_post_post_wmb(ringh, *first_dtr);
                *first_dtr = post_dtr;
        } else
                vxge_hw_ring_rxd_post_post(ringh, post_dtr);
        dtr_count++;
        *dtr_cnt = dtr_count;
}
/*
 * vxge_rx_1b_compl
 *
 * If the interrupt is because of a received frame or if the receive ring
 * contains fresh as yet un-processed frames, this function is called.
 */
enum vxge_hw_status
vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
                 u8 t_code, void *userdata)
{
        struct vxge_ring *ring = (struct vxge_ring *)userdata;
        struct net_device *dev = ring->ndev;
        unsigned int dma_sizes;
        void *first_dtr = NULL;
        int dtr_cnt = 0;
        int data_size;
        dma_addr_t data_dma;
        int pkt_length;
        struct sk_buff *skb;
        struct vxge_rx_priv *rx_priv;
        struct vxge_hw_ring_rxd_info ext_info;

        vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
                ring->ndev->name, __func__, __LINE__);
        ring->pkts_processed = 0;

        vxge_hw_ring_replenish(ringh);

        do {
                prefetch((char *)dtr + L1_CACHE_BYTES);
                rx_priv = vxge_hw_ring_rxd_private_get(dtr);
                skb = rx_priv->skb;
                data_size = rx_priv->data_size;
                data_dma = rx_priv->data_dma;
                prefetch(rx_priv->skb_data);

                vxge_debug_rx(VXGE_TRACE,
                        "%s: %s:%d skb = 0x%p",
                        ring->ndev->name, __func__, __LINE__, skb);

                vxge_hw_ring_rxd_1b_get(ringh, dtr, &dma_sizes);
                pkt_length = dma_sizes;
                pkt_length -= ETH_FCS_LEN;

                vxge_debug_rx(VXGE_TRACE,
                        "%s: %s:%d Packet Length = %d",
                        ring->ndev->name, __func__, __LINE__, pkt_length);

                vxge_hw_ring_rxd_1b_info_get(ringh, dtr, &ext_info);

                /* check skb validity */
                vxge_assert(skb);

                prefetch((char *)skb + L1_CACHE_BYTES);
                if (unlikely(t_code)) {
                        if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) !=
                                VXGE_HW_OK) {
                                ring->stats.rx_errors++;
                                vxge_debug_rx(VXGE_TRACE,
                                        "%s: %s:%d Rx T_code is %d",
                                        ring->ndev->name, __func__,
                                        __LINE__, t_code);

                                /* If the t_code is not supported and if the
                                 * t_code is other than 0x5 (unparseable packet
                                 * such as unknown IPv6 header), drop it!
                                 */
                                vxge_re_pre_post(dtr, ring, rx_priv);
                                vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
                                ring->stats.rx_dropped++;
                                continue;
                        }
                }
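                /*
                 * Copybreak: large frames keep their DMA buffer and a
                 * fresh skb is allocated for the descriptor; frames at or
                 * below VXGE_LL_RX_COPY_THRESHOLD are copied into a small
                 * skb so the original buffer can be re-posted immediately.
                 */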
                if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) {
                        if (vxge_rx_alloc(dtr, ring, data_size) != NULL) {
                                if (!vxge_rx_map(dtr, ring)) {
                                        skb_put(skb, pkt_length);

                                        pci_unmap_single(ring->pdev, data_dma,
                                                data_size, PCI_DMA_FROMDEVICE);

                                        vxge_hw_ring_rxd_pre_post(ringh, dtr);
                                        vxge_post(&dtr_cnt, &first_dtr, dtr,
                                                ringh);
                                } else {
                                        dev_kfree_skb(rx_priv->skb);
                                        rx_priv->skb = skb;
                                        rx_priv->data_size = data_size;
                                        vxge_re_pre_post(dtr, ring, rx_priv);
                                        vxge_post(&dtr_cnt, &first_dtr, dtr,
                                                ringh);
                                        ring->stats.rx_dropped++;
                                        break;
                                }
                        } else {
                                vxge_re_pre_post(dtr, ring, rx_priv);
                                vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
                                ring->stats.rx_dropped++;
                                break;
                        }
                } else {
                        struct sk_buff *skb_up;

                        skb_up = netdev_alloc_skb(dev, pkt_length +
                                VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
                        if (skb_up != NULL) {
                                skb_reserve(skb_up,
                                    VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);

                                pci_dma_sync_single_for_cpu(ring->pdev,
                                        data_dma, data_size,
                                        PCI_DMA_FROMDEVICE);

                                vxge_debug_mem(VXGE_TRACE,
                                        "%s: %s:%d skb_up = %p",
                                        ring->ndev->name, __func__,
                                        __LINE__, skb_up);

                                memcpy(skb_up->data, skb->data, pkt_length);

                                vxge_re_pre_post(dtr, ring, rx_priv);
                                vxge_post(&dtr_cnt, &first_dtr, dtr,
                                        ringh);
                                /* hand the copied skb up the stack instead */
                                skb = skb_up;
                                skb_put(skb, pkt_length);
                        } else {
                                vxge_re_pre_post(dtr, ring, rx_priv);
                                vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
                                vxge_debug_rx(VXGE_ERR,
                                        "%s: vxge_rx_1b_compl: out of "
                                        "memory", dev->name);
                                ring->stats.skb_alloc_fail++;
                                break;
                        }
                }

                if ((ext_info.proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP) &&
                    !(ext_info.proto & VXGE_HW_FRAME_PROTO_IP_FRAG) &&
                    ring->rx_csum && /* Offload Rx side CSUM */
                    ext_info.l3_cksum == VXGE_HW_L3_CKSUM_OK &&
                    ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                else
                        skb->ip_summed = CHECKSUM_NONE;

                vxge_rx_complete(ring, skb, ext_info.vlan,
                        pkt_length, &ext_info);

                ring->budget--;
                ring->pkts_processed++;
                if (!ring->budget)
                        break;

        } while (vxge_hw_ring_rxd_next_completed(ringh, &dtr,
                &t_code) == VXGE_HW_OK);

        if (first_dtr)
                vxge_hw_ring_rxd_post_post_wmb(ringh, first_dtr);

        vxge_debug_entryexit(VXGE_TRACE,
                                "%s:%d Exiting...",
                                __func__, __LINE__);
        return VXGE_HW_OK;
}
/*
 * vxge_xmit_compl
 *
 * If an interrupt was raised to indicate DMA complete of the Tx packet,
 * this function is called. It identifies the last TxD whose buffer was
 * freed and frees all skbs whose data have already been DMA'ed into the
 * NIC's internal memory.
 */
enum vxge_hw_status
vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
                enum vxge_hw_fifo_tcode t_code, void *userdata,
                struct sk_buff ***skb_ptr, int nr_skb, int *more)
{
        struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
        struct sk_buff *skb, **done_skb = *skb_ptr;
        int pkt_cnt = 0;

        vxge_debug_entryexit(VXGE_TRACE,
                "%s:%d Entered....", __func__, __LINE__);

        do {
                int frg_cnt;
                skb_frag_t *frag;
                int i = 0, j;
                struct vxge_tx_priv *txd_priv =
                        vxge_hw_fifo_txdl_private_get(dtr);

                skb = txd_priv->skb;
                frg_cnt = skb_shinfo(skb)->nr_frags;
                frag = &skb_shinfo(skb)->frags[0];

                vxge_debug_tx(VXGE_TRACE,
                        "%s: %s:%d fifo_hw = %p dtr = %p "
                        "tcode = 0x%x", fifo->ndev->name, __func__,
                        __LINE__, fifo_hw, dtr, t_code);

                /* check skb validity */
                vxge_assert(skb);
                vxge_debug_tx(VXGE_TRACE,
                        "%s: %s:%d skb = %p txd_priv = %p frg_cnt = %d",
                        fifo->ndev->name, __func__, __LINE__,
                        skb, txd_priv, frg_cnt);

                if (unlikely(t_code)) {
                        fifo->stats.tx_errors++;
                        vxge_debug_tx(VXGE_ERR,
                                "%s: tx: dtr %p completed due to "
                                "error t_code %01x", fifo->ndev->name,
                                dtr, t_code);
                        vxge_hw_fifo_handle_tcode(fifo_hw, dtr, t_code);
                }

                /* for unfragmented skb */
                pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
                                skb_headlen(skb), PCI_DMA_TODEVICE);

                for (j = 0; j < frg_cnt; j++) {
                        pci_unmap_page(fifo->pdev,
                                        txd_priv->dma_buffers[i++],
                                        frag->size, PCI_DMA_TODEVICE);
                        frag += 1;
                }

                vxge_hw_fifo_txdl_free(fifo_hw, dtr);

                /* Updating the statistics block */
                fifo->stats.tx_frms++;
                fifo->stats.tx_bytes += skb->len;

                *done_skb++ = skb;

                if (--nr_skb <= 0) {
                        *more = 1;
                        break;
                }

                pkt_cnt++;
                if (pkt_cnt > fifo->indicate_max_pkts)
                        break;

        } while (vxge_hw_fifo_txdl_next_completed(fifo_hw,
                                &dtr, &t_code) == VXGE_HW_OK);

        *skb_ptr = done_skb;
        vxge_wake_tx_queue(fifo, skb);

        vxge_debug_entryexit(VXGE_TRACE,
                                "%s: %s:%d Exiting...",
                                fifo->ndev->name, __func__, __LINE__);
        return VXGE_HW_OK;
}
/* select a vpath to transmit the packet */
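/*
 * Steering note: for non-fragmented IPv4 packets the vpath index is
 * derived from (source port + destination port) masked through
 * vpath_selector[]; TCP and UDP keep their port fields at the same
 * offsets, so the tcphdr cast below is safe for both. For UDP, LLTX
 * builds additionally allow the caller to skip the fifo spinlock.
 */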
static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb,
                             int *do_lock)
{
        u16 queue_len, counter = 0;
        if (skb->protocol == htons(ETH_P_IP)) {
                struct iphdr *ip;
                struct tcphdr *th;

                ip = ip_hdr(skb);

                if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) {
                        th = (struct tcphdr *)(((unsigned char *)ip) +
                                        ip->ihl*4);

                        queue_len = vdev->no_of_vpath;
                        counter = (ntohs(th->source) +
                                ntohs(th->dest)) &
                                vdev->vpath_selector[queue_len - 1];
                        if (counter >= queue_len)
                                counter = queue_len - 1;

                        if (ip->protocol == IPPROTO_UDP) {
#ifdef NETIF_F_LLTX
                                *do_lock = 0;
#endif
                        }
                }
        }
        return counter;
}
static enum vxge_hw_status vxge_search_mac_addr_in_list(
        struct vxge_vpath *vpath, u64 del_mac)
{
        struct list_head *entry, *next;
        list_for_each_safe(entry, next, &vpath->mac_addr_list) {
                if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac)
                        return TRUE;
        }
        return FALSE;
}
static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
{
        struct macInfo mac_info;
        u8 *mac_address = NULL;
        u64 mac_addr = 0, vpath_vector = 0;
        int vpath_idx = 0;
        enum vxge_hw_status status = VXGE_HW_OK;
        struct vxge_vpath *vpath = NULL;
        struct __vxge_hw_device *hldev;

        hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);

        mac_address = (u8 *)&mac_addr;
        memcpy(mac_address, mac_header, ETH_ALEN);

        /* Is this mac address already in the list? */
        for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
                vpath = &vdev->vpaths[vpath_idx];
                if (vxge_search_mac_addr_in_list(vpath, mac_addr))
                        return vpath_idx;
        }

        memset(&mac_info, 0, sizeof(struct macInfo));
        memcpy(mac_info.macaddr, mac_header, ETH_ALEN);

        /* Any vpath has room to add mac address to its da table? */
        for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
                vpath = &vdev->vpaths[vpath_idx];
                if (vpath->mac_addr_cnt < vpath->max_mac_addr_cnt) {
                        /* Add this mac address to this vpath */
                        mac_info.vpath_no = vpath_idx;
                        mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
                        status = vxge_add_mac_addr(vdev, &mac_info);
                        if (status != VXGE_HW_OK)
                                return -EPERM;
                        return vpath_idx;
                }
        }

        mac_info.state = VXGE_LL_MAC_ADDR_IN_LIST;
        vpath_idx = 0;
        mac_info.vpath_no = vpath_idx;

        /* Is the first vpath already selected as catch-basin ? */
        vpath = &vdev->vpaths[vpath_idx];
        if (vpath->mac_addr_cnt > vpath->max_mac_addr_cnt) {
                /* Add this mac address to this vpath */
                if (FALSE == vxge_mac_list_add(vpath, &mac_info))
                        return -EPERM;
                return vpath_idx;
        }
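        /*
         * No vpath has DA-table room left: program vpath 0 as the
         * "catch-basin" via the mrpcim rts_mgr_cbasin_cfg register so
         * otherwise-unmatched frames land there, and track the address
         * only in the software list.
         */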
        /* Select first vpath as catch-basin */
        vpath_vector = vxge_mBIT(vpath->device_id);
        status = vxge_hw_mgmt_reg_write(vpath->vdev->devh,
                                vxge_hw_mgmt_reg_type_mrpcim,
                                0,
                                (ulong)offsetof(
                                        struct vxge_hw_mrpcim_reg,
                                        rts_mgr_cbasin_cfg),
                                vpath_vector);
        if (status != VXGE_HW_OK) {
                vxge_debug_tx(VXGE_ERR,
                        "%s: Unable to set the vpath-%d in catch-basin mode",
                        VXGE_DRIVER_NAME, vpath->device_id);
                return -EPERM;
        }

        if (FALSE == vxge_mac_list_add(vpath, &mac_info))
                return -EPERM;

        return vpath_idx;
}
/**
 * vxge_xmit
 * @skb : the socket buffer containing the Tx data.
 * @dev : device pointer.
 *
 * This function is the Tx entry point of the driver. Neterion NIC supports
 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
 * NOTE: when the device can't queue the packet, the trans_start variable
 * is simply not updated.
 */
static netdev_tx_t
vxge_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct vxge_fifo *fifo = NULL;
        void *dtr_priv;
        void *dtr = NULL;
        struct vxgedev *vdev = NULL;
        enum vxge_hw_status status;
        int frg_cnt, first_frg_len;
        skb_frag_t *frag;
        int i = 0, j = 0, avail;
        u64 dma_pointer;
        struct vxge_tx_priv *txdl_priv = NULL;
        struct __vxge_hw_fifo *fifo_hw;
        int offload_type;
        unsigned long flags = 0;
        int vpath_no = 0;
        int do_spin_tx_lock = 1;
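        /*
         * As an LLTX driver, locking is done here with a per-fifo
         * tx_lock rather than by the core. do_spin_tx_lock stays set
         * except for UDP port steering, where only a trylock is
         * attempted and NETDEV_TX_LOCKED is returned on contention.
         */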
        vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
                        dev->name, __func__, __LINE__);

        /* A buffer with no data will be dropped */
        if (unlikely(skb->len <= 0)) {
                vxge_debug_tx(VXGE_ERR,
                        "%s: Buffer has no data..", dev->name);
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        vdev = (struct vxgedev *)netdev_priv(dev);

        if (unlikely(!is_vxge_card_up(vdev))) {
                vxge_debug_tx(VXGE_ERR,
                        "%s: vdev not initialized", dev->name);
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        if (vdev->config.addr_learn_en) {
                vpath_no = vxge_learn_mac(vdev, skb->data + ETH_ALEN);
                if (vpath_no == -EPERM) {
                        vxge_debug_tx(VXGE_ERR,
                                "%s: Failed to store the mac address",
                                dev->name);
                        dev_kfree_skb(skb);
                        return NETDEV_TX_OK;
                }
        }

        if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
                vpath_no = skb_get_queue_mapping(skb);
        else if (vdev->config.tx_steering_type == TX_PORT_STEERING)
                vpath_no = vxge_get_vpath_no(vdev, skb, &do_spin_tx_lock);

        vxge_debug_tx(VXGE_TRACE, "%s: vpath_no= %d", dev->name, vpath_no);

        if (vpath_no >= vdev->no_of_vpath)
                vpath_no = 0;

        fifo = &vdev->vpaths[vpath_no].fifo;
        fifo_hw = fifo->handle;

        if (do_spin_tx_lock)
                spin_lock_irqsave(&fifo->tx_lock, flags);
        else {
                if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
                        return NETDEV_TX_LOCKED;
        }

        if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING) {
                if (netif_subqueue_stopped(dev, skb)) {
                        spin_unlock_irqrestore(&fifo->tx_lock, flags);
                        return NETDEV_TX_BUSY;
                }
        } else if (unlikely(fifo->queue_state == VPATH_QUEUE_STOP)) {
                if (netif_queue_stopped(dev)) {
                        spin_unlock_irqrestore(&fifo->tx_lock, flags);
                        return NETDEV_TX_BUSY;
                }
        }
        avail = vxge_hw_fifo_free_txdl_count_get(fifo_hw);
        if (avail == 0) {
                vxge_debug_tx(VXGE_ERR,
                        "%s: No free TXDs available", dev->name);
                fifo->stats.txd_not_free++;
                vxge_stop_tx_queue(fifo);
                goto _exit2;
        }

        /* Last TXD? Stop tx queue to avoid dropping packets. TX
         * completion will resume the queue.
         */
        if (avail == 1)
                vxge_stop_tx_queue(fifo);

        status = vxge_hw_fifo_txdl_reserve(fifo_hw, &dtr, &dtr_priv);
        if (unlikely(status != VXGE_HW_OK)) {
                vxge_debug_tx(VXGE_ERR,
                        "%s: Out of descriptors.", dev->name);
                fifo->stats.txd_out_of_desc++;
                vxge_stop_tx_queue(fifo);
                goto _exit2;
        }
        vxge_debug_tx(VXGE_TRACE,
                "%s: %s:%d fifo_hw = %p dtr = %p dtr_priv = %p",
                dev->name, __func__, __LINE__,
                fifo_hw, dtr, dtr_priv);

        if (vdev->vlgrp && vlan_tx_tag_present(skb)) {
                u16 vlan_tag = vlan_tx_tag_get(skb);
                vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag);
        }

        first_frg_len = skb_headlen(skb);

        dma_pointer = pci_map_single(fifo->pdev, skb->data, first_frg_len,
                                PCI_DMA_TODEVICE);

        if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) {
                vxge_hw_fifo_txdl_free(fifo_hw, dtr);
                vxge_stop_tx_queue(fifo);
                fifo->stats.pci_map_fail++;
                goto _exit2;
        }

        txdl_priv = vxge_hw_fifo_txdl_private_get(dtr);
        txdl_priv->skb = skb;
        txdl_priv->dma_buffers[j] = dma_pointer;

        frg_cnt = skb_shinfo(skb)->nr_frags;
        vxge_debug_tx(VXGE_TRACE,
                "%s: %s:%d skb = %p txdl_priv = %p "
                "frag_cnt = %d dma_pointer = 0x%llx", dev->name,
                __func__, __LINE__, skb, txdl_priv,
                frg_cnt, (unsigned long long)dma_pointer);

        vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
                first_frg_len);

        frag = &skb_shinfo(skb)->frags[0];
        for (i = 0; i < frg_cnt; i++) {
                /* ignore 0 length fragment */
                if (!frag->size)
                        continue;

                dma_pointer =
                        (u64)pci_map_page(fifo->pdev, frag->page,
                                frag->page_offset, frag->size,
                                PCI_DMA_TODEVICE);

                if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer)))
                        goto _exit0;
                vxge_debug_tx(VXGE_TRACE,
                        "%s: %s:%d frag = %d dma_pointer = 0x%llx",
                        dev->name, __func__, __LINE__, i,
                        (unsigned long long)dma_pointer);

                txdl_priv->dma_buffers[j] = dma_pointer;
                vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
                                        frag->size);
                frag += 1;
        }

        offload_type = vxge_offload_type(skb);

        if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
                int mss = vxge_tcp_mss(skb);
                if (mss) {
                        vxge_debug_tx(VXGE_TRACE,
                                "%s: %s:%d mss = %d",
                                dev->name, __func__, __LINE__, mss);
                        vxge_hw_fifo_txdl_mss_set(dtr, mss);
                } else {
                        vxge_assert(skb->len <=
                                dev->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE);
                        vxge_assert(0);
                        goto _exit1;
                }
        }

        if (skb->ip_summed == CHECKSUM_PARTIAL)
                vxge_hw_fifo_txdl_cksum_set_bits(dtr,
                                        VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
                                        VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN |
                                        VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN);

        vxge_hw_fifo_txdl_post(fifo_hw, dtr);
#ifdef NETIF_F_LLTX
        dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
#endif
        spin_unlock_irqrestore(&fifo->tx_lock, flags);

        VXGE_COMPLETE_VPATH_TX(fifo);
        vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
                dev->name, __func__, __LINE__);
        return NETDEV_TX_OK;

_exit0:
        vxge_debug_tx(VXGE_TRACE, "%s: pci_map_page failed", dev->name);

_exit1:
        j = 0;
        frag = &skb_shinfo(skb)->frags[0];

        pci_unmap_single(fifo->pdev, txdl_priv->dma_buffers[j++],
                        skb_headlen(skb), PCI_DMA_TODEVICE);

        for (; j < i; j++) {
                pci_unmap_page(fifo->pdev, txdl_priv->dma_buffers[j],
                        frag->size, PCI_DMA_TODEVICE);
                frag += 1;
        }

        vxge_hw_fifo_txdl_free(fifo_hw, dtr);

_exit2:
        dev_kfree_skb(skb);
        spin_unlock_irqrestore(&fifo->tx_lock, flags);
        VXGE_COMPLETE_VPATH_TX(fifo);

        return NETDEV_TX_OK;
}
/*
 * vxge_rx_term
 *
 * Function will be called by hw function to abort all outstanding receive
 * descriptors.
 */
static void
vxge_rx_term(void *dtrh, enum vxge_hw_rxd_state state, void *userdata)
{
        struct vxge_ring *ring = (struct vxge_ring *)userdata;
        struct vxge_rx_priv *rx_priv =
                vxge_hw_ring_rxd_private_get(dtrh);

        vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
                ring->ndev->name, __func__, __LINE__);
        if (state != VXGE_HW_RXD_STATE_POSTED)
                return;

        pci_unmap_single(ring->pdev, rx_priv->data_dma,
                rx_priv->data_size, PCI_DMA_FROMDEVICE);

        dev_kfree_skb(rx_priv->skb);
        rx_priv->skb_data = NULL;

        vxge_debug_entryexit(VXGE_TRACE,
                "%s: %s:%d Exiting...",
                ring->ndev->name, __func__, __LINE__);
}
/*
 * vxge_tx_term
 *
 * Function will be called to abort all outstanding tx descriptors
 */
static void
vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
{
        struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
        skb_frag_t *frag;
        int i = 0, j, frg_cnt;
        struct vxge_tx_priv *txd_priv = vxge_hw_fifo_txdl_private_get(dtrh);
        struct sk_buff *skb = txd_priv->skb;

        vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
        if (state != VXGE_HW_TXDL_STATE_POSTED)
                return;

        /* check skb validity */
        vxge_assert(skb);
        frg_cnt = skb_shinfo(skb)->nr_frags;
        frag = &skb_shinfo(skb)->frags[0];

        /* for unfragmented skb */
        pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
                skb_headlen(skb), PCI_DMA_TODEVICE);

        for (j = 0; j < frg_cnt; j++) {
                pci_unmap_page(fifo->pdev, txd_priv->dma_buffers[i++],
                               frag->size, PCI_DMA_TODEVICE);
                frag += 1;
        }

        dev_kfree_skb(skb);

        vxge_debug_entryexit(VXGE_TRACE,
                "%s:%d Exiting...", __func__, __LINE__);
}
/**
 * vxge_set_multicast
 * @dev: pointer to the device structure
 *
 * Entry point for multicast address enable/disable
 * This function is a driver entry point which gets called by the kernel
 * whenever multicast addresses must be enabled/disabled. This also gets
 * called to set/reset promiscuous mode. Depending on the device flags, we
 * determine whether multicast addressing must be enabled, whether
 * promiscuous mode is to be disabled, etc.
 */
static void vxge_set_multicast(struct net_device *dev)
{
        struct netdev_hw_addr *ha;
        struct vxgedev *vdev;
        int i, mcast_cnt = 0;
        struct __vxge_hw_device *hldev;
        enum vxge_hw_status status = VXGE_HW_OK;
        struct macInfo mac_info;
        int vpath_idx = 0;
        struct vxge_mac_addrs *mac_entry;
        struct list_head *list_head;
        struct list_head *entry, *next;
        u8 *mac_address = NULL;

        vxge_debug_entryexit(VXGE_TRACE,
                "%s:%d", __func__, __LINE__);

        vdev = (struct vxgedev *)netdev_priv(dev);
        hldev = (struct __vxge_hw_device *)vdev->devh;

        if (unlikely(!is_vxge_card_up(vdev)))
                return;

        if ((dev->flags & IFF_ALLMULTI) && (!vdev->all_multi_flg)) {
                for (i = 0; i < vdev->no_of_vpath; i++) {
                        vxge_assert(vdev->vpaths[i].is_open);
                        status = vxge_hw_vpath_mcast_enable(
                                                vdev->vpaths[i].handle);
                        vdev->all_multi_flg = 1;
                }
        } else if (!(dev->flags & IFF_ALLMULTI) && (vdev->all_multi_flg)) {
                for (i = 0; i < vdev->no_of_vpath; i++) {
                        vxge_assert(vdev->vpaths[i].is_open);
                        status = vxge_hw_vpath_mcast_disable(
                                                vdev->vpaths[i].handle);
                        vdev->all_multi_flg = 0;
                }
        }

        if (status != VXGE_HW_OK)
                vxge_debug_init(VXGE_ERR,
                        "failed to %s multicast, status %d",
                        dev->flags & IFF_ALLMULTI ?
                        "enable" : "disable", status);

        if (!vdev->config.addr_learn_en) {
                if (dev->flags & IFF_PROMISC) {
                        for (i = 0; i < vdev->no_of_vpath; i++) {
                                vxge_assert(vdev->vpaths[i].is_open);
                                status = vxge_hw_vpath_promisc_enable(
                                                vdev->vpaths[i].handle);
                        }
                } else {
                        for (i = 0; i < vdev->no_of_vpath; i++) {
                                vxge_assert(vdev->vpaths[i].is_open);
                                status = vxge_hw_vpath_promisc_disable(
                                                vdev->vpaths[i].handle);
                        }
                }
        }
        memset(&mac_info, 0, sizeof(struct macInfo));

        /* Update individual M_CAST address list */
        if ((!vdev->all_multi_flg) && netdev_mc_count(dev)) {

                mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
                list_head = &vdev->vpaths[0].mac_addr_list;
                if ((netdev_mc_count(dev) +
                        (vdev->vpaths[0].mac_addr_cnt - mcast_cnt)) >
                                vdev->vpaths[0].max_mac_addr_cnt)
                        goto _set_all_mcast;

                /* Delete previous MC's */
                for (i = 0; i < mcast_cnt; i++) {
                        if (!list_empty(list_head))
                                mac_entry = (struct vxge_mac_addrs *)
                                        list_first_entry(list_head,
                                                struct vxge_mac_addrs,
                                                item);

                        list_for_each_safe(entry, next, list_head) {

                                mac_entry = (struct vxge_mac_addrs *) entry;
                                /* Copy the mac address to delete */
                                mac_address = (u8 *)&mac_entry->macaddr;
                                memcpy(mac_info.macaddr, mac_address, ETH_ALEN);

                                /* Is this a multicast address */
                                if (0x01 & mac_info.macaddr[0]) {
                                        for (vpath_idx = 0; vpath_idx <
                                                vdev->no_of_vpath;
                                                vpath_idx++) {
                                                mac_info.vpath_no = vpath_idx;
                                                status = vxge_del_mac_addr(
                                                                vdev,
                                                                &mac_info);
                                        }
                                }
                        }
                }

                /* Add new ones */
                netdev_for_each_mc_addr(ha, dev) {
                        memcpy(mac_info.macaddr, ha->addr, ETH_ALEN);
                        for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
                                        vpath_idx++) {
                                mac_info.vpath_no = vpath_idx;
                                mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
                                status = vxge_add_mac_addr(vdev, &mac_info);
                                if (status != VXGE_HW_OK) {
                                        vxge_debug_init(VXGE_ERR,
                                                "%s:%d Setting individual "
                                                "multicast address failed",
                                                __func__, __LINE__);
                                        goto _set_all_mcast;
                                }
                        }
                }

                return;
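        /*
         * Fallback when the per-vpath DA table cannot hold the full
         * multicast list: purge the individually-programmed multicast
         * entries and switch the vpaths to all-multicast mode instead.
         */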
_set_all_mcast:
                mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
                /* Delete previous MC's */
                for (i = 0; i < mcast_cnt; i++) {
                        list_for_each_safe(entry, next, list_head) {

                                mac_entry = (struct vxge_mac_addrs *) entry;
                                /* Copy the mac address to delete */
                                mac_address = (u8 *)&mac_entry->macaddr;
                                memcpy(mac_info.macaddr, mac_address, ETH_ALEN);

                                /* Is this a multicast address */
                                if (0x01 & mac_info.macaddr[0])
                                        break;
                        }

                        for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
                                        vpath_idx++) {
                                mac_info.vpath_no = vpath_idx;
                                status = vxge_del_mac_addr(vdev, &mac_info);
                        }
                }

                /* Enable all multicast */
                for (i = 0; i < vdev->no_of_vpath; i++) {
                        vxge_assert(vdev->vpaths[i].is_open);
                        status = vxge_hw_vpath_mcast_enable(
                                        vdev->vpaths[i].handle);
                        if (status != VXGE_HW_OK) {
                                vxge_debug_init(VXGE_ERR,
                                        "%s:%d Enabling all multicasts failed",
                                        __func__, __LINE__);
                        }
                        vdev->all_multi_flg = 1;
                }
                dev->flags |= IFF_ALLMULTI;
        }

        vxge_debug_entryexit(VXGE_TRACE,
                "%s:%d Exiting...", __func__, __LINE__);
}
/**
 * vxge_set_mac_addr
 * @dev: pointer to the device structure
 *
 * Update entry "0" (default MAC addr)
 */
static int vxge_set_mac_addr(struct net_device *dev, void *p)
{
        struct sockaddr *addr = p;
        struct vxgedev *vdev;
        struct __vxge_hw_device *hldev;
        enum vxge_hw_status status = VXGE_HW_OK;
        struct macInfo mac_info_new, mac_info_old;
        int vpath_idx = 0;

        vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

        vdev = (struct vxgedev *)netdev_priv(dev);
        hldev = vdev->devh;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EINVAL;

        memset(&mac_info_new, 0, sizeof(struct macInfo));
        memset(&mac_info_old, 0, sizeof(struct macInfo));

        vxge_debug_entryexit(VXGE_TRACE, "%s:%d Exiting...",
                __func__, __LINE__);

        /* Get the old address */
        memcpy(mac_info_old.macaddr, dev->dev_addr, dev->addr_len);

        /* Copy the new address */
        memcpy(mac_info_new.macaddr, addr->sa_data, dev->addr_len);

        /* First delete the old mac address from all the vpaths
        as we can't specify the index while adding new mac address */
        for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
                struct vxge_vpath *vpath = &vdev->vpaths[vpath_idx];
                if (!vpath->is_open) {
                        /* This can happen when this interface is added/removed
                        to the bonding interface. Delete this station address
                        from the linked list */
                        vxge_mac_list_del(vpath, &mac_info_old);

                        /* Add this new address to the linked list
                        for later restoring */
                        vxge_mac_list_add(vpath, &mac_info_new);

                        continue;
                }
                /* Delete the station address */
                mac_info_old.vpath_no = vpath_idx;
                status = vxge_del_mac_addr(vdev, &mac_info_old);
        }

        if (unlikely(!is_vxge_card_up(vdev))) {
                memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
                return VXGE_HW_OK;
        }

        /* Set this mac address to all the vpaths */
        for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
                mac_info_new.vpath_no = vpath_idx;
                mac_info_new.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
                status = vxge_add_mac_addr(vdev, &mac_info_new);
                if (status != VXGE_HW_OK)
                        return -EINVAL;
        }

        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

        return status;
}
  1143. /*
  1144. * vxge_vpath_intr_enable
  1145. * @vdev: pointer to vdev
  1146. * @vp_id: vpath for which to enable the interrupts
  1147. *
  1148. * Enables the interrupts for the vpath
  1149. */
  1150. void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
  1151. {
  1152. struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
  1153. int msix_id = 0;
  1154. int tim_msix_id[4] = {0, 1, 0, 0};
  1155. int alarm_msix_id = VXGE_ALARM_MSIX_ID;
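
	/*
	 * MSI-X vector layout (a sketch, assuming the driver's usual
	 * constants): each vpath owns a block of VXGE_HW_VPATH_MSIX_ACTIVE
	 * vectors, slot 0 for Tx (fifo) and slot 1 for Rx (ring), while a
	 * single alarm vector at offset VXGE_ALARM_MSIX_ID in the first
	 * vpath's block is shared by all vpaths.
	 */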
	vxge_hw_vpath_intr_enable(vpath->handle);

	if (vdev->config.intr_type == INTA)
		vxge_hw_vpath_inta_unmask_tx_rx(vpath->handle);
	else {
		vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
			alarm_msix_id);

		msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
		vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
		vxge_hw_vpath_msix_unmask(vpath->handle, msix_id + 1);

		/* enable the alarm vector */
		msix_id = (vpath->handle->vpath->hldev->first_vp_id *
			VXGE_HW_VPATH_MSIX_ACTIVE) + alarm_msix_id;
		vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
	}
}

/*
 * vxge_vpath_intr_disable
 * @vdev: pointer to vdev
 * @vp_id: vpath for which to disable the interrupts
 *
 * Disables the interrupts for the vpath
 */
void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
{
	struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
	int msix_id;

	vxge_hw_vpath_intr_disable(vpath->handle);

	if (vdev->config.intr_type == INTA)
		vxge_hw_vpath_inta_mask_tx_rx(vpath->handle);
	else {
		msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
		vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
		vxge_hw_vpath_msix_mask(vpath->handle, msix_id + 1);

		/* disable the alarm vector */
		msix_id = (vpath->handle->vpath->hldev->first_vp_id *
			VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
		vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
	}
}

/*
 * vxge_reset_vpath
 * @vdev: pointer to vdev
 * @vp_id: vpath to reset
 *
 * Resets the vpath
 */
static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
{
	int ret = 0;

	/* check if device is down already */
	if (unlikely(!is_vxge_card_up(vdev)))
		return 0;

	/* is device reset already scheduled */
	if (test_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
		return 0;

	if (vdev->vpaths[vp_id].handle) {
		if (vxge_hw_vpath_reset(vdev->vpaths[vp_id].handle)
				== VXGE_HW_OK) {
			if (is_vxge_card_up(vdev) &&
				vxge_hw_vpath_recover_from_reset(
					vdev->vpaths[vp_id].handle)
					!= VXGE_HW_OK) {
				vxge_debug_init(VXGE_ERR,
					"vxge_hw_vpath_recover_from_reset "
					"failed for vpath:%d", vp_id);
				return VXGE_HW_FAIL;
			}
		} else {
			vxge_debug_init(VXGE_ERR,
				"vxge_hw_vpath_reset failed for "
				"vpath:%d", vp_id);
			return VXGE_HW_FAIL;
		}
	} else
		return VXGE_HW_FAIL;
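
	/*
	 * The vpath came back clean: reprogram everything the reset wiped
	 * (MAC/VLAN filters, broadcast, interrupts) before re-enabling
	 * traffic and posting the Rx doorbell.
	 */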
	vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]);
	vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]);

	/* Enable all broadcast */
	vxge_hw_vpath_bcast_enable(vdev->vpaths[vp_id].handle);

	/* Enable the interrupts */
	vxge_vpath_intr_enable(vdev, vp_id);

	smp_wmb();

	/* Enable the flow of traffic through the vpath */
	vxge_hw_vpath_enable(vdev->vpaths[vp_id].handle);

	smp_wmb();
	vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[vp_id].handle);
	vdev->vpaths[vp_id].ring.last_status = VXGE_HW_OK;

	/* Vpath reset done */
	clear_bit(vp_id, &vdev->vp_reset);

	/* Start the vpath queue */
	vxge_wake_tx_queue(&vdev->vpaths[vp_id].fifo, NULL);

	return ret;
}

static int do_vxge_reset(struct vxgedev *vdev, int event)
{
	enum vxge_hw_status status;
	int ret = 0, vp_id, i;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET)) {
		/* check if device is down already */
		if (unlikely(!is_vxge_card_up(vdev)))
			return 0;

		/* is reset already scheduled */
		if (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
			return 0;
	}
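
	/*
	 * Reset events come in three flavours: VXGE_LL_FULL_RESET does the
	 * whole sequence inline, while VXGE_LL_START_RESET and
	 * VXGE_LL_COMPL_RESET split the same work into a quiesce half and
	 * a recover-and-re-enable half for resets driven by the hardware.
	 */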
	if (event == VXGE_LL_FULL_RESET) {
		/* wait for all the vpath reset to complete */
		for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
			while (test_bit(vp_id, &vdev->vp_reset))
				msleep(50);
		}

		/* if execution mode is set to debug, don't reset the adapter */
		if (unlikely(vdev->exec_mode)) {
			vxge_debug_init(VXGE_ERR,
				"%s: execution mode is debug, returning..",
				vdev->ndev->name);
			clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
			vxge_stop_all_tx_queue(vdev);
			return 0;
		}
	}

	if (event == VXGE_LL_FULL_RESET) {
		vxge_hw_device_intr_disable(vdev->devh);

		switch (vdev->cric_err_event) {
		case VXGE_HW_EVENT_UNKNOWN:
			vxge_stop_all_tx_queue(vdev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"unknown error",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_RESET_START:
			break;
		case VXGE_HW_EVENT_RESET_COMPLETE:
		case VXGE_HW_EVENT_LINK_DOWN:
		case VXGE_HW_EVENT_LINK_UP:
		case VXGE_HW_EVENT_ALARM_CLEARED:
		case VXGE_HW_EVENT_ECCERR:
		case VXGE_HW_EVENT_MRPCIM_ECCERR:
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_FIFO_ERR:
		case VXGE_HW_EVENT_VPATH_ERR:
			break;
		case VXGE_HW_EVENT_CRITICAL_ERR:
			vxge_stop_all_tx_queue(vdev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"serious error",
				vdev->ndev->name);
			/* SOP or device reset required */
			/* This event is not currently used */
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_SERR:
			vxge_stop_all_tx_queue(vdev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"serious error",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_SRPCIM_SERR:
		case VXGE_HW_EVENT_MRPCIM_SERR:
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_SLOT_FREEZE:
			vxge_stop_all_tx_queue(vdev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"slot freeze",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		default:
			break;
		}
	}

	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET))
		vxge_stop_all_tx_queue(vdev);

	if (event == VXGE_LL_FULL_RESET) {
		status = vxge_reset_all_vpaths(vdev);
		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: can not reset vpaths",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		}
	}

	if (event == VXGE_LL_COMPL_RESET) {
		for (i = 0; i < vdev->no_of_vpath; i++)
			if (vdev->vpaths[i].handle) {
				if (vxge_hw_vpath_recover_from_reset(
					vdev->vpaths[i].handle)
						!= VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
						"vxge_hw_vpath_recover_"
						"from_reset failed for vpath: "
						"%d", i);
					ret = -EPERM;
					goto out;
				}
			} else {
				vxge_debug_init(VXGE_ERR,
					"vxge_hw_vpath_reset failed for "
					"vpath:%d", i);
				ret = -EPERM;
				goto out;
			}
	}

	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET)) {
		/* Reprogram the DA table with populated mac addresses */
		for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
			vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]);
			vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]);
		}

		/* enable vpath interrupts */
		for (i = 0; i < vdev->no_of_vpath; i++)
			vxge_vpath_intr_enable(vdev, i);

		vxge_hw_device_intr_enable(vdev->devh);

		smp_wmb();

		/* Indicate card up */
		set_bit(__VXGE_STATE_CARD_UP, &vdev->state);

		/* Get the traffic to flow through the vpaths */
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vxge_hw_vpath_enable(vdev->vpaths[i].handle);
			smp_wmb();
			vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle);
		}

		vxge_wake_all_tx_queue(vdev);
	}

out:
	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);

	/* Indicate reset done */
	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET))
		clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);

	return ret;
}

/*
 * vxge_reset
 * @vdev: pointer to ll device
 *
 * driver may reset the chip on events of serr, eccerr, etc
 */
int vxge_reset(struct vxgedev *vdev)
{
	do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
	return 0;
}

/**
 * vxge_poll_msix - Receive handler when Receive Polling is used.
 * @napi: pointer to the napi structure.
 * @budget: Number of packets budgeted to be processed in this iteration.
 *
 * This function comes into the picture only if Receive side is being handled
 * through polling (called NAPI in linux). It mostly does what the normal
 * Rx interrupt handler does in terms of descriptor and packet processing
 * but not in an interrupt context. Also it will process a specified number
 * of packets at most in one iteration. This value is passed down by the
 * kernel as the function argument 'budget'.
 */
static int vxge_poll_msix(struct napi_struct *napi, int budget)
{
	struct vxge_ring *ring =
		container_of(napi, struct vxge_ring, napi);
	int budget_org = budget;
	ring->budget = budget;

	vxge_hw_vpath_poll_rx(ring->handle);

	if (ring->pkts_processed < budget_org) {
		napi_complete(napi);
		/* Re-enable the Rx interrupts for the vpath */
		vxge_hw_channel_msix_unmask(
				(struct __vxge_hw_channel *)ring->handle,
				ring->rx_vector_no);
	}

	return ring->pkts_processed;
}

static int vxge_poll_inta(struct napi_struct *napi, int budget)
{
	struct vxgedev *vdev = container_of(napi, struct vxgedev, napi);
	int pkts_processed = 0;
	int i;
	int budget_org = budget;
	struct vxge_ring *ring;

	struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)
		pci_get_drvdata(vdev->pdev);
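
	/*
	 * With a single INTA NAPI context the budget is shared across all
	 * vpaths: each ring is polled with whatever budget is left, and
	 * the loop stops early once the budget is exhausted.
	 */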
	for (i = 0; i < vdev->no_of_vpath; i++) {
		ring = &vdev->vpaths[i].ring;
		ring->budget = budget;
		vxge_hw_vpath_poll_rx(ring->handle);
		pkts_processed += ring->pkts_processed;
		budget -= ring->pkts_processed;
		if (budget <= 0)
			break;
	}

	VXGE_COMPLETE_ALL_TX(vdev);

	if (pkts_processed < budget_org) {
		napi_complete(napi);
		/* Re-enable the Rx interrupts for the ring */
		vxge_hw_device_unmask_all(hldev);
		vxge_hw_device_flush_io(hldev);
	}

	return pkts_processed;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * vxge_netpoll - netpoll event handler entry point
 * @dev: pointer to the device structure.
 * Description:
 * This function will be called by upper layer to check for events on the
 * interface in situations where interrupts are disabled. It is used for
 * specific in-kernel networking tasks, such as remote consoles and kernel
 * debugging over the network (example netdump in RedHat).
 */
static void vxge_netpoll(struct net_device *dev)
{
	struct __vxge_hw_device *hldev;
	struct vxgedev *vdev;

	vdev = (struct vxgedev *)netdev_priv(dev);
	hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	if (pci_channel_offline(vdev->pdev))
		return;

	disable_irq(dev->irq);
	vxge_hw_device_clear_tx_rx(hldev);

	VXGE_COMPLETE_ALL_RX(vdev);
	VXGE_COMPLETE_ALL_TX(vdev);

	enable_irq(dev->irq);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}
#endif

/* RTH configuration */
static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_rth_hash_types hash_types;
	u8 itable[256] = {0}; /* indirection table */
	u8 mtable[256] = {0}; /* CPU to vpath mapping */
	int index;

	/*
	 * Filling
	 *	- itable with bucket numbers
	 *	- mtable with bucket-to-vpath mapping
	 */
	for (index = 0; index < (1 << vdev->config.rth_bkt_sz); index++) {
		itable[index] = index;
		mtable[index] = index % vdev->no_of_vpath;
	}
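
	/*
	 * Worked example (values assumed for illustration): with
	 * rth_bkt_sz == 2 and two vpaths there are four buckets, so
	 * itable = {0, 1, 2, 3} and mtable = {0, 1, 0, 1}; hash buckets
	 * alternate between the two vpaths.
	 */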
	/* Fill RTH hash types */
	hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4;
	hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4;
	hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6;
	hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6;
	hash_types.hash_type_tcpipv6ex_en =
			vdev->config.rth_hash_type_tcpipv6ex;
	hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex;

	/* set indirection table, bucket-to-vpath mapping */
	status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles,
						vdev->no_of_vpath,
						mtable, itable,
						vdev->config.rth_bkt_sz);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"RTH indirection table configuration failed "
			"for vpath:%d", vdev->vpaths[0].device_id);
		return status;
	}

	/*
	 * Because the itable_set() method uses the active_table field
	 * for the target virtual path the RTH config should be updated
	 * for all VPATHs. The h/w only uses the lowest numbered VPATH
	 * when steering frames.
	 */
	for (index = 0; index < vdev->no_of_vpath; index++) {
		status = vxge_hw_vpath_rts_rth_set(
				vdev->vpaths[index].handle,
				vdev->config.rth_algorithm,
				&hash_types,
				vdev->config.rth_bkt_sz);

		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR,
				"RTH configuration failed for vpath:%d",
				vdev->vpaths[index].device_id);
			return status;
		}
	}

	return status;
}

int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
{
	struct vxge_mac_addrs *new_mac_entry;
	u8 *mac_address = NULL;

	if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT)
		return TRUE;

	new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC);
	if (!new_mac_entry) {
		vxge_debug_mem(VXGE_ERR,
			"%s: memory allocation failed",
			VXGE_DRIVER_NAME);
		return FALSE;
	}

	list_add(&new_mac_entry->item, &vpath->mac_addr_list);

	/* Copy the new mac address to the list */
	mac_address = (u8 *)&new_mac_entry->macaddr;
	memcpy(mac_address, mac->macaddr, ETH_ALEN);

	new_mac_entry->state = mac->state;
	vpath->mac_addr_cnt++;

	/* Is this a multicast address? */
	if (0x01 & mac->macaddr[0])
		vpath->mcast_addr_cnt++;

	return TRUE;
}

/* Add a mac address to DA table */
enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath;
	enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;
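
	/*
	 * A multicast address may legitimately be programmed on more than
	 * one vpath, so duplicates are added; a unicast (station) address
	 * replaces any existing duplicate entry instead.
	 */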
	if (0x01 & mac->macaddr[0]) /* multicast address */
		duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
	else
		duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;

	vpath = &vdev->vpaths[mac->vpath_no];
	status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr,
						mac->macmask, duplicate_mode);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"DA config add entry failed for vpath:%d",
			vpath->device_id);
	} else
		if (FALSE == vxge_mac_list_add(vpath, mac))
			status = -EPERM;

	return status;
}

int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
{
	struct list_head *entry, *next;
	u64 del_mac = 0;
	u8 *mac_address = (u8 *)(&del_mac);

	/* Copy the mac address to delete from the list */
	memcpy(mac_address, mac->macaddr, ETH_ALEN);

	list_for_each_safe(entry, next, &vpath->mac_addr_list) {
		if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
			list_del(entry);
			kfree((struct vxge_mac_addrs *)entry);
			vpath->mac_addr_cnt--;

			/* Is this a multicast address? */
			if (0x01 & mac->macaddr[0])
				vpath->mcast_addr_cnt--;
			return TRUE;
		}
	}

	return FALSE;
}

/* delete a mac address from DA table */
enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath;

	vpath = &vdev->vpaths[mac->vpath_no];
	status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr,
						mac->macmask);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"DA config delete entry failed for vpath:%d",
			vpath->device_id);
	} else
		vxge_mac_list_del(vpath, mac);
	return status;
}

/* Search for a mac address in the DA table */
static enum vxge_hw_status
vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath,
		struct macInfo *mac)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	unsigned char macmask[ETH_ALEN];
	unsigned char macaddr[ETH_ALEN];

	status = vxge_hw_vpath_mac_addr_get(vpath->handle,
				macaddr, macmask);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"DA config list entry failed for vpath:%d",
			vpath->device_id);
		return status;
	}

	while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) {
		status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
				macaddr, macmask);
		if (status != VXGE_HW_OK)
			break;
	}

	return status;
}

/* Store all vlan ids from the list to the vid table */
enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxgedev *vdev = vpath->vdev;
	u16 vid;

	if (vdev->vlgrp && vpath->is_open) {
		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!vlan_group_get_device(vdev->vlgrp, vid))
				continue;
			/* Add these vlans to the vid table */
			status = vxge_hw_vpath_vid_add(vpath->handle, vid);
		}
	}

	return status;
}

/* Store all mac addresses from the list to the DA table */
enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct macInfo mac_info;
	u8 *mac_address = NULL;
	struct list_head *entry, *next;

	memset(&mac_info, 0, sizeof(struct macInfo));

	if (vpath->is_open) {
		list_for_each_safe(entry, next, &vpath->mac_addr_list) {
			mac_address =
				(u8 *)&
				((struct vxge_mac_addrs *)entry)->macaddr;
			memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
			((struct vxge_mac_addrs *)entry)->state =
				VXGE_LL_MAC_ADDR_IN_DA_TABLE;
			/* does this mac address already exist in da table? */
			status = vxge_search_mac_addr_in_da_table(vpath,
				&mac_info);
			if (status != VXGE_HW_OK) {
				/* Add this mac address to the DA table */
				status = vxge_hw_vpath_mac_addr_add(
					vpath->handle, mac_info.macaddr,
					mac_info.macmask,
					VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
				if (status != VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
						"DA add entry failed for vpath:%d",
						vpath->device_id);
					((struct vxge_mac_addrs *)entry)->state
						= VXGE_LL_MAC_ADDR_IN_LIST;
				}
			}
		}
	}

	return status;
}

/* reset vpaths */
enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
{
	int i;
	enum vxge_hw_status status = VXGE_HW_OK;

	for (i = 0; i < vdev->no_of_vpath; i++)
		if (vdev->vpaths[i].handle) {
			if (vxge_hw_vpath_reset(vdev->vpaths[i].handle)
					== VXGE_HW_OK) {
				if (is_vxge_card_up(vdev) &&
					vxge_hw_vpath_recover_from_reset(
						vdev->vpaths[i].handle)
						!= VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
						"vxge_hw_vpath_recover_"
						"from_reset failed for vpath: "
						"%d", i);
					return VXGE_HW_FAIL;
				}
			} else {
				vxge_debug_init(VXGE_ERR,
					"vxge_hw_vpath_reset failed for "
					"vpath:%d", i);
				return VXGE_HW_FAIL;
			}
		}

	return status;
}

/* close vpaths */
void vxge_close_vpaths(struct vxgedev *vdev, int index)
{
	int i;

	for (i = index; i < vdev->no_of_vpath; i++) {
		if (vdev->vpaths[i].handle && vdev->vpaths[i].is_open) {
			vxge_hw_vpath_close(vdev->vpaths[i].handle);
			vdev->stats.vpaths_open--;
		}
		vdev->vpaths[i].is_open = 0;
		vdev->vpaths[i].handle = NULL;
	}
}

/* open vpaths */
int vxge_open_vpaths(struct vxgedev *vdev)
{
	enum vxge_hw_status status;
	int i;
	u32 vp_id = 0;
	struct vxge_hw_vpath_attr attr;
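
	/*
	 * For every configured vpath, describe the Tx fifo and Rx ring
	 * (completion callbacks, per-descriptor private space, userdata)
	 * before asking the HW layer to open the vpath.
	 */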
	for (i = 0; i < vdev->no_of_vpath; i++) {
		vxge_assert(vdev->vpaths[i].is_configured);
		attr.vp_id = vdev->vpaths[i].device_id;
		attr.fifo_attr.callback = vxge_xmit_compl;
		attr.fifo_attr.txdl_term = vxge_tx_term;
		attr.fifo_attr.per_txdl_space = sizeof(struct vxge_tx_priv);
		attr.fifo_attr.userdata = (void *)&vdev->vpaths[i].fifo;

		attr.ring_attr.callback = vxge_rx_1b_compl;
		attr.ring_attr.rxd_init = vxge_rx_initial_replenish;
		attr.ring_attr.rxd_term = vxge_rx_term;
		attr.ring_attr.per_rxd_space = sizeof(struct vxge_rx_priv);
		attr.ring_attr.userdata = (void *)&vdev->vpaths[i].ring;

		vdev->vpaths[i].ring.ndev = vdev->ndev;
		vdev->vpaths[i].ring.pdev = vdev->pdev;
		status = vxge_hw_vpath_open(vdev->devh, &attr,
				&(vdev->vpaths[i].handle));
		if (status == VXGE_HW_OK) {
			vdev->vpaths[i].fifo.handle =
			    (struct __vxge_hw_fifo *)attr.fifo_attr.userdata;
			vdev->vpaths[i].ring.handle =
			    (struct __vxge_hw_ring *)attr.ring_attr.userdata;
			vdev->vpaths[i].fifo.tx_steering_type =
				vdev->config.tx_steering_type;
			vdev->vpaths[i].fifo.ndev = vdev->ndev;
			vdev->vpaths[i].fifo.pdev = vdev->pdev;
			vdev->vpaths[i].fifo.indicate_max_pkts =
				vdev->config.fifo_indicate_max_pkts;
			vdev->vpaths[i].ring.rx_vector_no = 0;
			vdev->vpaths[i].ring.rx_csum = vdev->rx_csum;
			vdev->vpaths[i].is_open = 1;
			vdev->vp_handles[i] = vdev->vpaths[i].handle;
			vdev->vpaths[i].ring.gro_enable =
						vdev->config.gro_enable;
			vdev->vpaths[i].ring.vlan_tag_strip =
						vdev->vlan_tag_strip;
			vdev->stats.vpaths_open++;
		} else {
			vdev->stats.vpath_open_fail++;
			vxge_debug_init(VXGE_ERR,
				"%s: vpath: %d failed to open "
				"with status: %d",
				vdev->ndev->name, vdev->vpaths[i].device_id,
				status);
			vxge_close_vpaths(vdev, 0);
			return -EPERM;
		}

		vp_id =
		  ((struct __vxge_hw_vpath_handle *)vdev->vpaths[i].handle)->
		  vpath->vp_id;
		vdev->vpaths_deployed |= vxge_mBIT(vp_id);
	}
	return VXGE_HW_OK;
}

/*
 * vxge_isr_napi
 * @irq: the irq of the device.
 * @dev_id: a void pointer to the hldev structure of the Titan device
 * @ptregs: pointer to the registers pushed on the stack.
 *
 * This function is the ISR handler of the device when napi is enabled. It
 * identifies the reason for the interrupt and calls the relevant service
 * routines.
 */
static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
{
	struct net_device *dev;
	struct __vxge_hw_device *hldev;
	u64 reason;
	enum vxge_hw_status status;
	struct vxgedev *vdev = (struct vxgedev *)dev_id;

	vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	dev = vdev->ndev;
	hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);

	if (pci_channel_offline(vdev->pdev))
		return IRQ_NONE;

	if (unlikely(!is_vxge_card_up(vdev)))
		return IRQ_NONE;

	status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode,
			&reason);
	if (status == VXGE_HW_OK) {
		vxge_hw_device_mask_all(hldev);
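
		/*
		 * vpaths_deployed uses the device's big-endian bit numbering
		 * (vxge_mBIT sets bits from the MSB side), so it is shifted
		 * down to line up with the per-vpath traffic interrupt bits
		 * before testing "reason".
		 */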
		if (reason &
			VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(
			vdev->vpaths_deployed >>
			(64 - VXGE_HW_MAX_VIRTUAL_PATHS))) {

			vxge_hw_device_clear_tx_rx(hldev);
			napi_schedule(&vdev->napi);
			vxge_debug_intr(VXGE_TRACE,
				"%s:%d Exiting...", __func__, __LINE__);
			return IRQ_HANDLED;
		} else
			vxge_hw_device_unmask_all(hldev);
	} else if (unlikely((status == VXGE_HW_ERR_VPATH) ||
		(status == VXGE_HW_ERR_CRITICAL) ||
		(status == VXGE_HW_ERR_FIFO))) {
		vxge_hw_device_mask_all(hldev);
		vxge_hw_device_flush_io(hldev);
		return IRQ_HANDLED;
	} else if (unlikely(status == VXGE_HW_ERR_SLOT_FREEZE))
		return IRQ_HANDLED;

	vxge_debug_intr(VXGE_TRACE, "%s:%d Exiting...", __func__, __LINE__);
	return IRQ_NONE;
}

#ifdef CONFIG_PCI_MSI

static irqreturn_t
vxge_tx_msix_handle(int irq, void *dev_id)
{
	struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;

	VXGE_COMPLETE_VPATH_TX(fifo);

	return IRQ_HANDLED;
}

static irqreturn_t
vxge_rx_msix_napi_handle(int irq, void *dev_id)
{
	struct vxge_ring *ring = (struct vxge_ring *)dev_id;

	/* MSIX_IDX for Rx is 1 */
	vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle,
					ring->rx_vector_no);

	napi_schedule(&ring->napi);
	return IRQ_HANDLED;
}

static irqreturn_t
vxge_alarm_msix_handle(int irq, void *dev_id)
{
	int i;
	enum vxge_hw_status status;
	struct vxge_vpath *vpath = (struct vxge_vpath *)dev_id;
	struct vxgedev *vdev = vpath->vdev;
	int msix_id = (vpath->handle->vpath->vp_id *
		VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);

		status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
			vdev->exec_mode);
		if (status == VXGE_HW_OK) {
			vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
				msix_id);
			continue;
		}
		vxge_debug_intr(VXGE_ERR,
			"%s: vxge_hw_vpath_alarm_process failed %x ",
			VXGE_DRIVER_NAME, status);
	}
	return IRQ_HANDLED;
}

static int vxge_alloc_msix(struct vxgedev *vdev)
{
	int j, i, ret = 0;
	int msix_intr_vect = 0, temp;
	vdev->intr_cnt = 0;

start:
	/* Tx/Rx MSIX Vectors count */
	vdev->intr_cnt = vdev->no_of_vpath * 2;

	/* Alarm MSIX Vectors count */
	vdev->intr_cnt++;

	vdev->entries = kzalloc(vdev->intr_cnt * sizeof(struct msix_entry),
						GFP_KERNEL);
	if (!vdev->entries) {
		vxge_debug_init(VXGE_ERR,
			"%s: memory allocation failed",
			VXGE_DRIVER_NAME);
		return -ENOMEM;
	}

	vdev->vxge_entries =
		kzalloc(vdev->intr_cnt * sizeof(struct vxge_msix_entry),
				GFP_KERNEL);
	if (!vdev->vxge_entries) {
		vxge_debug_init(VXGE_ERR, "%s: memory allocation failed",
			VXGE_DRIVER_NAME);
		kfree(vdev->entries);
		return -ENOMEM;
	}

	for (i = 0, j = 0; i < vdev->no_of_vpath; i++) {
		msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;

		/* Initialize the fifo vector */
		vdev->entries[j].entry = msix_intr_vect;
		vdev->vxge_entries[j].entry = msix_intr_vect;
		vdev->vxge_entries[j].in_use = 0;
		j++;

		/* Initialize the ring vector */
		vdev->entries[j].entry = msix_intr_vect + 1;
		vdev->vxge_entries[j].entry = msix_intr_vect + 1;
		vdev->vxge_entries[j].in_use = 0;
		j++;
	}

	/* Initialize the alarm vector */
	vdev->entries[j].entry = VXGE_ALARM_MSIX_ID;
	vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID;
	vdev->vxge_entries[j].in_use = 0;
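
	/*
	 * The vector table is therefore 2 * no_of_vpath entries (one Tx
	 * and one Rx per vpath) plus one shared alarm entry. If the host
	 * cannot grant that many vectors, the allocation below retries
	 * with fewer vpaths.
	 */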
	ret = pci_enable_msix(vdev->pdev, vdev->entries, vdev->intr_cnt);
	if (ret > 0) {
		vxge_debug_init(VXGE_ERR,
			"%s: MSI-X enable failed for %d vectors, ret: %d",
			VXGE_DRIVER_NAME, vdev->intr_cnt, ret);
		kfree(vdev->entries);
		kfree(vdev->vxge_entries);
		vdev->entries = NULL;
		vdev->vxge_entries = NULL;

		if ((max_config_vpath != VXGE_USE_DEFAULT) || (ret < 3))
			return -ENODEV;
		/* Retry with a smaller number of vectors by reducing the
		 * vpath count */
		temp = (ret - 1) / 2;
		vxge_close_vpaths(vdev, temp);
		vdev->no_of_vpath = temp;
		goto start;
	} else if (ret < 0)
		return -ENODEV;

	return 0;
}

static int vxge_enable_msix(struct vxgedev *vdev)
{
	int i, ret = 0;
	/* 0 - Tx, 1 - Rx */
	int tim_msix_id[4] = {0, 1, 0, 0};

	vdev->intr_cnt = 0;

	/* allocate msix vectors */
	ret = vxge_alloc_msix(vdev);
	if (!ret) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			/* If fifo or ring are not enabled
			   the MSIX vector for that should be set to 0
			   Hence initializing this array to all 0s.
			*/
			vdev->vpaths[i].ring.rx_vector_no =
				(vdev->vpaths[i].device_id *
					VXGE_HW_VPATH_MSIX_ACTIVE) + 1;

			vxge_hw_vpath_msix_set(vdev->vpaths[i].handle,
					tim_msix_id, VXGE_ALARM_MSIX_ID);
		}
	}

	return ret;
}

static void vxge_rem_msix_isr(struct vxgedev *vdev)
{
	int intr_cnt;

	for (intr_cnt = 0; intr_cnt < (vdev->no_of_vpath * 2 + 1);
		intr_cnt++) {
		if (vdev->vxge_entries[intr_cnt].in_use) {
			synchronize_irq(vdev->entries[intr_cnt].vector);
			free_irq(vdev->entries[intr_cnt].vector,
				vdev->vxge_entries[intr_cnt].arg);
			vdev->vxge_entries[intr_cnt].in_use = 0;
		}
	}

	kfree(vdev->entries);
	kfree(vdev->vxge_entries);
	vdev->entries = NULL;
	vdev->vxge_entries = NULL;

	if (vdev->config.intr_type == MSI_X)
		pci_disable_msix(vdev->pdev);
}
#endif

static void vxge_rem_isr(struct vxgedev *vdev)
{
	struct __vxge_hw_device *hldev;
	hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);

#ifdef CONFIG_PCI_MSI
	if (vdev->config.intr_type == MSI_X) {
		vxge_rem_msix_isr(vdev);
	} else
#endif
	if (vdev->config.intr_type == INTA) {
		synchronize_irq(vdev->pdev->irq);
		free_irq(vdev->pdev->irq, vdev);
	}
}

static int vxge_add_isr(struct vxgedev *vdev)
{
	int ret = 0;
#ifdef CONFIG_PCI_MSI
	int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0;
	int pci_fun = PCI_FUNC(vdev->pdev->devfn);

	if (vdev->config.intr_type == MSI_X)
		ret = vxge_enable_msix(vdev);

	if (ret) {
		vxge_debug_init(VXGE_ERR,
			"%s: Enabling MSI-X Failed", VXGE_DRIVER_NAME);
		vxge_debug_init(VXGE_ERR,
			"%s: Defaulting to INTA", VXGE_DRIVER_NAME);
		vdev->config.intr_type = INTA;
	}

	if (vdev->config.intr_type == MSI_X) {
		for (intr_idx = 0;
		     intr_idx < (vdev->no_of_vpath *
			VXGE_HW_VPATH_MSIX_ACTIVE); intr_idx++) {

			msix_idx = intr_idx % VXGE_HW_VPATH_MSIX_ACTIVE;
			irq_req = 0;

			switch (msix_idx) {
			case 0:
				snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
					"%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d",
					vdev->ndev->name,
					vdev->entries[intr_cnt].entry,
					pci_fun, vp_idx);
				ret = request_irq(
					vdev->entries[intr_cnt].vector,
					vxge_tx_msix_handle, 0,
					vdev->desc[intr_cnt],
					&vdev->vpaths[vp_idx].fifo);
				vdev->vxge_entries[intr_cnt].arg =
						&vdev->vpaths[vp_idx].fifo;
				irq_req = 1;
				break;
			case 1:
				snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
					"%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d",
					vdev->ndev->name,
					vdev->entries[intr_cnt].entry,
					pci_fun, vp_idx);
				ret = request_irq(
					vdev->entries[intr_cnt].vector,
					vxge_rx_msix_napi_handle,
					0,
					vdev->desc[intr_cnt],
					&vdev->vpaths[vp_idx].ring);
				vdev->vxge_entries[intr_cnt].arg =
						&vdev->vpaths[vp_idx].ring;
				irq_req = 1;
				break;
			}

			if (ret) {
				vxge_debug_init(VXGE_ERR,
					"%s: MSIX - %d Registration failed",
					vdev->ndev->name, intr_cnt);
				vxge_rem_msix_isr(vdev);
				vdev->config.intr_type = INTA;
				vxge_debug_init(VXGE_ERR,
					"%s: Defaulting to INTA",
					vdev->ndev->name);
				goto INTA_MODE;
			}

			if (irq_req) {
				/* We requested this MSI-X interrupt */
				vdev->vxge_entries[intr_cnt].in_use = 1;
				msix_idx += vdev->vpaths[vp_idx].device_id *
					VXGE_HW_VPATH_MSIX_ACTIVE;
				vxge_hw_vpath_msix_unmask(
					vdev->vpaths[vp_idx].handle,
					msix_idx);
				intr_cnt++;
			}

			/* Point to next vpath handler */
			if (((intr_idx + 1) % VXGE_HW_VPATH_MSIX_ACTIVE == 0) &&
			    (vp_idx < (vdev->no_of_vpath - 1)))
				vp_idx++;
		}

		intr_cnt = vdev->no_of_vpath * 2;
		snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
			"%s:vxge:MSI-X %d - Alarm - fn:%d",
			vdev->ndev->name,
			vdev->entries[intr_cnt].entry,
			pci_fun);
		/* For Alarm interrupts */
		ret = request_irq(vdev->entries[intr_cnt].vector,
					vxge_alarm_msix_handle, 0,
					vdev->desc[intr_cnt],
					&vdev->vpaths[0]);
		if (ret) {
			vxge_debug_init(VXGE_ERR,
				"%s: MSIX - %d Registration failed",
				vdev->ndev->name, intr_cnt);
			vxge_rem_msix_isr(vdev);
			vdev->config.intr_type = INTA;
			vxge_debug_init(VXGE_ERR,
				"%s: Defaulting to INTA",
				vdev->ndev->name);
			goto INTA_MODE;
		}

		msix_idx = (vdev->vpaths[0].handle->vpath->vp_id *
			VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
		vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle,
					msix_idx);
		vdev->vxge_entries[intr_cnt].in_use = 1;
		vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0];
	}
INTA_MODE:
#endif

	if (vdev->config.intr_type == INTA) {
		snprintf(vdev->desc[0], VXGE_INTR_STRLEN,
			"%s:vxge:INTA", vdev->ndev->name);
		vxge_hw_device_set_intr_type(vdev->devh,
			VXGE_HW_INTR_MODE_IRQLINE);
		vxge_hw_vpath_tti_ci_set(vdev->devh,
			vdev->vpaths[0].device_id);
		ret = request_irq((int)vdev->pdev->irq,
			vxge_isr_napi,
			IRQF_SHARED, vdev->desc[0], vdev);
		if (ret) {
			vxge_debug_init(VXGE_ERR,
				"%s %s-%d: ISR registration failed",
				VXGE_DRIVER_NAME, "IRQ", vdev->pdev->irq);
			return -ENODEV;
		}
		vxge_debug_init(VXGE_TRACE,
			"new %s-%d line allocated",
			"IRQ", vdev->pdev->irq);
	}

	return VXGE_HW_OK;
}

static void vxge_poll_vp_reset(unsigned long data)
{
	struct vxgedev *vdev = (struct vxgedev *)data;
	int i, j = 0;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		if (test_bit(i, &vdev->vp_reset)) {
			vxge_reset_vpath(vdev, i);
			j++;
		}
	}
	if (j && (vdev->config.intr_type != MSI_X)) {
		vxge_hw_device_unmask_all(vdev->devh);
		vxge_hw_device_flush_io(vdev->devh);
	}

	mod_timer(&vdev->vp_reset_timer, jiffies + HZ / 2);
}
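
/*
 * Rx lockup detection: a vpath is considered wedged when its Rx frame
 * counter has not moved between two consecutive timer runs and the HW
 * leak check fails both times; only then is a vpath reset scheduled.
 */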
static void vxge_poll_vp_lockup(unsigned long data)
{
	struct vxgedev *vdev = (struct vxgedev *)data;
	int i;
	struct vxge_ring *ring;
	enum vxge_hw_status status = VXGE_HW_OK;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		ring = &vdev->vpaths[i].ring;
		/* Did this vpath receive any packets? */
		if (ring->stats.prev_rx_frms == ring->stats.rx_frms) {
			status = vxge_hw_vpath_check_leak(ring->handle);

			/* Did it receive any packets last time? */
			if ((VXGE_HW_FAIL == status) &&
				(VXGE_HW_FAIL == ring->last_status)) {

				/* schedule vpath reset */
				if (!test_and_set_bit(i, &vdev->vp_reset)) {

					/* disable interrupts for this vpath */
					vxge_vpath_intr_disable(vdev, i);

					/* stop the queue for this vpath */
					vxge_stop_tx_queue(&vdev->vpaths[i].
							fifo);
					continue;
				}
			}
		}
		ring->stats.prev_rx_frms = ring->stats.rx_frms;
		ring->last_status = status;
	}

	/* Check every millisecond */
	mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000);
}

/**
 * vxge_open
 * @dev: pointer to the device structure.
 *
 * This function is the open entry point of the driver. It mainly calls a
 * function to allocate Rx buffers and inserts them into the buffer
 * descriptors and then enables the Rx part of the NIC.
 * Return value: '0' on success and an appropriate (-)ve integer as
 * defined in errno.h file on failure.
 */
int
vxge_open(struct net_device *dev)
{
	enum vxge_hw_status status;
	struct vxgedev *vdev;
	struct __vxge_hw_device *hldev;
	int ret = 0;
	int i;
	u64 val64, function_mode;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d", dev->name, __func__, __LINE__);

	vdev = (struct vxgedev *)netdev_priv(dev);
	hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
	function_mode = vdev->config.device_hw_info.function_mode;

	/* make sure the link is off by default every time the NIC is
	 * initialized */
	netif_carrier_off(dev);

	/* Open VPATHs */
	status = vxge_open_vpaths(vdev);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"%s: fatal: Vpath open failed", vdev->ndev->name);
		ret = -EPERM;
		goto out0;
	}

	vdev->mtu = dev->mtu;

	status = vxge_add_isr(vdev);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"%s: fatal: ISR add failed", dev->name);
		ret = -EPERM;
		goto out1;
	}

	if (vdev->config.intr_type != MSI_X) {
		netif_napi_add(dev, &vdev->napi, vxge_poll_inta,
			vdev->config.napi_weight);
		napi_enable(&vdev->napi);
		for (i = 0; i < vdev->no_of_vpath; i++)
			vdev->vpaths[i].ring.napi_p = &vdev->napi;
	} else {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			netif_napi_add(dev, &vdev->vpaths[i].ring.napi,
			    vxge_poll_msix, vdev->config.napi_weight);
			napi_enable(&vdev->vpaths[i].ring.napi);
			vdev->vpaths[i].ring.napi_p =
				&vdev->vpaths[i].ring.napi;
		}
	}

	/* configure RTH */
	if (vdev->config.rth_steering) {
		status = vxge_rth_configure(vdev);
		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR,
				"%s: fatal: RTH configuration failed",
				dev->name);
			ret = -EPERM;
			goto out2;
		}
	}

	for (i = 0; i < vdev->no_of_vpath; i++) {
		/* set initial mtu before enabling the device */
		status = vxge_hw_vpath_mtu_set(vdev->vpaths[i].handle,
						vdev->mtu);
		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR,
				"%s: fatal: can not set new MTU", dev->name);
			ret = -EPERM;
			goto out2;
		}
	}

	VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_TRACE, VXGE_COMPONENT_LL, vdev);
	vxge_debug_init(vdev->level_trace,
		"%s: MTU is %d", vdev->ndev->name, vdev->mtu);
	VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_ERR, VXGE_COMPONENT_LL, vdev);

	/* Reprogram the DA table with populated mac addresses */
	for (i = 0; i < vdev->no_of_vpath; i++) {
		vxge_restore_vpath_mac_addr(&vdev->vpaths[i]);
		vxge_restore_vpath_vid_table(&vdev->vpaths[i]);
	}

	/* Enable vpath to sniff all unicast/multicast traffic that is not
	 * addressed to them. We allow promiscuous mode for PF only
	 */
	val64 = 0;
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
		val64 |= VXGE_HW_RXMAC_AUTHORIZE_ALL_ADDR_VP(i);

	vxge_hw_mgmt_reg_write(vdev->devh,
		vxge_hw_mgmt_reg_type_mrpcim,
		0,
		(ulong)offsetof(struct vxge_hw_mrpcim_reg,
			rxmac_authorize_all_addr),
		val64);

	vxge_hw_mgmt_reg_write(vdev->devh,
		vxge_hw_mgmt_reg_type_mrpcim,
		0,
		(ulong)offsetof(struct vxge_hw_mrpcim_reg,
			rxmac_authorize_all_vid),
		val64);

	vxge_set_multicast(dev);

	/* Enabling Bcast and mcast for all vpath */
	for (i = 0; i < vdev->no_of_vpath; i++) {
		status = vxge_hw_vpath_bcast_enable(vdev->vpaths[i].handle);
		if (status != VXGE_HW_OK)
			vxge_debug_init(VXGE_ERR,
				"%s : Can not enable bcast for vpath "
				"id %d", dev->name, i);
		if (vdev->config.addr_learn_en) {
			status =
			    vxge_hw_vpath_mcast_enable(vdev->vpaths[i].handle);
			if (status != VXGE_HW_OK)
				vxge_debug_init(VXGE_ERR,
					"%s : Can not enable mcast for vpath "
					"id %d", dev->name, i);
		}
	}

	vxge_hw_device_setpause_data(vdev->devh, 0,
		vdev->config.tx_pause_enable,
		vdev->config.rx_pause_enable);

	if (vdev->vp_reset_timer.function == NULL)
		vxge_os_timer(vdev->vp_reset_timer,
			vxge_poll_vp_reset, vdev, (HZ / 2));

	if (vdev->vp_lockup_timer.function == NULL)
		vxge_os_timer(vdev->vp_lockup_timer,
			vxge_poll_vp_lockup, vdev, (HZ / 2));

	set_bit(__VXGE_STATE_CARD_UP, &vdev->state);

	smp_wmb();

	if (vxge_hw_device_link_state_get(vdev->devh) == VXGE_HW_LINK_UP) {
		netif_carrier_on(vdev->ndev);
		printk(KERN_NOTICE "%s: Link Up\n", vdev->ndev->name);
		vdev->stats.link_up++;
	}

	vxge_hw_device_intr_enable(vdev->devh);

	smp_wmb();

	for (i = 0; i < vdev->no_of_vpath; i++) {
		vxge_hw_vpath_enable(vdev->vpaths[i].handle);
		smp_wmb();
		vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle);
	}

	vxge_start_all_tx_queue(vdev);
	goto out0;

out2:
	vxge_rem_isr(vdev);

	/* Disable napi */
	if (vdev->config.intr_type != MSI_X)
		napi_disable(&vdev->napi);
	else {
		for (i = 0; i < vdev->no_of_vpath; i++)
			napi_disable(&vdev->vpaths[i].ring.napi);
	}

out1:
	vxge_close_vpaths(vdev, 0);
out0:
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...",
		dev->name, __func__, __LINE__);
	return ret;
}
/* Loop through the mac address list and delete all the entries */
void vxge_free_mac_add_list(struct vxge_vpath *vpath)
{
	struct list_head *entry, *next;
	if (list_empty(&vpath->mac_addr_list))
		return;

	list_for_each_safe(entry, next, &vpath->mac_addr_list) {
		list_del(entry);
		kfree((struct vxge_mac_addrs *)entry);
	}
}

static void vxge_napi_del_all(struct vxgedev *vdev)
{
	int i;
	if (vdev->config.intr_type != MSI_X)
		netif_napi_del(&vdev->napi);
	else {
		for (i = 0; i < vdev->no_of_vpath; i++)
			netif_napi_del(&vdev->vpaths[i].ring.napi);
	}
}

int do_vxge_close(struct net_device *dev, int do_io)
{
	enum vxge_hw_status status;
	struct vxgedev *vdev;
	struct __vxge_hw_device *hldev;
	int i;
	u64 val64, vpath_vector;
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		dev->name, __func__, __LINE__);

	vdev = (struct vxgedev *)netdev_priv(dev);
	hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);

	if (unlikely(!is_vxge_card_up(vdev)))
		return 0;

	/* If vxge_handle_crit_err task is executing,
	 * wait till it completes. */
	while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
		msleep(50);

	clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
	if (do_io) {
		/* Put the vpath back in normal mode */
		vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id);
		status = vxge_hw_mgmt_reg_read(vdev->devh,
				vxge_hw_mgmt_reg_type_mrpcim,
				0,
				(ulong)offsetof(
					struct vxge_hw_mrpcim_reg,
					rts_mgr_cbasin_cfg),
				&val64);

		if (status == VXGE_HW_OK) {
			val64 &= ~vpath_vector;
			status = vxge_hw_mgmt_reg_write(vdev->devh,
					vxge_hw_mgmt_reg_type_mrpcim,
					0,
					(ulong)offsetof(
						struct vxge_hw_mrpcim_reg,
						rts_mgr_cbasin_cfg),
					val64);
		}

		/* Remove function 0 from promiscuous mode */
		vxge_hw_mgmt_reg_write(vdev->devh,
			vxge_hw_mgmt_reg_type_mrpcim,
			0,
			(ulong)offsetof(struct vxge_hw_mrpcim_reg,
				rxmac_authorize_all_addr),
			0);

		vxge_hw_mgmt_reg_write(vdev->devh,
			vxge_hw_mgmt_reg_type_mrpcim,
			0,
			(ulong)offsetof(struct vxge_hw_mrpcim_reg,
				rxmac_authorize_all_vid),
			0);

		smp_wmb();
	}
	del_timer_sync(&vdev->vp_lockup_timer);

	del_timer_sync(&vdev->vp_reset_timer);

	/* Disable napi */
	if (vdev->config.intr_type != MSI_X)
		napi_disable(&vdev->napi);
	else {
		for (i = 0; i < vdev->no_of_vpath; i++)
			napi_disable(&vdev->vpaths[i].ring.napi);
	}

	netif_carrier_off(vdev->ndev);
	printk(KERN_NOTICE "%s: Link Down\n", vdev->ndev->name);
	vxge_stop_all_tx_queue(vdev);

	/* Note that at this point xmit() is stopped by upper layer */
	if (do_io)
		vxge_hw_device_intr_disable(vdev->devh);
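
	/*
	 * A fixed 1 s settle time follows, presumably so that any interrupt
	 * handlers and DMA still in flight when device interrupts were
	 * turned off can drain before the IRQs are freed below.
	 */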
	mdelay(1000);

	vxge_rem_isr(vdev);

	vxge_napi_del_all(vdev);

	if (do_io)
		vxge_reset_all_vpaths(vdev);

	vxge_close_vpaths(vdev, 0);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", dev->name, __func__, __LINE__);

	clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);

	return 0;
}

/**
 * vxge_close
 * @dev: device pointer.
 *
 * This is the stop entry point of the driver. It needs to undo exactly
 * whatever was done by the open entry point, thus it's usually referred to
 * as the close function. Among other things this function mainly stops the
 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
 * Return value: '0' on success and an appropriate (-)ve integer as
 * defined in errno.h file on failure.
 */
int
vxge_close(struct net_device *dev)
{
	do_vxge_close(dev, 1);
	return 0;
}

/**
 * vxge_change_mtu
 * @dev: net device pointer.
 * @new_mtu: the new MTU size for the device.
 *
 * A driver entry point to change MTU size for the device. Before changing
 * the MTU the device must be stopped.
 */
static int vxge_change_mtu(struct net_device *dev, int new_mtu)
{
	struct vxgedev *vdev = netdev_priv(dev);

	vxge_debug_entryexit(vdev->level_trace,
		"%s:%d", __func__, __LINE__);
	if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > VXGE_HW_MAX_MTU)) {
		vxge_debug_init(vdev->level_err,
			"%s: mtu size is invalid", dev->name);
		return -EPERM;
	}

	/* check if device is down already */
	if (unlikely(!is_vxge_card_up(vdev))) {
		/* just store new value, will use later on open() */
		dev->mtu = new_mtu;
		vxge_debug_init(vdev->level_err,
			"%s", "device is down on MTU change");
		return 0;
	}

	vxge_debug_init(vdev->level_trace,
		"trying to apply new MTU %d", new_mtu);

	if (vxge_close(dev))
		return -EIO;

	dev->mtu = new_mtu;
	vdev->mtu = new_mtu;

	if (vxge_open(dev))
		return -EIO;

	vxge_debug_init(vdev->level_trace,
		"%s: MTU changed to %d", vdev->ndev->name, new_mtu);

	vxge_debug_entryexit(vdev->level_trace,
		"%s:%d Exiting...", __func__, __LINE__);

	return 0;
}
/**
 * vxge_get_stats
 * @dev: pointer to the device structure
 *
 * Updates the device statistics structure. This function updates the device
 * statistics structure in the net_device structure and returns a pointer
 * to the same.
 */
static struct net_device_stats *
vxge_get_stats(struct net_device *dev)
{
	struct vxgedev *vdev;
	struct net_device_stats *net_stats;
	int k;

	vdev = netdev_priv(dev);

	net_stats = &vdev->stats.net_stats;

	memset(net_stats, 0, sizeof(struct net_device_stats));

	for (k = 0; k < vdev->no_of_vpath; k++) {
		net_stats->rx_packets += vdev->vpaths[k].ring.stats.rx_frms;
		net_stats->rx_bytes += vdev->vpaths[k].ring.stats.rx_bytes;
		net_stats->rx_errors += vdev->vpaths[k].ring.stats.rx_errors;
		net_stats->multicast += vdev->vpaths[k].ring.stats.rx_mcast;
		net_stats->rx_dropped +=
			vdev->vpaths[k].ring.stats.rx_dropped;

		net_stats->tx_packets += vdev->vpaths[k].fifo.stats.tx_frms;
		net_stats->tx_bytes += vdev->vpaths[k].fifo.stats.tx_bytes;
		net_stats->tx_errors += vdev->vpaths[k].fifo.stats.tx_errors;
	}

	return net_stats;
}

/**
 * vxge_ioctl
 * @dev: Device pointer.
 * @rq: An IOCTL specific structure, that can contain a pointer to
 *	a proprietary structure used to pass information to the driver.
 * @cmd: This is used to distinguish between the different commands that
 *	can be passed to the IOCTL functions.
 *
 * Entry point for the Ioctl.
 */
static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	return -EOPNOTSUPP;
}

/**
 * vxge_tx_watchdog
 * @dev: pointer to net device structure
 *
 * Watchdog for transmit side.
 * This function is triggered if the Tx Queue is stopped
 * for a pre-defined amount of time when the Interface is still up.
 */
static void
vxge_tx_watchdog(struct net_device *dev)
{
	struct vxgedev *vdev;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	vdev = (struct vxgedev *)netdev_priv(dev);

	vdev->cric_err_event = VXGE_HW_EVENT_RESET_START;

	vxge_reset(vdev);
	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}

/**
 * vxge_vlan_rx_register
 * @dev: net device pointer.
 * @grp: vlan group
 *
 * Vlan group registration
 */
static void
vxge_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct vxgedev *vdev;
	struct vxge_vpath *vpath;
	int vp;
	u64 vid;
	enum vxge_hw_status status;
	int i;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	vdev = (struct vxgedev *)netdev_priv(dev);

	vpath = &vdev->vpaths[0];
	if ((NULL == grp) && (vpath->is_open)) {
		/* Get the first vlan */
		status = vxge_hw_vpath_vid_get(vpath->handle, &vid);

		while (status == VXGE_HW_OK) {
			/* Delete this vlan from the vid table */
			for (vp = 0; vp < vdev->no_of_vpath; vp++) {
				vpath = &vdev->vpaths[vp];
				if (!vpath->is_open)
					continue;

				vxge_hw_vpath_vid_delete(vpath->handle, vid);
			}

			/* Get the next vlan to be deleted */
			vpath = &vdev->vpaths[0];
			status = vxge_hw_vpath_vid_get(vpath->handle, &vid);
		}
	}

	vdev->vlgrp = grp;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		if (vdev->vpaths[i].is_configured)
			vdev->vpaths[i].ring.vlgrp = grp;
	}

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}
  2635. /**
  2636. * vxge_vlan_rx_add_vid
  2637. * @dev: net device pointer.
  2638. * @vid: vid
  2639. *
  2640. * Add the vlan id to the devices vlan id table
  2641. */
  2642. static void
  2643. vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
  2644. {
  2645. struct vxgedev *vdev;
  2646. struct vxge_vpath *vpath;
  2647. int vp_id;
  2648. vdev = (struct vxgedev *)netdev_priv(dev);
  2649. /* Add these vlan to the vid table */
  2650. for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
  2651. vpath = &vdev->vpaths[vp_id];
  2652. if (!vpath->is_open)
  2653. continue;
  2654. vxge_hw_vpath_vid_add(vpath->handle, vid);
  2655. }
  2656. }
/**
 * vxge_vlan_rx_kill_vid
 * @dev: net device pointer.
 * @vid: vid
 *
 * Remove the vlan id from the device's vlan id table
 */
static void
vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct vxgedev *vdev;
	struct vxge_vpath *vpath;
	int vp_id;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	vdev = (struct vxgedev *)netdev_priv(dev);

	vlan_group_set_device(vdev->vlgrp, vid, NULL);

	/* Delete this vlan from the vid table */
	for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
		vpath = &vdev->vpaths[vp_id];
		if (!vpath->is_open)
			continue;
		vxge_hw_vpath_vid_delete(vpath->handle, vid);
	}
	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}

static const struct net_device_ops vxge_netdev_ops = {
	.ndo_open               = vxge_open,
	.ndo_stop               = vxge_close,
	.ndo_get_stats          = vxge_get_stats,
	.ndo_start_xmit         = vxge_xmit,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_multicast_list = vxge_set_multicast,
	.ndo_do_ioctl           = vxge_ioctl,
	.ndo_set_mac_address    = vxge_set_mac_addr,
	.ndo_change_mtu         = vxge_change_mtu,
	.ndo_vlan_rx_register   = vxge_vlan_rx_register,
	.ndo_vlan_rx_kill_vid   = vxge_vlan_rx_kill_vid,
	.ndo_vlan_rx_add_vid    = vxge_vlan_rx_add_vid,
	.ndo_tx_timeout         = vxge_tx_watchdog,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller    = vxge_netpoll,
#endif
};
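
/*
 * vxge_device_register
 *
 * Allocates the net_device (multiqueue when Tx multiq steering is
 * configured), wires up the driver entry points and feature flags,
 * and registers it with the network stack. On success *vdev_out
 * points to the newly allocated driver private structure.
 */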
int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
				   struct vxge_config *config,
				   int high_dma, int no_of_vpath,
				   struct vxgedev **vdev_out)
{
	struct net_device *ndev;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxgedev *vdev;
	int i, ret = 0, no_of_queue = 1;
	u64 stat;

	*vdev_out = NULL;
	if (config->tx_steering_type == TX_MULTIQ_STEERING)
		no_of_queue = no_of_vpath;

	ndev = alloc_etherdev_mq(sizeof(struct vxgedev),
			no_of_queue);
	if (ndev == NULL) {
		vxge_debug_init(
			vxge_hw_device_trace_level_get(hldev),
			"%s : device allocation failed", __func__);
		ret = -ENODEV;
		goto _out0;
	}

	vxge_debug_entryexit(
		vxge_hw_device_trace_level_get(hldev),
		"%s: %s:%d Entering...",
		ndev->name, __func__, __LINE__);

	vdev = netdev_priv(ndev);
	memset(vdev, 0, sizeof(struct vxgedev));

	vdev->ndev = ndev;
	vdev->devh = hldev;
	vdev->pdev = hldev->pdev;
	memcpy(&vdev->config, config, sizeof(struct vxge_config));
	vdev->rx_csum = 1;	/* Enable Rx CSUM by default. */

	SET_NETDEV_DEV(ndev, &vdev->pdev->dev);

	ndev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
				NETIF_F_HW_VLAN_FILTER;
	/* Driver entry points */
	ndev->irq = vdev->pdev->irq;
	ndev->base_addr = (unsigned long) hldev->bar0;

	ndev->netdev_ops = &vxge_netdev_ops;

	ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT;

	initialize_ethtool_ops(ndev);

	/* Allocate memory for vpath */
	vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
				no_of_vpath, GFP_KERNEL);
	if (!vdev->vpaths) {
		vxge_debug_init(VXGE_ERR,
			"%s: vpath memory allocation failed",
			vdev->ndev->name);
		ret = -ENODEV;
		goto _out1;
	}

	ndev->features |= NETIF_F_SG;

	ndev->features |= NETIF_F_HW_CSUM;
	vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
		"%s : checksumming enabled", __func__);

	if (high_dma) {
		ndev->features |= NETIF_F_HIGHDMA;
		vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
			"%s : using High DMA", __func__);
	}

	ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;

	if (vdev->config.gro_enable)
		ndev->features |= NETIF_F_GRO;

	if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
		ndev->real_num_tx_queues = no_of_vpath;

#ifdef NETIF_F_LLTX
	ndev->features |= NETIF_F_LLTX;
#endif

	for (i = 0; i < no_of_vpath; i++)
		spin_lock_init(&vdev->vpaths[i].fifo.tx_lock);

	if (register_netdev(ndev)) {
		vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
			"%s: %s : device registration failed!",
			ndev->name, __func__);
		ret = -ENODEV;
		goto _out2;
	}

	/* Set the factory defined MAC address initially */
	ndev->addr_len = ETH_ALEN;
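	/* Only the address length is set here; the address bytes are
	 * copied into ndev->dev_addr by the caller (vxge_probe) once
	 * the per-vpath state has been initialized. */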

	/* Make the link state off initially; when the link change
	 * interrupt comes the state will be automatically changed to
	 * the right state.
	 */
	netif_carrier_off(ndev);

	vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
		"%s: Ethernet device registered",
		ndev->name);

	*vdev_out = vdev;

	/* Resetting the Device stats */
	status = vxge_hw_mrpcim_stats_access(
		hldev,
		VXGE_HW_STATS_OP_CLEAR_ALL_STATS,
		0,
		0,
		&stat);

	if (status == VXGE_HW_ERR_PRIVILAGED_OPEARATION)
		vxge_debug_init(
			vxge_hw_device_trace_level_get(hldev),
			"%s: device stats clear returns "
			"VXGE_HW_ERR_PRIVILAGED_OPEARATION", ndev->name);

	vxge_debug_entryexit(vxge_hw_device_trace_level_get(hldev),
		"%s: %s:%d Exiting...",
		ndev->name, __func__, __LINE__);

	return ret;

_out2:
	kfree(vdev->vpaths);
_out1:
	free_netdev(ndev);
_out0:
	return ret;
}

/*
 * vxge_device_unregister
 *
 * This function will unregister and free the network device.
 */
void
vxge_device_unregister(struct __vxge_hw_device *hldev)
{
	struct vxgedev *vdev;
	struct net_device *dev;
	char buf[IFNAMSIZ];
#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
	(VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
	u32 level_trace;
#endif

	dev = hldev->ndev;
	vdev = netdev_priv(dev);
#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
	(VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
	level_trace = vdev->level_trace;
#endif
	vxge_debug_entryexit(level_trace,
		"%s: %s:%d", vdev->ndev->name, __func__, __LINE__);

	memcpy(buf, vdev->ndev->name, IFNAMSIZ);
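	/* buf keeps a copy of the interface name; it is used by the log
	 * messages below, which run after the netdev is unregistered. */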

	/* in 2.6 will call stop() if device is up */
	unregister_netdev(dev);

	flush_scheduled_work();

	vxge_debug_init(level_trace, "%s: ethernet device unregistered", buf);
	vxge_debug_entryexit(level_trace,
		"%s: %s:%d Exiting...", buf, __func__, __LINE__);
}

/*
 * vxge_callback_crit_err
 *
 * This function is called by the alarm handler in interrupt context.
 * Driver must analyze it based on the event type.
 */
static void
vxge_callback_crit_err(struct __vxge_hw_device *hldev,
			enum vxge_hw_event type, u64 vp_id)
{
	struct net_device *dev = hldev->ndev;
	struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
	int vpath_idx;

	vxge_debug_entryexit(vdev->level_trace,
		"%s: %s:%d", vdev->ndev->name, __func__, __LINE__);

	/* Note: This event type should be used for device wide
	 * indications only - Serious errors, Slot freeze and critical errors
	 */
	vdev->cric_err_event = type;

	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++)
		if (vdev->vpaths[vpath_idx].device_id == vp_id)
			break;
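	/* vpath_idx now indexes the vpath that raised the event; the
	 * device-wide events below (SERR, slot freeze, critical error)
	 * do not depend on it. */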

	if (!test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) {
		if (type == VXGE_HW_EVENT_SLOT_FREEZE) {
			vxge_debug_init(VXGE_ERR,
				"%s: Slot is frozen", vdev->ndev->name);
		} else if (type == VXGE_HW_EVENT_SERR) {
			vxge_debug_init(VXGE_ERR,
				"%s: Encountered Serious Error",
				vdev->ndev->name);
		} else if (type == VXGE_HW_EVENT_CRITICAL_ERR)
			vxge_debug_init(VXGE_ERR,
				"%s: Encountered Critical Error",
				vdev->ndev->name);
	}

	if ((type == VXGE_HW_EVENT_SERR) ||
		(type == VXGE_HW_EVENT_SLOT_FREEZE)) {
		if (unlikely(vdev->exec_mode))
			clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
	} else if (type == VXGE_HW_EVENT_CRITICAL_ERR) {
		vxge_hw_device_mask_all(hldev);
		if (unlikely(vdev->exec_mode))
			clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
	} else if ((type == VXGE_HW_EVENT_FIFO_ERR) ||
		  (type == VXGE_HW_EVENT_VPATH_ERR)) {
		if (unlikely(vdev->exec_mode))
			clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
		else {
			/* check if this vpath is already set for reset */
			if (!test_and_set_bit(vpath_idx, &vdev->vp_reset)) {
				/* disable interrupts for this vpath */
				vxge_vpath_intr_disable(vdev, vpath_idx);

				/* stop the queue for this vpath */
				vxge_stop_tx_queue(&vdev->vpaths[vpath_idx].
							fifo);
			}
		}
	}

	vxge_debug_entryexit(vdev->level_trace,
		"%s: %s:%d Exiting...",
		vdev->ndev->name, __func__, __LINE__);
}
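
/*
 * verify_bandwidth
 *
 * Validate the bw_percentage[] module parameters: if any entry is zero
 * or the sum exceeds 100%, fall back to an equal split across all
 * vpaths; otherwise distribute any unassigned remainder equally among
 * the remaining vpaths.
 */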
static void verify_bandwidth(void)
{
	int i, band_width, total = 0, equal_priority = 0;

	/* 1. If user enters 0 for some fifo, give equal priority to all */
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (bw_percentage[i] == 0) {
			equal_priority = 1;
			break;
		}
	}

	if (!equal_priority) {
		/* 2. If sum exceeds 100, give equal priority to all */
		for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
			if (bw_percentage[i] == 0xFF)
				break;

			total += bw_percentage[i];
			if (total > VXGE_HW_VPATH_BANDWIDTH_MAX) {
				equal_priority = 1;
				break;
			}
		}
	}

	if (!equal_priority) {
		/* Is all the bandwidth consumed? */
		if (total < VXGE_HW_VPATH_BANDWIDTH_MAX) {
			if (i < VXGE_HW_MAX_VIRTUAL_PATHS) {
				/* Split rest of bw equally among next VPs */
				band_width =
				    (VXGE_HW_VPATH_BANDWIDTH_MAX - total) /
					(VXGE_HW_MAX_VIRTUAL_PATHS - i);
				if (band_width < 2) /* min of 2% */
					equal_priority = 1;
				else {
					for (; i < VXGE_HW_MAX_VIRTUAL_PATHS;
						i++)
						bw_percentage[i] =
							band_width;
				}
			}
		} else if (i < VXGE_HW_MAX_VIRTUAL_PATHS)
			equal_priority = 1;
	}

	if (equal_priority) {
		vxge_debug_init(VXGE_ERR,
			"%s: Assigning equal bandwidth to all the vpaths",
			VXGE_DRIVER_NAME);
		bw_percentage[0] = VXGE_HW_VPATH_BANDWIDTH_MAX /
					VXGE_HW_MAX_VIRTUAL_PATHS;
		for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
			bw_percentage[i] = bw_percentage[0];
	}

	return;
}

/*
 * Vpath configuration
 */
static int __devinit vxge_config_vpaths(
			struct vxge_hw_device_config *device_config,
			u64 vpath_mask, struct vxge_config *config_param)
{
	int i, no_of_vpaths = 0, default_no_vpath = 0, temp;
	u32 txdl_size, txdl_per_memblock;
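
	/* When the user leaves vpath_per_dev at its default and the
	 * device limit is also at default, size the vpath count from
	 * the CPUs: roughly one vpath per two online CPUs, bounded by
	 * the vpaths actually present in vpath_mask. */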
	temp = driver_config->vpath_per_dev;
	if ((driver_config->vpath_per_dev == VXGE_USE_DEFAULT) &&
		(max_config_dev == VXGE_MAX_CONFIG_DEV)) {
		/* No more CPUs. Return vpath number as zero. */
		if (driver_config->g_no_cpus == -1)
			return 0;

		if (!driver_config->g_no_cpus)
			driver_config->g_no_cpus = num_online_cpus();

		driver_config->vpath_per_dev = driver_config->g_no_cpus >> 1;
		if (!driver_config->vpath_per_dev)
			driver_config->vpath_per_dev = 1;

		for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
			if (!vxge_bVALn(vpath_mask, i, 1))
				continue;
			else
				default_no_vpath++;

		if (default_no_vpath < driver_config->vpath_per_dev)
			driver_config->vpath_per_dev = default_no_vpath;

		driver_config->g_no_cpus = driver_config->g_no_cpus -
				(driver_config->vpath_per_dev * 2);
		if (driver_config->g_no_cpus <= 0)
			driver_config->g_no_cpus = -1;
	}

	if (driver_config->vpath_per_dev == 1) {
		vxge_debug_ll_config(VXGE_TRACE,
			"%s: Disable tx and rx steering, "
			"as single vpath is configured", VXGE_DRIVER_NAME);
		config_param->rth_steering = NO_STEERING;
		config_param->tx_steering_type = NO_STEERING;
		device_config->rth_en = 0;
	}

	/* configure bandwidth */
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
		device_config->vp_config[i].min_bandwidth = bw_percentage[i];

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		device_config->vp_config[i].vp_id = i;
		device_config->vp_config[i].mtu = VXGE_HW_DEFAULT_MTU;
		if (no_of_vpaths < driver_config->vpath_per_dev) {
			if (!vxge_bVALn(vpath_mask, i, 1)) {
				vxge_debug_ll_config(VXGE_TRACE,
					"%s: vpath: %d is not available",
					VXGE_DRIVER_NAME, i);
				continue;
			} else {
				vxge_debug_ll_config(VXGE_TRACE,
					"%s: vpath: %d available",
					VXGE_DRIVER_NAME, i);
				no_of_vpaths++;
			}
		} else {
			vxge_debug_ll_config(VXGE_TRACE,
				"%s: vpath: %d is not configured, "
				"max_config_vpath exceeded",
				VXGE_DRIVER_NAME, i);
			break;
		}

		/* Configure Tx fifos */
		device_config->vp_config[i].fifo.enable =
						VXGE_HW_FIFO_ENABLE;
		device_config->vp_config[i].fifo.max_frags =
						MAX_SKB_FRAGS + 1;
		device_config->vp_config[i].fifo.memblock_size =
						VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE;
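
		/* Size one transmit descriptor list (TxDL) to hold a
		 * maximally fragmented skb, then compute how many TxDLs
		 * fit in a memory block so the requested fifo length can
		 * be rounded up to whole blocks below. */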
		txdl_size = device_config->vp_config[i].fifo.max_frags *
				sizeof(struct vxge_hw_fifo_txd);
		txdl_per_memblock = VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE / txdl_size;

		device_config->vp_config[i].fifo.fifo_blocks =
			((VXGE_DEF_FIFO_LENGTH - 1) / txdl_per_memblock) + 1;

		device_config->vp_config[i].fifo.intr =
				VXGE_HW_FIFO_QUEUE_INTR_DISABLE;

		/* Configure tti properties */
		device_config->vp_config[i].tti.intr_enable =
					VXGE_HW_TIM_INTR_ENABLE;

		device_config->vp_config[i].tti.btimer_val =
			(VXGE_TTI_BTIMER_VAL * 1000) / 272;
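		/* The TTI/RTI timer values above and below appear to be
		 * given in usec and converted by the * 1000 / 272 scaling
		 * into the TIM block's 272 ns tick (an assumption based
		 * on the scaling used here, not confirmed in this file). */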

		device_config->vp_config[i].tti.timer_ac_en =
				VXGE_HW_TIM_TIMER_AC_ENABLE;

		/* For msi-x with napi (each vector has a handler of its
		 * own) set CI to OFF for all vpaths. */
		device_config->vp_config[i].tti.timer_ci_en =
			VXGE_HW_TIM_TIMER_CI_DISABLE;

		device_config->vp_config[i].tti.timer_ri_en =
				VXGE_HW_TIM_TIMER_RI_DISABLE;

		device_config->vp_config[i].tti.util_sel =
			VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL;

		device_config->vp_config[i].tti.ltimer_val =
			(VXGE_TTI_LTIMER_VAL * 1000) / 272;

		device_config->vp_config[i].tti.rtimer_val =
			(VXGE_TTI_RTIMER_VAL * 1000) / 272;

		device_config->vp_config[i].tti.urange_a = TTI_TX_URANGE_A;
		device_config->vp_config[i].tti.urange_b = TTI_TX_URANGE_B;
		device_config->vp_config[i].tti.urange_c = TTI_TX_URANGE_C;
		device_config->vp_config[i].tti.uec_a = TTI_TX_UFC_A;
		device_config->vp_config[i].tti.uec_b = TTI_TX_UFC_B;
		device_config->vp_config[i].tti.uec_c = TTI_TX_UFC_C;
		device_config->vp_config[i].tti.uec_d = TTI_TX_UFC_D;

		/* Configure Rx rings */
		device_config->vp_config[i].ring.enable =
						VXGE_HW_RING_ENABLE;

		device_config->vp_config[i].ring.ring_blocks =
						VXGE_HW_DEF_RING_BLOCKS;

		device_config->vp_config[i].ring.buffer_mode =
			VXGE_HW_RING_RXD_BUFFER_MODE_1;

		device_config->vp_config[i].ring.rxds_limit =
				VXGE_HW_DEF_RING_RXDS_LIMIT;

		device_config->vp_config[i].ring.scatter_mode =
					VXGE_HW_RING_SCATTER_MODE_A;

		/* Configure rti properties */
		device_config->vp_config[i].rti.intr_enable =
					VXGE_HW_TIM_INTR_ENABLE;

		device_config->vp_config[i].rti.btimer_val =
			(VXGE_RTI_BTIMER_VAL * 1000) / 272;

		device_config->vp_config[i].rti.timer_ac_en =
						VXGE_HW_TIM_TIMER_AC_ENABLE;

		device_config->vp_config[i].rti.timer_ci_en =
						VXGE_HW_TIM_TIMER_CI_DISABLE;

		device_config->vp_config[i].rti.timer_ri_en =
						VXGE_HW_TIM_TIMER_RI_DISABLE;

		device_config->vp_config[i].rti.util_sel =
				VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL;

		device_config->vp_config[i].rti.urange_a =
						RTI_RX_URANGE_A;
		device_config->vp_config[i].rti.urange_b =
						RTI_RX_URANGE_B;
		device_config->vp_config[i].rti.urange_c =
						RTI_RX_URANGE_C;
		device_config->vp_config[i].rti.uec_a = RTI_RX_UFC_A;
		device_config->vp_config[i].rti.uec_b = RTI_RX_UFC_B;
		device_config->vp_config[i].rti.uec_c = RTI_RX_UFC_C;
		device_config->vp_config[i].rti.uec_d = RTI_RX_UFC_D;

		device_config->vp_config[i].rti.rtimer_val =
			(VXGE_RTI_RTIMER_VAL * 1000) / 272;

		device_config->vp_config[i].rti.ltimer_val =
			(VXGE_RTI_LTIMER_VAL * 1000) / 272;

		device_config->vp_config[i].rpa_strip_vlan_tag =
			vlan_tag_strip;
	}

	driver_config->vpath_per_dev = temp;
	return no_of_vpaths;
}

/* initialize device configurations */
static void __devinit vxge_device_config_init(
				struct vxge_hw_device_config *device_config,
				int *intr_type)
{
	/* Used for CQRQ/SRQ. */
	device_config->dma_blockpool_initial =
			VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;

	device_config->dma_blockpool_max =
			VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;

	if (max_mac_vpath > VXGE_MAX_MAC_ADDR_COUNT)
		max_mac_vpath = VXGE_MAX_MAC_ADDR_COUNT;

#ifndef CONFIG_PCI_MSI
	vxge_debug_init(VXGE_ERR,
		"%s: This kernel does not support "
		"MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME);
	*intr_type = INTA;
#endif

	/* Configure whether MSI-X or IRQ line (INTA) interrupts are used. */
	switch (*intr_type) {
	case INTA:
		device_config->intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
		break;

	case MSI_X:
		device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX;
		break;
	}

	/* Timer period between device poll */
	device_config->device_poll_millis = VXGE_TIMER_DELAY;

	/* Configure mac based steering. */
	device_config->rts_mac_en = addr_learn_en;

	/* Configure Vpaths */
	device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_MULTI_IT;

	vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ",
			__func__);
	vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_initial : %d",
			device_config->dma_blockpool_initial);
	vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_max : %d",
			device_config->dma_blockpool_max);
	vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d",
			device_config->intr_mode);
	vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d",
			device_config->device_poll_millis);
	vxge_debug_ll_config(VXGE_TRACE, "rts_mac_en : %d",
			device_config->rts_mac_en);
	vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d",
			device_config->rth_en);
	vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d",
			device_config->rth_it_type);
}

static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
{
	int i;

	vxge_debug_init(VXGE_TRACE,
		"%s: %d Vpath(s) opened",
		vdev->ndev->name, vdev->no_of_vpath);

	switch (vdev->config.intr_type) {
	case INTA:
		vxge_debug_init(VXGE_TRACE,
			"%s: Interrupt type INTA", vdev->ndev->name);
		break;

	case MSI_X:
		vxge_debug_init(VXGE_TRACE,
			"%s: Interrupt type MSI-X", vdev->ndev->name);
		break;
	}

	if (vdev->config.rth_steering) {
		vxge_debug_init(VXGE_TRACE,
			"%s: RTH steering enabled for TCP_IPV4",
			vdev->ndev->name);
	} else {
		vxge_debug_init(VXGE_TRACE,
			"%s: RTH steering disabled", vdev->ndev->name);
	}

	switch (vdev->config.tx_steering_type) {
	case NO_STEERING:
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx steering disabled", vdev->ndev->name);
		break;
	case TX_PRIORITY_STEERING:
		vxge_debug_init(VXGE_TRACE,
			"%s: Unsupported tx steering option",
			vdev->ndev->name);
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx steering disabled", vdev->ndev->name);
		vdev->config.tx_steering_type = 0;
		break;
	case TX_VLAN_STEERING:
		vxge_debug_init(VXGE_TRACE,
			"%s: Unsupported tx steering option",
			vdev->ndev->name);
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx steering disabled", vdev->ndev->name);
		vdev->config.tx_steering_type = 0;
		break;
	case TX_MULTIQ_STEERING:
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx multiqueue steering enabled",
			vdev->ndev->name);
		break;
	case TX_PORT_STEERING:
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx port steering enabled",
			vdev->ndev->name);
		break;
	default:
		vxge_debug_init(VXGE_ERR,
			"%s: Unsupported tx steering type",
			vdev->ndev->name);
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx steering disabled", vdev->ndev->name);
		vdev->config.tx_steering_type = 0;
	}

	if (vdev->config.gro_enable) {
		vxge_debug_init(VXGE_ERR,
			"%s: Generic receive offload enabled",
			vdev->ndev->name);
	} else
		vxge_debug_init(VXGE_TRACE,
			"%s: Generic receive offload disabled",
			vdev->ndev->name);

	if (vdev->config.addr_learn_en)
		vxge_debug_init(VXGE_TRACE,
			"%s: MAC Address learning enabled", vdev->ndev->name);

	vxge_debug_init(VXGE_TRACE,
		"%s: Rx doorbell mode enabled", vdev->ndev->name);
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!vxge_bVALn(vpath_mask, i, 1))
			continue;
		vxge_debug_ll_config(VXGE_TRACE,
			"%s: MTU size - %d", vdev->ndev->name,
			((struct __vxge_hw_device *)(vdev->devh))->
				config.vp_config[i].mtu);
		vxge_debug_init(VXGE_TRACE,
			"%s: VLAN tag stripping %s", vdev->ndev->name,
			((struct __vxge_hw_device *)(vdev->devh))->
				config.vp_config[i].rpa_strip_vlan_tag
			? "Enabled" : "Disabled");
		vxge_debug_init(VXGE_TRACE,
			"%s: Ring blocks : %d", vdev->ndev->name,
			((struct __vxge_hw_device *)(vdev->devh))->
				config.vp_config[i].ring.ring_blocks);
		vxge_debug_init(VXGE_TRACE,
			"%s: Fifo blocks : %d", vdev->ndev->name,
			((struct __vxge_hw_device *)(vdev->devh))->
				config.vp_config[i].fifo.fifo_blocks);
		vxge_debug_ll_config(VXGE_TRACE,
			"%s: Max frags : %d", vdev->ndev->name,
			((struct __vxge_hw_device *)(vdev->devh))->
				config.vp_config[i].fifo.max_frags);
		break;
	}
}

#ifdef CONFIG_PM
/**
 * vxge_pm_suspend - vxge power management suspend entry point
 *
 */
static int vxge_pm_suspend(struct pci_dev *pdev, pm_message_t state)
{
	return -ENOSYS;
}

/**
 * vxge_pm_resume - vxge power management resume entry point
 *
 */
static int vxge_pm_resume(struct pci_dev *pdev)
{
	return -ENOSYS;
}
#endif

/**
 * vxge_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct __vxge_hw_device *hldev =
		(struct __vxge_hw_device *) pci_get_drvdata(pdev);
	struct net_device *netdev = hldev->ndev;

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev)) {
		/* Bring down the card, while avoiding PCI I/O */
		do_vxge_close(netdev, 0);
	}

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * vxge_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
{
	struct __vxge_hw_device *hldev =
		(struct __vxge_hw_device *) pci_get_drvdata(pdev);
	struct net_device *netdev = hldev->ndev;
	struct vxgedev *vdev = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		printk(KERN_ERR "%s: "
			"Cannot re-enable device after reset\n",
			VXGE_DRIVER_NAME);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	vxge_reset(vdev);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * vxge_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void vxge_io_resume(struct pci_dev *pdev)
{
	struct __vxge_hw_device *hldev =
		(struct __vxge_hw_device *) pci_get_drvdata(pdev);
	struct net_device *netdev = hldev->ndev;

	if (netif_running(netdev)) {
		if (vxge_open(netdev)) {
			printk(KERN_ERR "%s: "
				"Can't bring device back up after reset\n",
				VXGE_DRIVER_NAME);
			return;
		}
	}

	netif_device_attach(netdev);
}
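
/*
 * Map the device function mode to the number of functions it exposes.
 * The VF count handed to pci_enable_sriov() in vxge_probe() is this
 * value minus one, to account for the PF itself.
 */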
static inline u32 vxge_get_num_vfs(u64 function_mode)
{
	u32 num_functions = 0;

	switch (function_mode) {
	case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
	case VXGE_HW_FUNCTION_MODE_SRIOV_8:
		num_functions = 8;
		break;
	case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
		num_functions = 1;
		break;
	case VXGE_HW_FUNCTION_MODE_SRIOV:
	case VXGE_HW_FUNCTION_MODE_MRIOV:
	case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17:
		num_functions = 17;
		break;
	case VXGE_HW_FUNCTION_MODE_SRIOV_4:
		num_functions = 4;
		break;
	case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2:
		num_functions = 2;
		break;
	case VXGE_HW_FUNCTION_MODE_MRIOV_8:
		num_functions = 8; /* TODO */
		break;
	}
	return num_functions;
}

/**
 * vxge_probe
 * @pdev : structure containing the PCI related information of the device.
 * @pre: List of PCI devices supported by the driver listed in vxge_id_table.
 * Description:
 * This function is called when a new PCI device gets detected and initializes
 * it.
 * Return value:
 * returns 0 on success and negative on failure.
 *
 */
static int __devinit
vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
{
	struct __vxge_hw_device *hldev;
	enum vxge_hw_status status;
	int ret;
	int high_dma = 0;
	u64 vpath_mask = 0;
	struct vxgedev *vdev;
	struct vxge_config ll_config;
	struct vxge_hw_device_config *device_config = NULL;
	struct vxge_hw_device_attr attr;
	int i, j, no_of_vpath = 0, max_vpath_supported = 0;
	u8 *macaddr;
	struct vxge_mac_addrs *entry;
	static int bus = -1, device = -1;
	u32 host_type;
	u8 new_device = 0;
	enum vxge_hw_status is_privileged;
	u32 function_mode;
	u32 num_vfs = 0;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
	attr.pdev = pdev;

	/* In SRIOV-17 mode, functions of the same adapter
	 * can be deployed on different buses */
	if ((!pdev->is_virtfn) && ((bus != pdev->bus->number) ||
		(device != PCI_SLOT(pdev->devfn))))
		new_device = 1;

	bus = pdev->bus->number;
	device = PCI_SLOT(pdev->devfn);

	if (new_device) {
		if (driver_config->config_dev_cnt &&
		   (driver_config->config_dev_cnt !=
			driver_config->total_dev_cnt))
			vxge_debug_init(VXGE_ERR,
				"%s: Configured %d of %d devices",
				VXGE_DRIVER_NAME,
				driver_config->config_dev_cnt,
				driver_config->total_dev_cnt);
		driver_config->config_dev_cnt = 0;
		driver_config->total_dev_cnt = 0;
	}

	/* Now making the CPU based no of vpath calculation
	 * applicable for individual functions as well.
	 */
	driver_config->g_no_cpus = 0;
	driver_config->vpath_per_dev = max_config_vpath;

	driver_config->total_dev_cnt++;
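
	/* Functions beyond the max_config_dev limit are deliberately
	 * skipped: probe returns 0 without attaching to them. */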
	if (++driver_config->config_dev_cnt > max_config_dev) {
		ret = 0;
		goto _exit0;
	}

	device_config = kzalloc(sizeof(struct vxge_hw_device_config),
		GFP_KERNEL);
	if (!device_config) {
		ret = -ENOMEM;
		vxge_debug_init(VXGE_ERR,
			"device_config : malloc failed %s %d",
			__FILE__, __LINE__);
		goto _exit0;
	}

	memset(&ll_config, 0, sizeof(struct vxge_config));
	ll_config.tx_steering_type = TX_MULTIQ_STEERING;
	ll_config.intr_type = MSI_X;
	ll_config.napi_weight = NEW_NAPI_WEIGHT;
	ll_config.rth_steering = RTH_STEERING;

	/* get the default configuration parameters */
	vxge_hw_device_config_default_get(device_config);

	/* initialize configuration parameters */
	vxge_device_config_init(device_config, &ll_config.intr_type);

	ret = pci_enable_device(pdev);
	if (ret) {
		vxge_debug_init(VXGE_ERR,
			"%s : can not enable PCI device", __func__);
		goto _exit0;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		vxge_debug_ll_config(VXGE_TRACE,
			"%s : using 64bit DMA", __func__);

		high_dma = 1;

		if (pci_set_consistent_dma_mask(pdev,
						DMA_BIT_MASK(64))) {
			vxge_debug_init(VXGE_ERR,
				"%s : unable to obtain 64bit DMA for "
				"consistent allocations", __func__);
			ret = -ENOMEM;
			goto _exit1;
		}
	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		vxge_debug_ll_config(VXGE_TRACE,
			"%s : using 32bit DMA", __func__);
	} else {
		ret = -ENOMEM;
		goto _exit1;
	}

	if (pci_request_regions(pdev, VXGE_DRIVER_NAME)) {
		vxge_debug_init(VXGE_ERR,
			"%s : request regions failed", __func__);
		ret = -ENODEV;
		goto _exit1;
	}

	pci_set_master(pdev);

	attr.bar0 = pci_ioremap_bar(pdev, 0);
	if (!attr.bar0) {
		vxge_debug_init(VXGE_ERR,
			"%s : cannot remap io memory bar0", __func__);
		ret = -ENODEV;
		goto _exit2;
	}
	vxge_debug_ll_config(VXGE_TRACE,
		"pci ioremap bar0: %p:0x%llx",
		attr.bar0,
		(unsigned long long)pci_resource_start(pdev, 0));

	status = vxge_hw_device_hw_info_get(attr.bar0,
			&ll_config.device_hw_info);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"%s: Reading of hardware info failed. "
			"Please try upgrading the firmware.", VXGE_DRIVER_NAME);
		ret = -EINVAL;
		goto _exit3;
	}

	if (ll_config.device_hw_info.fw_version.major !=
		VXGE_DRIVER_FW_VERSION_MAJOR) {
		vxge_debug_init(VXGE_ERR,
			"%s: Incorrect firmware version. "
			"Please upgrade the firmware to version 1.x.x",
			VXGE_DRIVER_NAME);
		ret = -EINVAL;
		goto _exit3;
	}

	vpath_mask = ll_config.device_hw_info.vpath_mask;
	if (vpath_mask == 0) {
		vxge_debug_ll_config(VXGE_TRACE,
			"%s: No vpaths available in device", VXGE_DRIVER_NAME);
		ret = -EINVAL;
		goto _exit3;
	}

	vxge_debug_ll_config(VXGE_TRACE,
		"%s:%d Vpath mask = %llx", __func__, __LINE__,
		(unsigned long long)vpath_mask);

	function_mode = ll_config.device_hw_info.function_mode;
	host_type = ll_config.device_hw_info.host_type;
	is_privileged = __vxge_hw_device_is_privilaged(host_type,
		ll_config.device_hw_info.func_id);

	/* Check how many vpaths are available */
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!((vpath_mask) & vxge_mBIT(i)))
			continue;
		max_vpath_supported++;
	}

	if (new_device)
		num_vfs = vxge_get_num_vfs(function_mode) - 1;

	/* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */
	if (is_sriov(function_mode) && (max_config_dev > 1) &&
		(ll_config.intr_type != INTA) &&
		(is_privileged == VXGE_HW_OK)) {
		ret = pci_enable_sriov(pdev, ((max_config_dev - 1) < num_vfs)
			? (max_config_dev - 1) : num_vfs);
		if (ret)
			vxge_debug_ll_config(VXGE_ERR,
				"Failed in enabling SRIOV mode: %d\n", ret);
	}

	/*
	 * Configure vpaths and get driver configured number of vpaths
	 * which is less than or equal to the maximum vpaths per function.
	 */
	no_of_vpath = vxge_config_vpaths(device_config, vpath_mask, &ll_config);
	if (!no_of_vpath) {
		vxge_debug_ll_config(VXGE_ERR,
			"%s: No more vpaths to configure", VXGE_DRIVER_NAME);
		ret = 0;
		goto _exit3;
	}

	/* Setting driver callbacks */
	attr.uld_callbacks.link_up = vxge_callback_link_up;
	attr.uld_callbacks.link_down = vxge_callback_link_down;
	attr.uld_callbacks.crit_err = vxge_callback_crit_err;

	status = vxge_hw_device_initialize(&hldev, &attr, device_config);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"Failed to initialize device (%d)", status);
		ret = -EINVAL;
		goto _exit3;
	}

	/* if FCS stripping is not disabled in MAC fail driver load */
	if (vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask) != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"%s: FCS stripping is not disabled in MAC, "
			"failing driver load", VXGE_DRIVER_NAME);
		ret = -EINVAL;
		goto _exit4;
	}

	vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);

	/* set private device info */
	pci_set_drvdata(pdev, hldev);

	ll_config.gro_enable = VXGE_GRO_ALWAYS_AGGREGATE;
	ll_config.fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
	ll_config.addr_learn_en = addr_learn_en;
	ll_config.rth_algorithm = RTH_ALG_JENKINS;
	ll_config.rth_hash_type_tcpipv4 = VXGE_HW_RING_HASH_TYPE_TCP_IPV4;
	ll_config.rth_hash_type_ipv4 = VXGE_HW_RING_HASH_TYPE_NONE;
	ll_config.rth_hash_type_tcpipv6 = VXGE_HW_RING_HASH_TYPE_NONE;
	ll_config.rth_hash_type_ipv6 = VXGE_HW_RING_HASH_TYPE_NONE;
	ll_config.rth_hash_type_tcpipv6ex = VXGE_HW_RING_HASH_TYPE_NONE;
	ll_config.rth_hash_type_ipv6ex = VXGE_HW_RING_HASH_TYPE_NONE;
	ll_config.rth_bkt_sz = RTH_BUCKET_SIZE;
	ll_config.tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
	ll_config.rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;

	if (vxge_device_register(hldev, &ll_config, high_dma, no_of_vpath,
		&vdev)) {
		ret = -EINVAL;
		goto _exit4;
	}

	vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL);
	VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
		vxge_hw_device_trace_level_get(hldev));

	/* set private HW device info */
	hldev->ndev = vdev->ndev;
	vdev->mtu = VXGE_HW_DEFAULT_MTU;
	vdev->bar0 = attr.bar0;
	vdev->max_vpath_supported = max_vpath_supported;
	vdev->no_of_vpath = no_of_vpath;

	/* Initialize the software state of each configured vpath */
	for (i = 0, j = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!vxge_bVALn(vpath_mask, i, 1))
			continue;
		if (j >= vdev->no_of_vpath)
			break;

		vdev->vpaths[j].is_configured = 1;
		vdev->vpaths[j].device_id = i;
		vdev->vpaths[j].fifo.driver_id = j;
		vdev->vpaths[j].ring.driver_id = j;
		vdev->vpaths[j].vdev = vdev;
		vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath;
		memcpy((u8 *)vdev->vpaths[j].macaddr,
				(u8 *)ll_config.device_hw_info.mac_addrs[i],
				ETH_ALEN);

		/* Initialize the mac address list header */
		INIT_LIST_HEAD(&vdev->vpaths[j].mac_addr_list);

		vdev->vpaths[j].mac_addr_cnt = 0;
		vdev->vpaths[j].mcast_addr_cnt = 0;
		j++;
	}
	vdev->exec_mode = VXGE_EXEC_MODE_DISABLE;
	vdev->max_config_port = max_config_port;

	vdev->vlan_tag_strip = vlan_tag_strip;

	/* map the hashing selector table to the configured vpaths */
	for (i = 0; i < vdev->no_of_vpath; i++)
		vdev->vpath_selector[i] = vpath_selector[i];

	macaddr = (u8 *)vdev->vpaths[0].macaddr;

	ll_config.device_hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0';
	ll_config.device_hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0';
	ll_config.device_hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0';

	vxge_debug_init(VXGE_TRACE, "%s: SERIAL NUMBER: %s",
		vdev->ndev->name, ll_config.device_hw_info.serial_number);

	vxge_debug_init(VXGE_TRACE, "%s: PART NUMBER: %s",
		vdev->ndev->name, ll_config.device_hw_info.part_number);

	vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter",
		vdev->ndev->name, ll_config.device_hw_info.product_desc);

	vxge_debug_init(VXGE_TRACE, "%s: MAC ADDR: %pM",
		vdev->ndev->name, macaddr);

	vxge_debug_init(VXGE_TRACE, "%s: Link Width x%d",
		vdev->ndev->name, vxge_hw_device_link_width_get(hldev));

	vxge_debug_init(VXGE_TRACE,
		"%s: Firmware version : %s Date : %s", vdev->ndev->name,
		ll_config.device_hw_info.fw_version.version,
		ll_config.device_hw_info.fw_date.date);

	if (new_device) {
		switch (ll_config.device_hw_info.function_mode) {
		case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
			vxge_debug_init(VXGE_TRACE,
				"%s: Single Function Mode Enabled",
				vdev->ndev->name);
			break;
		case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
			vxge_debug_init(VXGE_TRACE,
				"%s: Multi Function Mode Enabled",
				vdev->ndev->name);
			break;
		case VXGE_HW_FUNCTION_MODE_SRIOV:
			vxge_debug_init(VXGE_TRACE,
				"%s: Single Root IOV Mode Enabled",
				vdev->ndev->name);
			break;
		case VXGE_HW_FUNCTION_MODE_MRIOV:
			vxge_debug_init(VXGE_TRACE,
				"%s: Multi Root IOV Mode Enabled",
				vdev->ndev->name);
			break;
		}
	}

	vxge_print_parm(vdev, vpath_mask);

	/* Store the fw version for ethtool option */
	strcpy(vdev->fw_version, ll_config.device_hw_info.fw_version.version);
	memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN);
	memcpy(vdev->ndev->perm_addr, vdev->ndev->dev_addr, ETH_ALEN);

	/* Copy the station mac address to the list */
	for (i = 0; i < vdev->no_of_vpath; i++) {
		entry = (struct vxge_mac_addrs *)
				kzalloc(sizeof(struct vxge_mac_addrs),
					GFP_KERNEL);
		if (NULL == entry) {
			vxge_debug_init(VXGE_ERR,
				"%s: mac_addr_list : memory allocation failed",
				vdev->ndev->name);
			ret = -EPERM;
			goto _exit5;
		}
		macaddr = (u8 *)&entry->macaddr;
		memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN);
		list_add(&entry->item, &vdev->vpaths[i].mac_addr_list);
		vdev->vpaths[i].mac_addr_cnt = 1;
	}

	kfree(device_config);

	/*
	 * INTA is shared in multi-function mode. This is unlike the INTA
	 * implementation in MR mode, where each VH has its own INTA message.
	 * - INTA is masked (disabled) as long as at least one function sets
	 * its TITAN_MASK_ALL_INT.ALARM bit.
	 * - INTA is unmasked (enabled) when all enabled functions have cleared
	 * their own TITAN_MASK_ALL_INT.ALARM bit.
	 * The TITAN_MASK_ALL_INT ALARM & TRAFFIC bits are cleared on power up.
	 * Though this driver leaves the top level interrupts unmasked while
	 * leaving the required module interrupt bits masked on exit, there
	 * could be a rogue driver around that does not follow this procedure
	 * resulting in a failure to generate interrupts. The following code is
	 * present to prevent such a failure.
	 */
	if (ll_config.device_hw_info.function_mode ==
		VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION)
		if (vdev->config.intr_type == INTA)
			vxge_hw_device_unmask_all(hldev);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
		vdev->ndev->name, __func__, __LINE__);

	vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
	VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
		vxge_hw_device_trace_level_get(hldev));

	return 0;

_exit5:
	for (i = 0; i < vdev->no_of_vpath; i++)
		vxge_free_mac_add_list(&vdev->vpaths[i]);

	vxge_device_unregister(hldev);
_exit4:
	pci_disable_sriov(pdev);
	vxge_hw_device_terminate(hldev);
_exit3:
	iounmap(attr.bar0);
_exit2:
	pci_release_regions(pdev);
_exit1:
	pci_disable_device(pdev);
_exit0:
	kfree(device_config);
	driver_config->config_dev_cnt--;
	pci_set_drvdata(pdev, NULL);
	return ret;
}

/**
 * vxge_remove - Free the PCI device
 * @pdev: structure containing the PCI related information of the device.
 * Description: This function is called by the PCI subsystem to release a
 * PCI device and free up all resources held by the device.
 */
static void __devexit
vxge_remove(struct pci_dev *pdev)
{
	struct __vxge_hw_device *hldev;
	struct vxgedev *vdev = NULL;
	struct net_device *dev;
	int i = 0;
#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
	(VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
	u32 level_trace;
#endif

	hldev = (struct __vxge_hw_device *) pci_get_drvdata(pdev);

	if (hldev == NULL)
		return;
	dev = hldev->ndev;
	vdev = netdev_priv(dev);
#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
	(VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
	level_trace = vdev->level_trace;
#endif
	vxge_debug_entryexit(level_trace,
		"%s:%d", __func__, __LINE__);

	vxge_debug_init(level_trace,
		"%s : removing PCI device...", __func__);
	vxge_device_unregister(hldev);

	for (i = 0; i < vdev->no_of_vpath; i++) {
		vxge_free_mac_add_list(&vdev->vpaths[i]);
		vdev->vpaths[i].mcast_addr_cnt = 0;
		vdev->vpaths[i].mac_addr_cnt = 0;
	}

	kfree(vdev->vpaths);

	iounmap(vdev->bar0);

	pci_disable_sriov(pdev);

	/* we are safe to free it now */
	free_netdev(dev);

	vxge_debug_init(level_trace,
		"%s:%d Device unregistered", __func__, __LINE__);

	vxge_hw_device_terminate(hldev);

	pci_disable_device(pdev);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	vxge_debug_entryexit(level_trace,
		"%s:%d Exiting...", __func__, __LINE__);
}

static struct pci_error_handlers vxge_err_handler = {
	.error_detected = vxge_io_error_detected,
	.slot_reset = vxge_io_slot_reset,
	.resume = vxge_io_resume,
};

static struct pci_driver vxge_driver = {
	.name = VXGE_DRIVER_NAME,
	.id_table = vxge_id_table,
	.probe = vxge_probe,
	.remove = __devexit_p(vxge_remove),
#ifdef CONFIG_PM
	.suspend = vxge_pm_suspend,
	.resume = vxge_pm_resume,
#endif
	.err_handler = &vxge_err_handler,
};

static int __init
vxge_starter(void)
{
	int ret = 0;
	char version[32];
	snprintf(version, 32, "%s", DRV_VERSION);

	printk(KERN_CRIT "%s: Copyright(c) 2002-2009 Neterion Inc\n",
		VXGE_DRIVER_NAME);
	printk(KERN_CRIT "%s: Driver version: %s\n",
			VXGE_DRIVER_NAME, version);

	verify_bandwidth();

	driver_config = kzalloc(sizeof(struct vxge_drv_config), GFP_KERNEL);
	if (!driver_config)
		return -ENOMEM;

	ret = pci_register_driver(&vxge_driver);

	if (driver_config->config_dev_cnt &&
	   (driver_config->config_dev_cnt != driver_config->total_dev_cnt))
		vxge_debug_init(VXGE_ERR,
			"%s: Configured %d of %d devices",
			VXGE_DRIVER_NAME, driver_config->config_dev_cnt,
			driver_config->total_dev_cnt);

	if (ret)
		kfree(driver_config);

	return ret;
}

static void __exit
vxge_closer(void)
{
	pci_unregister_driver(&vxge_driver);
	kfree(driver_config);
}
module_init(vxge_starter);
module_exit(vxge_closer);