/*************************************************************************
 * myri10ge.c: Myricom Myri-10G Ethernet driver.
 *
 * Copyright (C) 2005 - 2009 Myricom, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Myricom, Inc. nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * If the eeprom on your board is not recent enough, you will need to get a
 * newer firmware image at:
 *   http://www.myri.com/scs/download-Myri10GE.html
 *
 * Contact Information:
 *   <help@myri.com>
 *   Myricom, Inc., 325N Santa Anita Avenue, Arcadia, CA 91006
 *************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/tcp.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/inet_lro.h>
#include <linux/dca.h>
#include <linux/ip.h>
#include <linux/inet.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/firmware.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/crc32.h>
#include <linux/moduleparam.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <net/checksum.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/processor.h>
#ifdef CONFIG_MTRR
#include <asm/mtrr.h>
#endif

#include "myri10ge_mcp.h"
#include "myri10ge_mcp_gen_header.h"

#define MYRI10GE_VERSION_STR "1.5.2-1.459"

MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
MODULE_AUTHOR("Maintainer: help@myri.com");
MODULE_VERSION(MYRI10GE_VERSION_STR);
MODULE_LICENSE("Dual BSD/GPL");

#define MYRI10GE_MAX_ETHER_MTU 9014

#define MYRI10GE_ETH_STOPPED 0
#define MYRI10GE_ETH_STOPPING 1
#define MYRI10GE_ETH_STARTING 2
#define MYRI10GE_ETH_RUNNING 3
#define MYRI10GE_ETH_OPEN_FAILED 4

#define MYRI10GE_EEPROM_STRINGS_SIZE 256
#define MYRI10GE_MAX_SEND_DESC_TSO ((65536 / 2048) * 2)
#define MYRI10GE_MAX_LRO_DESCRIPTORS 8
#define MYRI10GE_LRO_MAX_PKTS 64

#define MYRI10GE_NO_CONFIRM_DATA htonl(0xffffffff)
#define MYRI10GE_NO_RESPONSE_RESULT 0xffffffff

#define MYRI10GE_ALLOC_ORDER 0
#define MYRI10GE_ALLOC_SIZE ((1 << MYRI10GE_ALLOC_ORDER) * PAGE_SIZE)
#define MYRI10GE_MAX_FRAGS_PER_FRAME (MYRI10GE_MAX_ETHER_MTU/MYRI10GE_ALLOC_SIZE + 1)

#define MYRI10GE_MAX_SLICES 32

struct myri10ge_rx_buffer_state {
	struct page *page;
	int page_offset;
	DEFINE_DMA_UNMAP_ADDR(bus);
	DEFINE_DMA_UNMAP_LEN(len);
};

struct myri10ge_tx_buffer_state {
	struct sk_buff *skb;
	int last;
	DEFINE_DMA_UNMAP_ADDR(bus);
	DEFINE_DMA_UNMAP_LEN(len);
};

struct myri10ge_cmd {
	u32 data0;
	u32 data1;
	u32 data2;
};

struct myri10ge_rx_buf {
	struct mcp_kreq_ether_recv __iomem *lanai; /* lanai ptr for recv ring */
	struct mcp_kreq_ether_recv *shadow; /* host shadow of recv ring */
	struct myri10ge_rx_buffer_state *info;
	struct page *page;
	dma_addr_t bus;
	int page_offset;
	int cnt;
	int fill_cnt;
	int alloc_fail;
	int mask; /* number of rx slots -1 */
	int watchdog_needed;
};

struct myri10ge_tx_buf {
	struct mcp_kreq_ether_send __iomem *lanai; /* lanai ptr for sendq */
	__be32 __iomem *send_go; /* "go" doorbell ptr */
	__be32 __iomem *send_stop; /* "stop" doorbell ptr */
	struct mcp_kreq_ether_send *req_list; /* host shadow of sendq */
	char *req_bytes;
	struct myri10ge_tx_buffer_state *info;
	int mask; /* number of transmit slots -1 */
	int req ____cacheline_aligned; /* transmit slots submitted */
	int pkt_start; /* packets started */
	int stop_queue;
	int linearized;
	int done ____cacheline_aligned; /* transmit slots completed */
	int pkt_done; /* packets completed */
	int wake_queue;
	int queue_active;
};

struct myri10ge_rx_done {
	struct mcp_slot *entry;
	dma_addr_t bus;
	int cnt;
	int idx;
	struct net_lro_mgr lro_mgr;
	struct net_lro_desc lro_desc[MYRI10GE_MAX_LRO_DESCRIPTORS];
};

struct myri10ge_slice_netstats {
	unsigned long rx_packets;
	unsigned long tx_packets;
	unsigned long rx_bytes;
	unsigned long tx_bytes;
	unsigned long rx_dropped;
	unsigned long tx_dropped;
};

struct myri10ge_slice_state {
	struct myri10ge_tx_buf tx; /* transmit ring */
	struct myri10ge_rx_buf rx_small;
	struct myri10ge_rx_buf rx_big;
	struct myri10ge_rx_done rx_done;
	struct net_device *dev;
	struct napi_struct napi;
	struct myri10ge_priv *mgp;
	struct myri10ge_slice_netstats stats;
	__be32 __iomem *irq_claim;
	struct mcp_irq_data *fw_stats;
	dma_addr_t fw_stats_bus;
	int watchdog_tx_done;
	int watchdog_tx_req;
	int watchdog_rx_done;
#ifdef CONFIG_MYRI10GE_DCA
	int cached_dca_tag;
	int cpu;
	__be32 __iomem *dca_tag;
#endif
	char irq_desc[32];
};

struct myri10ge_priv {
	struct myri10ge_slice_state *ss;
	int tx_boundary; /* boundary transmits cannot cross */
	int num_slices;
	int running; /* running? */
	int small_bytes;
	int big_bytes;
	int max_intr_slots;
	struct net_device *dev;
	spinlock_t stats_lock;
	u8 __iomem *sram;
	int sram_size;
	unsigned long board_span;
	unsigned long iomem_base;
	__be32 __iomem *irq_deassert;
	char *mac_addr_string;
	struct mcp_cmd_response *cmd;
	dma_addr_t cmd_bus;
	struct pci_dev *pdev;
	int msi_enabled;
	int msix_enabled;
	struct msix_entry *msix_vectors;
#ifdef CONFIG_MYRI10GE_DCA
	int dca_enabled;
	int relaxed_order;
#endif
	u32 link_state;
	unsigned int rdma_tags_available;
	int intr_coal_delay;
	__be32 __iomem *intr_coal_delay_ptr;
	int mtrr;
	int wc_enabled;
	int down_cnt;
	wait_queue_head_t down_wq;
	struct work_struct watchdog_work;
	struct timer_list watchdog_timer;
	int watchdog_resets;
	int watchdog_pause;
	int pause;
	bool fw_name_allocated;
	char *fw_name;
	char eeprom_strings[MYRI10GE_EEPROM_STRINGS_SIZE];
	char *product_code_string;
	char fw_version[128];
	int fw_ver_major;
	int fw_ver_minor;
	int fw_ver_tiny;
	int adopted_rx_filter_bug;
	u8 mac_addr[6]; /* eeprom mac address */
	unsigned long serial_number;
	int vendor_specific_offset;
	int fw_multicast_support;
	u32 features;
	u32 max_tso6;
	u32 read_dma;
	u32 write_dma;
	u32 read_write_dma;
	u32 link_changes;
	u32 msg_enable;
	unsigned int board_number;
	int rebooted;
};

static char *myri10ge_fw_unaligned = "myri10ge_ethp_z8e.dat";
static char *myri10ge_fw_aligned = "myri10ge_eth_z8e.dat";
static char *myri10ge_fw_rss_unaligned = "myri10ge_rss_ethp_z8e.dat";
static char *myri10ge_fw_rss_aligned = "myri10ge_rss_eth_z8e.dat";

MODULE_FIRMWARE("myri10ge_ethp_z8e.dat");
MODULE_FIRMWARE("myri10ge_eth_z8e.dat");
MODULE_FIRMWARE("myri10ge_rss_ethp_z8e.dat");
MODULE_FIRMWARE("myri10ge_rss_eth_z8e.dat");

/* Careful: must be accessed under kparam_block_sysfs_write */
static char *myri10ge_fw_name = NULL;
module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image name");

#define MYRI10GE_MAX_BOARDS 8
static char *myri10ge_fw_names[MYRI10GE_MAX_BOARDS] =
    {[0 ... (MYRI10GE_MAX_BOARDS - 1)] = NULL };
module_param_array_named(myri10ge_fw_names, myri10ge_fw_names, charp, NULL,
			 0444);
MODULE_PARM_DESC(myri10ge_fw_names, "Firmware image names per board");

static int myri10ge_ecrc_enable = 1;
module_param(myri10ge_ecrc_enable, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_ecrc_enable, "Enable Extended CRC on PCI-E");

static int myri10ge_small_bytes = -1; /* -1 == auto */
module_param(myri10ge_small_bytes, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(myri10ge_small_bytes, "Threshold of small packets");

static int myri10ge_msi = 1; /* enable msi by default */
module_param(myri10ge_msi, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(myri10ge_msi, "Enable Message Signalled Interrupts");

static int myri10ge_intr_coal_delay = 75;
module_param(myri10ge_intr_coal_delay, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_intr_coal_delay, "Interrupt coalescing delay");

static int myri10ge_flow_control = 1;
module_param(myri10ge_flow_control, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_flow_control, "Pause parameter");

static int myri10ge_deassert_wait = 1;
module_param(myri10ge_deassert_wait, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(myri10ge_deassert_wait,
		 "Wait when deasserting legacy interrupts");

static int myri10ge_force_firmware = 0;
module_param(myri10ge_force_firmware, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_force_firmware,
		 "Force firmware to assume aligned completions");

static int myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
module_param(myri10ge_initial_mtu, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_initial_mtu, "Initial MTU");

static int myri10ge_napi_weight = 64;
module_param(myri10ge_napi_weight, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_napi_weight, "Set NAPI weight");

static int myri10ge_watchdog_timeout = 1;
module_param(myri10ge_watchdog_timeout, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_watchdog_timeout, "Set watchdog timeout");

static int myri10ge_max_irq_loops = 1048576;
module_param(myri10ge_max_irq_loops, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_max_irq_loops,
		 "Set stuck legacy IRQ detection threshold");

#define MYRI10GE_MSG_DEFAULT NETIF_MSG_LINK

static int myri10ge_debug = -1; /* defaults above */
module_param(myri10ge_debug, int, 0);
MODULE_PARM_DESC(myri10ge_debug, "Debug level (0=none,...,16=all)");

static int myri10ge_lro_max_pkts = MYRI10GE_LRO_MAX_PKTS;
module_param(myri10ge_lro_max_pkts, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_lro_max_pkts,
		 "Number of LRO packets to be aggregated");

static int myri10ge_fill_thresh = 256;
module_param(myri10ge_fill_thresh, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed");

static int myri10ge_reset_recover = 1;

static int myri10ge_max_slices = 1;
module_param(myri10ge_max_slices, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_max_slices, "Max tx/rx queues");

static int myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_DST_PORT;
module_param(myri10ge_rss_hash, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_rss_hash, "Type of RSS hashing to do");

static int myri10ge_dca = 1;
module_param(myri10ge_dca, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_dca, "Enable DCA if possible");

#define MYRI10GE_FW_OFFSET 1024*1024
#define MYRI10GE_HIGHPART_TO_U32(X) \
(sizeof (X) == 8) ? ((u32)((u64)(X) >> 32)) : (0)
#define MYRI10GE_LOWPART_TO_U32(X) ((u32)(X))

#define myri10ge_pio_copy(to,from,size) __iowrite64_copy(to,from,size/8)

static void myri10ge_set_multicast_list(struct net_device *dev);
static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
				   struct net_device *dev);

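/* Write a 32-bit big-endian value to NIC MMIO space without any further
 * byte swapping (the value is already in wire order). */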
static inline void put_be32(__be32 val, __be32 __iomem * p)
{
	__raw_writel((__force __u32) val, (__force void __iomem *)p);
}

static struct rtnl_link_stats64 *myri10ge_get_stats(struct net_device *dev,
						    struct rtnl_link_stats64 *stats);

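/* Record the firmware image name to load; free the previous name if it was
 * dynamically allocated. */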
static void set_fw_name(struct myri10ge_priv *mgp, char *name, bool allocated)
{
	if (mgp->fw_name_allocated)
		kfree(mgp->fw_name);
	mgp->fw_name = name;
	mgp->fw_name_allocated = allocated;
}

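/* Issue a command to the firmware through the command window in NIC SRAM and
 * poll the DMA'd response buffer for completion.  Returns 0 on success or a
 * negative errno translated from the firmware result code. */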
static int
myri10ge_send_cmd(struct myri10ge_priv *mgp, u32 cmd,
		  struct myri10ge_cmd *data, int atomic)
{
	struct mcp_cmd *buf;
	char buf_bytes[sizeof(*buf) + 8];
	struct mcp_cmd_response *response = mgp->cmd;
	char __iomem *cmd_addr = mgp->sram + MXGEFW_ETH_CMD;
	u32 dma_low, dma_high, result, value;
	int sleep_total = 0;

	/* ensure buf is aligned to 8 bytes */
	buf = (struct mcp_cmd *)ALIGN((unsigned long)buf_bytes, 8);

	buf->data0 = htonl(data->data0);
	buf->data1 = htonl(data->data1);
	buf->data2 = htonl(data->data2);
	buf->cmd = htonl(cmd);
	dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
	dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);

	buf->response_addr.low = htonl(dma_low);
	buf->response_addr.high = htonl(dma_high);
	response->result = htonl(MYRI10GE_NO_RESPONSE_RESULT);
	mb();
	myri10ge_pio_copy(cmd_addr, buf, sizeof(*buf));

	/* wait up to 15ms. Longest command is the DMA benchmark,
	 * which is capped at 5ms, but runs from a timeout handler
	 * that runs every 7.8ms. So a 15ms timeout leaves us with
	 * a 2.2ms margin
	 */
	if (atomic) {
		/* if atomic is set, do not sleep,
		 * and try to get the completion quickly
		 * (1ms will be enough for those commands) */
		for (sleep_total = 0;
		     sleep_total < 1000 &&
		     response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT);
		     sleep_total += 10) {
			udelay(10);
			mb();
		}
	} else {
		/* use msleep for most commands */
		for (sleep_total = 0;
		     sleep_total < 15 &&
		     response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT);
		     sleep_total++)
			msleep(1);
	}

	result = ntohl(response->result);
	value = ntohl(response->data);
	if (result != MYRI10GE_NO_RESPONSE_RESULT) {
		if (result == 0) {
			data->data0 = value;
			return 0;
		} else if (result == MXGEFW_CMD_UNKNOWN) {
			return -ENOSYS;
		} else if (result == MXGEFW_CMD_ERROR_UNALIGNED) {
			return -E2BIG;
		} else if (result == MXGEFW_CMD_ERROR_RANGE &&
			   cmd == MXGEFW_CMD_ENABLE_RSS_QUEUES &&
			   (data->data1 & MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES) != 0) {
			return -ERANGE;
		} else {
			dev_err(&mgp->pdev->dev,
				"command %d failed, result = %d\n",
				cmd, result);
			return -ENXIO;
		}
	}

	dev_err(&mgp->pdev->dev, "command %d timed out, result = %d\n",
		cmd, result);
	return -EAGAIN;
}

/*
 * The eeprom strings on the lanaiX have the format
 * SN=x\0
 * MAC=x:x:x:x:x:x\0
 * PT:ddd mmm xx xx:xx:xx xx\0
 * PV:ddd mmm xx xx:xx:xx xx\0
 */
static int myri10ge_read_mac_addr(struct myri10ge_priv *mgp)
{
	char *ptr, *limit;
	int i;

	ptr = mgp->eeprom_strings;
	limit = mgp->eeprom_strings + MYRI10GE_EEPROM_STRINGS_SIZE;

	while (*ptr != '\0' && ptr < limit) {
		if (memcmp(ptr, "MAC=", 4) == 0) {
			ptr += 4;
			mgp->mac_addr_string = ptr;
			for (i = 0; i < 6; i++) {
				if ((ptr + 2) > limit)
					goto abort;
				mgp->mac_addr[i] =
				    simple_strtoul(ptr, &ptr, 16);
				ptr += 1;
			}
		}
		if (memcmp(ptr, "PC=", 3) == 0) {
			ptr += 3;
			mgp->product_code_string = ptr;
		}
		if (memcmp((const void *)ptr, "SN=", 3) == 0) {
			ptr += 3;
			mgp->serial_number = simple_strtoul(ptr, &ptr, 10);
		}
		while (ptr < limit && *ptr++) ;
	}

	return 0;

abort:
	dev_err(&mgp->pdev->dev, "failed to parse eeprom_strings\n");
	return -ENXIO;
}

/*
 * Enable or disable periodic RDMAs from the host to make certain
 * chipsets resend dropped PCIe messages
 */
static void myri10ge_dummy_rdma(struct myri10ge_priv *mgp, int enable)
{
	char __iomem *submit;
	__be32 buf[16] __attribute__ ((__aligned__(8)));
	u32 dma_low, dma_high;
	int i;

	/* clear confirmation addr */
	mgp->cmd->data = 0;
	mb();

	/* send a rdma command to the PCIe engine, and wait for the
	 * response in the confirmation address. The firmware should
	 * write a -1 there to indicate it is alive and well
	 */
	dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
	dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);

	buf[0] = htonl(dma_high);	/* confirm addr MSW */
	buf[1] = htonl(dma_low);	/* confirm addr LSW */
	buf[2] = MYRI10GE_NO_CONFIRM_DATA;	/* confirm data */
	buf[3] = htonl(dma_high);	/* dummy addr MSW */
	buf[4] = htonl(dma_low);	/* dummy addr LSW */
	buf[5] = htonl(enable);	/* enable? */

	submit = mgp->sram + MXGEFW_BOOT_DUMMY_RDMA;

	myri10ge_pio_copy(submit, &buf, sizeof(buf));
	for (i = 0; mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 20; i++)
		msleep(1);
	if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA)
		dev_err(&mgp->pdev->dev, "dummy rdma %s failed\n",
			(enable ? "enable" : "disable"));
}

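/* Sanity-check a firmware image header: it must be an Ethernet MCP whose
 * major/minor version matches what this driver was built against.  The
 * version string is saved for reporting via ethtool. */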
static int
myri10ge_validate_firmware(struct myri10ge_priv *mgp,
			   struct mcp_gen_header *hdr)
{
	struct device *dev = &mgp->pdev->dev;

	/* check firmware type */
	if (ntohl(hdr->mcp_type) != MCP_TYPE_ETH) {
		dev_err(dev, "Bad firmware type: 0x%x\n", ntohl(hdr->mcp_type));
		return -EINVAL;
	}

	/* save firmware version for ethtool */
	strncpy(mgp->fw_version, hdr->version, sizeof(mgp->fw_version));

	sscanf(mgp->fw_version, "%d.%d.%d", &mgp->fw_ver_major,
	       &mgp->fw_ver_minor, &mgp->fw_ver_tiny);

	if (!(mgp->fw_ver_major == MXGEFW_VERSION_MAJOR &&
	      mgp->fw_ver_minor == MXGEFW_VERSION_MINOR)) {
		dev_err(dev, "Found firmware version %s\n", mgp->fw_version);
		dev_err(dev, "Driver needs %d.%d\n", MXGEFW_VERSION_MAJOR,
			MXGEFW_VERSION_MINOR);
		return -EINVAL;
	}
	return 0;
}

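/* Fetch the firmware image with request_firmware(), copy it into NIC SRAM at
 * MYRI10GE_FW_OFFSET in 256-byte PIO bursts, then read it back and compare
 * CRC32s to catch corruption on the way to the card. */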
static int myri10ge_load_hotplug_firmware(struct myri10ge_priv *mgp, u32 * size)
{
	unsigned crc, reread_crc;
	const struct firmware *fw;
	struct device *dev = &mgp->pdev->dev;
	unsigned char *fw_readback;
	struct mcp_gen_header *hdr;
	size_t hdr_offset;
	int status;
	unsigned i;

	if ((status = request_firmware(&fw, mgp->fw_name, dev)) < 0) {
		dev_err(dev, "Unable to load %s firmware image via hotplug\n",
			mgp->fw_name);
		status = -EINVAL;
		goto abort_with_nothing;
	}

	/* check size */
	if (fw->size >= mgp->sram_size - MYRI10GE_FW_OFFSET ||
	    fw->size < MCP_HEADER_PTR_OFFSET + 4) {
		dev_err(dev, "Firmware size invalid:%d\n", (int)fw->size);
		status = -EINVAL;
		goto abort_with_fw;
	}

	/* check id */
	hdr_offset = ntohl(*(__be32 *) (fw->data + MCP_HEADER_PTR_OFFSET));
	if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > fw->size) {
		dev_err(dev, "Bad firmware file\n");
		status = -EINVAL;
		goto abort_with_fw;
	}
	hdr = (void *)(fw->data + hdr_offset);

	status = myri10ge_validate_firmware(mgp, hdr);
	if (status != 0)
		goto abort_with_fw;

	crc = crc32(~0, fw->data, fw->size);
	for (i = 0; i < fw->size; i += 256) {
		myri10ge_pio_copy(mgp->sram + MYRI10GE_FW_OFFSET + i,
				  fw->data + i,
				  min(256U, (unsigned)(fw->size - i)));
		mb();
		readb(mgp->sram);
	}
	fw_readback = vmalloc(fw->size);
	if (!fw_readback) {
		status = -ENOMEM;
		goto abort_with_fw;
	}
	/* corruption checking is good for parity recovery and buggy chipset */
	memcpy_fromio(fw_readback, mgp->sram + MYRI10GE_FW_OFFSET, fw->size);
	reread_crc = crc32(~0, fw_readback, fw->size);
	vfree(fw_readback);
	if (crc != reread_crc) {
		dev_err(dev, "CRC failed(fw-len=%u), got 0x%x (expect 0x%x)\n",
			(unsigned)fw->size, reread_crc, crc);
		status = -EIO;
		goto abort_with_fw;
	}
	*size = (u32) fw->size;

abort_with_fw:
	release_firmware(fw);

abort_with_nothing:
	return status;
}

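/* When no usable image can be loaded via hotplug, validate and keep using the
 * firmware already running on the NIC.  Also detect the 1.4.4 - 1.4.11
 * firmware versions whose rx filter bug requires keeping the NIC in
 * ALLMULTI mode to receive broadcasts. */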
static int myri10ge_adopt_running_firmware(struct myri10ge_priv *mgp)
{
	struct mcp_gen_header *hdr;
	struct device *dev = &mgp->pdev->dev;
	const size_t bytes = sizeof(struct mcp_gen_header);
	size_t hdr_offset;
	int status;

	/* find running firmware header */
	hdr_offset = swab32(readl(mgp->sram + MCP_HEADER_PTR_OFFSET));

	if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > mgp->sram_size) {
		dev_err(dev, "Running firmware has bad header offset (%d)\n",
			(int)hdr_offset);
		return -EIO;
	}

	/* copy header of running firmware from SRAM to host memory to
	 * validate firmware */
	hdr = kmalloc(bytes, GFP_KERNEL);
	if (hdr == NULL) {
		dev_err(dev, "could not malloc firmware hdr\n");
		return -ENOMEM;
	}
	memcpy_fromio(hdr, mgp->sram + hdr_offset, bytes);
	status = myri10ge_validate_firmware(mgp, hdr);
	kfree(hdr);

	/* check to see if adopted firmware has bug where adopting
	 * it will cause broadcasts to be filtered unless the NIC
	 * is kept in ALLMULTI mode */
	if (mgp->fw_ver_major == 1 && mgp->fw_ver_minor == 4 &&
	    mgp->fw_ver_tiny >= 4 && mgp->fw_ver_tiny <= 11) {
		mgp->adopted_rx_filter_bug = 1;
		dev_warn(dev, "Adopting fw %d.%d.%d: "
			 "working around rx filter bug\n",
			 mgp->fw_ver_major, mgp->fw_ver_minor,
			 mgp->fw_ver_tiny);
	}
	return status;
}

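/* Query optional firmware features: the IPv6 TSO header size (to enable
 * NETIF_F_TSO6) and the receive ring size, which determines how many
 * interrupt queue slots the driver must allocate. */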
static int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp)
{
	struct myri10ge_cmd cmd;
	int status;

	/* probe for IPv6 TSO support */
	mgp->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO;
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE,
				   &cmd, 0);
	if (status == 0) {
		mgp->max_tso6 = cmd.data0;
		mgp->features |= NETIF_F_TSO6;
	}

	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0);
	if (status != 0) {
		dev_err(&mgp->pdev->dev,
			"failed MXGEFW_CMD_GET_RX_RING_SIZE\n");
		return -ENXIO;
	}

	mgp->max_intr_slots = 2 * (cmd.data0 / sizeof(struct mcp_dma_addr));

	return 0;
}

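/* Load firmware into the NIC.  Try the hotplug image first; if that fails
 * and adoption is allowed, fall back to the firmware already running on the
 * card.  Otherwise hand the new image off to the bootstrap MCP and wait for
 * it to confirm that it booted. */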
static int myri10ge_load_firmware(struct myri10ge_priv *mgp, int adopt)
{
	char __iomem *submit;
	__be32 buf[16] __attribute__ ((__aligned__(8)));
	u32 dma_low, dma_high, size;
	int status, i;

	size = 0;
	status = myri10ge_load_hotplug_firmware(mgp, &size);
	if (status) {
		if (!adopt)
			return status;
		dev_warn(&mgp->pdev->dev, "hotplug firmware loading failed\n");

		/* Do not attempt to adopt firmware if there
		 * was a bad crc */
		if (status == -EIO)
			return status;

		status = myri10ge_adopt_running_firmware(mgp);
		if (status != 0) {
			dev_err(&mgp->pdev->dev,
				"failed to adopt running firmware\n");
			return status;
		}
		dev_info(&mgp->pdev->dev,
			 "Successfully adopted running firmware\n");
		if (mgp->tx_boundary == 4096) {
			dev_warn(&mgp->pdev->dev,
				 "Using firmware currently running on NIC"
				 ". For optimal\n");
			dev_warn(&mgp->pdev->dev,
				 "performance consider loading optimized "
				 "firmware\n");
			dev_warn(&mgp->pdev->dev, "via hotplug\n");
		}

		set_fw_name(mgp, "adopted", false);
		mgp->tx_boundary = 2048;
		myri10ge_dummy_rdma(mgp, 1);
		status = myri10ge_get_firmware_capabilities(mgp);
		return status;
	}

	/* clear confirmation addr */
	mgp->cmd->data = 0;
	mb();

	/* send a reload command to the bootstrap MCP, and wait for the
	 * response in the confirmation address. The firmware should
	 * write a -1 there to indicate it is alive and well
	 */
	dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
	dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);

	buf[0] = htonl(dma_high);	/* confirm addr MSW */
	buf[1] = htonl(dma_low);	/* confirm addr LSW */
	buf[2] = MYRI10GE_NO_CONFIRM_DATA;	/* confirm data */

	/* FIX: All newest firmware should un-protect the bottom of
	 * the sram before handoff. However, the very first interfaces
	 * do not. Therefore the handoff copy must skip the first 8 bytes
	 */
	buf[3] = htonl(MYRI10GE_FW_OFFSET + 8);	/* where the code starts */
	buf[4] = htonl(size - 8);	/* length of code */
	buf[5] = htonl(8);	/* where to copy to */
	buf[6] = htonl(0);	/* where to jump to */

	submit = mgp->sram + MXGEFW_BOOT_HANDOFF;
	myri10ge_pio_copy(submit, &buf, sizeof(buf));
	mb();
	msleep(1);
	mb();
	i = 0;
	while (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 9) {
		msleep(1 << i);
		i++;
	}
	if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA) {
		dev_err(&mgp->pdev->dev, "handoff failed\n");
		return -ENXIO;
	}
	myri10ge_dummy_rdma(mgp, 1);
	status = myri10ge_get_firmware_capabilities(mgp);

	return status;
}

static int myri10ge_update_mac_address(struct myri10ge_priv *mgp, u8 * addr)
{
	struct myri10ge_cmd cmd;
	int status;

	cmd.data0 = ((addr[0] << 24) | (addr[1] << 16)
		     | (addr[2] << 8) | addr[3]);
	cmd.data1 = ((addr[4] << 8) | (addr[5]));

	status = myri10ge_send_cmd(mgp, MXGEFW_SET_MAC_ADDRESS, &cmd, 0);
	return status;
}

static int myri10ge_change_pause(struct myri10ge_priv *mgp, int pause)
{
	struct myri10ge_cmd cmd;
	int status, ctl;

	ctl = pause ? MXGEFW_ENABLE_FLOW_CONTROL : MXGEFW_DISABLE_FLOW_CONTROL;
	status = myri10ge_send_cmd(mgp, ctl, &cmd, 0);
	if (status) {
		netdev_err(mgp->dev, "Failed to set flow control mode\n");
		return status;
	}
	mgp->pause = pause;
	return 0;
}

static void
myri10ge_change_promisc(struct myri10ge_priv *mgp, int promisc, int atomic)
{
	struct myri10ge_cmd cmd;
	int status, ctl;

	ctl = promisc ? MXGEFW_ENABLE_PROMISC : MXGEFW_DISABLE_PROMISC;
	status = myri10ge_send_cmd(mgp, ctl, &cmd, atomic);
	if (status)
		netdev_err(mgp->dev, "Failed to set promisc mode\n");
}

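/* Ask the firmware to run a short DMA benchmark against a scratch page and
 * record the read, write, and read/write bandwidths (in MB/s).  Failures are
 * reported with a warning, except for the unaligned test. */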
static int myri10ge_dma_test(struct myri10ge_priv *mgp, int test_type)
{
	struct myri10ge_cmd cmd;
	int status;
	u32 len;
	struct page *dmatest_page;
	dma_addr_t dmatest_bus;
	char *test = " ";

	dmatest_page = alloc_page(GFP_KERNEL);
	if (!dmatest_page)
		return -ENOMEM;
	dmatest_bus = pci_map_page(mgp->pdev, dmatest_page, 0, PAGE_SIZE,
				   DMA_BIDIRECTIONAL);
	/* Run a small DMA test.
	 * The magic multipliers to the length tell the firmware
	 * to do DMA read, write, or read+write tests. The
	 * results are returned in cmd.data0. The upper 16
	 * bits of the return are the number of transfers completed.
	 * The lower 16 bits are the time in 0.5us ticks that the
	 * transfers took to complete.
	 */
	len = mgp->tx_boundary;

	cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
	cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
	cmd.data2 = len * 0x10000;
	status = myri10ge_send_cmd(mgp, test_type, &cmd, 0);
	if (status != 0) {
		test = "read";
		goto abort;
	}
	mgp->read_dma = ((cmd.data0 >> 16) * len * 2) / (cmd.data0 & 0xffff);

	cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
	cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
	cmd.data2 = len * 0x1;
	status = myri10ge_send_cmd(mgp, test_type, &cmd, 0);
	if (status != 0) {
		test = "write";
		goto abort;
	}
	mgp->write_dma = ((cmd.data0 >> 16) * len * 2) / (cmd.data0 & 0xffff);

	cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
	cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
	cmd.data2 = len * 0x10001;
	status = myri10ge_send_cmd(mgp, test_type, &cmd, 0);
	if (status != 0) {
		test = "read/write";
		goto abort;
	}
	mgp->read_write_dma = ((cmd.data0 >> 16) * len * 2 * 2) /
	    (cmd.data0 & 0xffff);

abort:
	pci_unmap_page(mgp->pdev, dmatest_bus, PAGE_SIZE, DMA_BIDIRECTIONAL);
	put_page(dmatest_page);

	if (status != 0 && test_type != MXGEFW_CMD_UNALIGNED_TEST)
		dev_warn(&mgp->pdev->dev, "DMA %s benchmark failed: %d\n",
			 test, status);
	return status;
}

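/* Reset the NIC and re-establish driver/firmware shared state: interrupt
 * queue DMA addresses, RSS slice configuration, IRQ claim/deassert and
 * interrupt coalescing offsets, plus the MAC address, pause setting, and
 * multicast filter. */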
static int myri10ge_reset(struct myri10ge_priv *mgp)
{
	struct myri10ge_cmd cmd;
	struct myri10ge_slice_state *ss;
	int i, status;
	size_t bytes;
#ifdef CONFIG_MYRI10GE_DCA
	unsigned long dca_tag_off;
#endif

	/* try to send a reset command to the card to see if it
	 * is alive */
	memset(&cmd, 0, sizeof(cmd));
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_RESET, &cmd, 0);
	if (status != 0) {
		dev_err(&mgp->pdev->dev, "failed reset\n");
		return -ENXIO;
	}

	(void)myri10ge_dma_test(mgp, MXGEFW_DMA_TEST);
	/*
	 * Use non-ndis mcp_slot (eg, 4 bytes total,
	 * no toeplitz hash value returned. Older firmware will
	 * not understand this command, but will use the correct
	 * sized mcp_slot, so we ignore error returns
	 */
	cmd.data0 = MXGEFW_RSS_MCP_SLOT_TYPE_MIN;
	(void)myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_MCP_SLOT_TYPE, &cmd, 0);

	/* Now exchange information about interrupts */

	bytes = mgp->max_intr_slots * sizeof(*mgp->ss[0].rx_done.entry);
	cmd.data0 = (u32) bytes;
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0);

	/*
	 * Even though we already know how many slices are supported
	 * via myri10ge_probe_slices() MXGEFW_CMD_GET_MAX_RSS_QUEUES
	 * has magic side effects, and must be called after a reset.
	 * It must be called prior to calling any RSS related cmds,
	 * including assigning an interrupt queue for anything but
	 * slice 0. It must also be called *after*
	 * MXGEFW_CMD_SET_INTRQ_SIZE, since the intrq size is used by
	 * the firmware to compute offsets.
	 */
	if (mgp->num_slices > 1) {
		/* ask the maximum number of slices it supports */
		status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_RSS_QUEUES,
					   &cmd, 0);
		if (status != 0) {
			dev_err(&mgp->pdev->dev,
				"failed to get number of slices\n");
		}

		/*
		 * MXGEFW_CMD_ENABLE_RSS_QUEUES must be called prior
		 * to setting up the interrupt queue DMA
		 */
		cmd.data0 = mgp->num_slices;
		cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
		if (mgp->dev->real_num_tx_queues > 1)
			cmd.data1 |= MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES;
		status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES,
					   &cmd, 0);

		/* Firmware older than 1.4.32 only supports multiple
		 * RX queues, so if we get an error, first retry using a
		 * single TX queue before giving up */
		if (status != 0 && mgp->dev->real_num_tx_queues > 1) {
			netif_set_real_num_tx_queues(mgp->dev, 1);
			cmd.data0 = mgp->num_slices;
			cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
			status = myri10ge_send_cmd(mgp,
						   MXGEFW_CMD_ENABLE_RSS_QUEUES,
						   &cmd, 0);
		}

		if (status != 0) {
			dev_err(&mgp->pdev->dev,
				"failed to set number of slices\n");
			return status;
		}
	}
	for (i = 0; i < mgp->num_slices; i++) {
		ss = &mgp->ss[i];
		cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->rx_done.bus);
		cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->rx_done.bus);
		cmd.data2 = i;
		status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_DMA,
					    &cmd, 0);
	}

	status |=
	    myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd, 0);
	for (i = 0; i < mgp->num_slices; i++) {
		ss = &mgp->ss[i];
		ss->irq_claim =
		    (__iomem __be32 *) (mgp->sram + cmd.data0 + 8 * i);
	}
	status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET,
				    &cmd, 0);
	mgp->irq_deassert = (__iomem __be32 *) (mgp->sram + cmd.data0);

	status |= myri10ge_send_cmd
	    (mgp, MXGEFW_CMD_GET_INTR_COAL_DELAY_OFFSET, &cmd, 0);
	mgp->intr_coal_delay_ptr = (__iomem __be32 *) (mgp->sram + cmd.data0);

	if (status != 0) {
		dev_err(&mgp->pdev->dev, "failed set interrupt parameters\n");
		return status;
	}
	put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);

#ifdef CONFIG_MYRI10GE_DCA
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_DCA_OFFSET, &cmd, 0);
	dca_tag_off = cmd.data0;
	for (i = 0; i < mgp->num_slices; i++) {
		ss = &mgp->ss[i];
		if (status == 0) {
			ss->dca_tag = (__iomem __be32 *)
			    (mgp->sram + dca_tag_off + 4 * i);
		} else {
			ss->dca_tag = NULL;
		}
	}
#endif /* CONFIG_MYRI10GE_DCA */

	/* reset mcp/driver shared state back to 0 */
	mgp->link_changes = 0;
	for (i = 0; i < mgp->num_slices; i++) {
		ss = &mgp->ss[i];

		memset(ss->rx_done.entry, 0, bytes);
		ss->tx.req = 0;
		ss->tx.done = 0;
		ss->tx.pkt_start = 0;
		ss->tx.pkt_done = 0;
		ss->rx_big.cnt = 0;
		ss->rx_small.cnt = 0;
		ss->rx_done.idx = 0;
		ss->rx_done.cnt = 0;
		ss->tx.wake_queue = 0;
		ss->tx.stop_queue = 0;
	}

	status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr);
	myri10ge_change_pause(mgp, mgp->pause);
	myri10ge_set_multicast_list(mgp->dev);
	return status;
}

#ifdef CONFIG_MYRI10GE_DCA
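/* Set or clear the PCIe "relaxed ordering" enable bit in the Device Control
 * register and return the previous setting. */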
static int myri10ge_toggle_relaxed(struct pci_dev *pdev, int on)
{
	int ret, cap, err;
	u16 ctl;

	cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (!cap)
		return 0;

	err = pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl);
	ret = (ctl & PCI_EXP_DEVCTL_RELAX_EN) >> 4;
	if (ret != on) {
		ctl &= ~PCI_EXP_DEVCTL_RELAX_EN;
		ctl |= (on << 4);
		pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl);
	}
	return ret;
}

static void
myri10ge_write_dca(struct myri10ge_slice_state *ss, int cpu, int tag)
{
	ss->cached_dca_tag = tag;
	put_be32(htonl(tag), ss->dca_tag);
}

static inline void myri10ge_update_dca(struct myri10ge_slice_state *ss)
{
	int cpu = get_cpu();
	int tag;

	if (cpu != ss->cpu) {
		tag = dca3_get_tag(&ss->mgp->pdev->dev, cpu);
		if (ss->cached_dca_tag != tag)
			myri10ge_write_dca(ss, cpu, tag);
		ss->cpu = cpu;
	}
	put_cpu();
}

static void myri10ge_setup_dca(struct myri10ge_priv *mgp)
{
	int err, i;
	struct pci_dev *pdev = mgp->pdev;

	if (mgp->ss[0].dca_tag == NULL || mgp->dca_enabled)
		return;
	if (!myri10ge_dca) {
		dev_err(&pdev->dev, "dca disabled by administrator\n");
		return;
	}
	err = dca_add_requester(&pdev->dev);
	if (err) {
		if (err != -ENODEV)
			dev_err(&pdev->dev,
				"dca_add_requester() failed, err=%d\n", err);
		return;
	}
	mgp->relaxed_order = myri10ge_toggle_relaxed(pdev, 0);
	mgp->dca_enabled = 1;
	for (i = 0; i < mgp->num_slices; i++) {
		mgp->ss[i].cpu = -1;
		mgp->ss[i].cached_dca_tag = -1;
		myri10ge_update_dca(&mgp->ss[i]);
	}
}

static void myri10ge_teardown_dca(struct myri10ge_priv *mgp)
{
	struct pci_dev *pdev = mgp->pdev;
	int err;

	if (!mgp->dca_enabled)
		return;
	mgp->dca_enabled = 0;
	if (mgp->relaxed_order)
		myri10ge_toggle_relaxed(pdev, 1);
	err = dca_remove_requester(&pdev->dev);
}

static int myri10ge_notify_dca_device(struct device *dev, void *data)
{
	struct myri10ge_priv *mgp;
	unsigned long event;

	mgp = dev_get_drvdata(dev);
	event = *(unsigned long *)data;

	if (event == DCA_PROVIDER_ADD)
		myri10ge_setup_dca(mgp);
	else if (event == DCA_PROVIDER_REMOVE)
		myri10ge_teardown_dca(mgp);
	return 0;
}
#endif /* CONFIG_MYRI10GE_DCA */

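/* Copy a block of 8 receive descriptors to the NIC.  The valid low address
 * word of the first descriptor is written last, so the firmware never sees a
 * partially filled block. */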
static inline void
myri10ge_submit_8rx(struct mcp_kreq_ether_recv __iomem * dst,
		    struct mcp_kreq_ether_recv *src)
{
	__be32 low;

	low = src->addr_low;
	src->addr_low = htonl(DMA_BIT_MASK(32));
	myri10ge_pio_copy(dst, src, 4 * sizeof(*src));
	mb();
	myri10ge_pio_copy(dst + 4, src + 4, 4 * sizeof(*src));
	mb();
	src->addr_low = low;
	put_be32(low, &dst->addr_low);
	mb();
}

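/* For VLAN-tagged IPv4/IPv6 frames, accept the NIC-computed checksum and
 * mark the skb CHECKSUM_COMPLETE. */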
static inline void myri10ge_vlan_ip_csum(struct sk_buff *skb, __wsum hw_csum)
{
	struct vlan_hdr *vh = (struct vlan_hdr *)(skb->data);

	if ((skb->protocol == htons(ETH_P_8021Q)) &&
	    (vh->h_vlan_encapsulated_proto == htons(ETH_P_IP) ||
	     vh->h_vlan_encapsulated_proto == htons(ETH_P_IPV6))) {
		skb->csum = hw_csum;
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
}

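/* Attach the receive page fragments to a freshly allocated skb and copy the
 * first hlen bytes into its linear data area so that eth_type_trans() and
 * friends can inspect the headers without pskb_may_pull(). */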
static inline void
myri10ge_rx_skb_build(struct sk_buff *skb, u8 * va,
		      struct skb_frag_struct *rx_frags, int len, int hlen)
{
	struct skb_frag_struct *skb_frags;

	skb->len = skb->data_len = len;
	skb->truesize = len + sizeof(struct sk_buff);
	/* attach the page(s) */

	skb_frags = skb_shinfo(skb)->frags;
	while (len > 0) {
		memcpy(skb_frags, rx_frags, sizeof(*skb_frags));
		len -= rx_frags->size;
		skb_frags++;
		rx_frags++;
		skb_shinfo(skb)->nr_frags++;
	}

	/* pskb_may_pull is not available in irq context, but
	 * skb_pull() (for ether_pad and eth_type_trans()) requires
	 * the beginning of the packet in skb_headlen(), move it
	 * manually */
	skb_copy_to_linear_data(skb, va, hlen);
	skb_shinfo(skb)->frags[0].page_offset += hlen;
	skb_shinfo(skb)->frags[0].size -= hlen;
	skb->data_len -= hlen;
	skb->tail += hlen;
	skb_pull(skb, MXGEFW_PAD);
}

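/* Refill the receive ring.  Buffers are carved out of MYRI10GE_ALLOC_SIZE
 * pages, and completed descriptors are handed to the NIC eight at a time.
 * If page allocation fails while the ring is nearly empty, the watchdog is
 * asked to retry the refill later. */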
  1072. static void
  1073. myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
  1074. int bytes, int watchdog)
  1075. {
  1076. struct page *page;
  1077. int idx;
  1078. #if MYRI10GE_ALLOC_SIZE > 4096
  1079. int end_offset;
  1080. #endif
  1081. if (unlikely(rx->watchdog_needed && !watchdog))
  1082. return;
  1083. /* try to refill entire ring */
  1084. while (rx->fill_cnt != (rx->cnt + rx->mask + 1)) {
  1085. idx = rx->fill_cnt & rx->mask;
  1086. if (rx->page_offset + bytes <= MYRI10GE_ALLOC_SIZE) {
  1087. /* we can use part of previous page */
  1088. get_page(rx->page);
  1089. } else {
  1090. /* we need a new page */
  1091. page =
  1092. alloc_pages(GFP_ATOMIC | __GFP_COMP,
  1093. MYRI10GE_ALLOC_ORDER);
  1094. if (unlikely(page == NULL)) {
  1095. if (rx->fill_cnt - rx->cnt < 16)
  1096. rx->watchdog_needed = 1;
  1097. return;
  1098. }
  1099. rx->page = page;
  1100. rx->page_offset = 0;
  1101. rx->bus = pci_map_page(mgp->pdev, page, 0,
  1102. MYRI10GE_ALLOC_SIZE,
  1103. PCI_DMA_FROMDEVICE);
  1104. }
  1105. rx->info[idx].page = rx->page;
  1106. rx->info[idx].page_offset = rx->page_offset;
  1107. /* note that this is the address of the start of the
  1108. * page */
  1109. dma_unmap_addr_set(&rx->info[idx], bus, rx->bus);
  1110. rx->shadow[idx].addr_low =
  1111. htonl(MYRI10GE_LOWPART_TO_U32(rx->bus) + rx->page_offset);
  1112. rx->shadow[idx].addr_high =
  1113. htonl(MYRI10GE_HIGHPART_TO_U32(rx->bus));
  1114. /* start next packet on a cacheline boundary */
  1115. rx->page_offset += SKB_DATA_ALIGN(bytes);
  1116. #if MYRI10GE_ALLOC_SIZE > 4096
  1117. /* don't cross a 4KB boundary */
  1118. end_offset = rx->page_offset + bytes - 1;
  1119. if ((unsigned)(rx->page_offset ^ end_offset) > 4095)
  1120. rx->page_offset = end_offset & ~4095;
  1121. #endif
  1122. rx->fill_cnt++;
  1123. /* copy 8 descriptors to the firmware at a time */
  1124. if ((idx & 7) == 7) {
  1125. myri10ge_submit_8rx(&rx->lanai[idx - 7],
  1126. &rx->shadow[idx - 7]);
  1127. }
  1128. }
  1129. }
  1130. static inline void
  1131. myri10ge_unmap_rx_page(struct pci_dev *pdev,
  1132. struct myri10ge_rx_buffer_state *info, int bytes)
  1133. {
  1134. /* unmap the recvd page if we're the only or last user of it */
  1135. if (bytes >= MYRI10GE_ALLOC_SIZE / 2 ||
  1136. (info->page_offset + 2 * bytes) > MYRI10GE_ALLOC_SIZE) {
  1137. pci_unmap_page(pdev, (dma_unmap_addr(info, bus)
  1138. & ~(MYRI10GE_ALLOC_SIZE - 1)),
  1139. MYRI10GE_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
  1140. }
  1141. }
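/*
 * Receive completion path: frames no longer than small_bytes come from
 * the small ring, everything else from the big ring.  The payload stays
 * in page fragments; at most MYRI10GE_HLEN bytes are copied into the
 * skb's linear area so eth_type_trans() can run, and LRO is attempted
 * first to avoid allocating an skb at all for aggregated TCP segments.
 */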
  1142. #define MYRI10GE_HLEN 64 /* The number of bytes to copy from a
  1143. * page into an skb */
  1144. static inline int
  1145. myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum,
  1146. int lro_enabled)
  1147. {
  1148. struct myri10ge_priv *mgp = ss->mgp;
  1149. struct sk_buff *skb;
  1150. struct skb_frag_struct rx_frags[MYRI10GE_MAX_FRAGS_PER_FRAME];
  1151. struct myri10ge_rx_buf *rx;
  1152. int i, idx, hlen, remainder, bytes;
  1153. struct pci_dev *pdev = mgp->pdev;
  1154. struct net_device *dev = mgp->dev;
  1155. u8 *va;
  1156. if (len <= mgp->small_bytes) {
  1157. rx = &ss->rx_small;
  1158. bytes = mgp->small_bytes;
  1159. } else {
  1160. rx = &ss->rx_big;
  1161. bytes = mgp->big_bytes;
  1162. }
  1163. len += MXGEFW_PAD;
  1164. idx = rx->cnt & rx->mask;
  1165. va = page_address(rx->info[idx].page) + rx->info[idx].page_offset;
  1166. prefetch(va);
  1167. /* Fill skb_frag_struct(s) with data from our receive */
  1168. for (i = 0, remainder = len; remainder > 0; i++) {
  1169. myri10ge_unmap_rx_page(pdev, &rx->info[idx], bytes);
  1170. rx_frags[i].page = rx->info[idx].page;
  1171. rx_frags[i].page_offset = rx->info[idx].page_offset;
  1172. if (remainder < MYRI10GE_ALLOC_SIZE)
  1173. rx_frags[i].size = remainder;
  1174. else
  1175. rx_frags[i].size = MYRI10GE_ALLOC_SIZE;
  1176. rx->cnt++;
  1177. idx = rx->cnt & rx->mask;
  1178. remainder -= MYRI10GE_ALLOC_SIZE;
  1179. }
  1180. if (lro_enabled) {
  1181. rx_frags[0].page_offset += MXGEFW_PAD;
  1182. rx_frags[0].size -= MXGEFW_PAD;
  1183. len -= MXGEFW_PAD;
  1184. lro_receive_frags(&ss->rx_done.lro_mgr, rx_frags,
  1185. /* opaque, will come back in get_frag_header */
  1186. len, len,
  1187. (void *)(__force unsigned long)csum, csum);
  1188. return 1;
  1189. }
  1190. hlen = MYRI10GE_HLEN > len ? len : MYRI10GE_HLEN;
  1191. /* allocate an skb to attach the page(s) to. This is done
  1192. * after trying LRO, so as to avoid skb allocation overheads */
  1193. skb = netdev_alloc_skb(dev, MYRI10GE_HLEN + 16);
  1194. if (unlikely(skb == NULL)) {
  1195. ss->stats.rx_dropped++;
  1196. do {
  1197. i--;
  1198. put_page(rx_frags[i].page);
  1199. } while (i != 0);
  1200. return 0;
  1201. }
  1202. /* Attach the pages to the skb, and trim off any padding */
  1203. myri10ge_rx_skb_build(skb, va, rx_frags, len, hlen);
  1204. if (skb_shinfo(skb)->frags[0].size <= 0) {
  1205. put_page(skb_shinfo(skb)->frags[0].page);
  1206. skb_shinfo(skb)->nr_frags = 0;
  1207. }
  1208. skb->protocol = eth_type_trans(skb, dev);
  1209. skb_record_rx_queue(skb, ss - &mgp->ss[0]);
  1210. if (dev->features & NETIF_F_RXCSUM) {
  1211. if ((skb->protocol == htons(ETH_P_IP)) ||
  1212. (skb->protocol == htons(ETH_P_IPV6))) {
  1213. skb->csum = csum;
  1214. skb->ip_summed = CHECKSUM_COMPLETE;
  1215. } else
  1216. myri10ge_vlan_ip_csum(skb, csum);
  1217. }
  1218. netif_receive_skb(skb);
  1219. return 1;
  1220. }
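/*
 * Transmit completion: the firmware's send_done_count (mcp_index) says
 * how many packets have left the NIC.  Descriptors are reclaimed in
 * order, the per-descriptor 'last' flag marks packet boundaries, and
 * the queue is woken once fewer than half of the ring entries remain
 * in flight.
 */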
  1221. static inline void
  1222. myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index)
  1223. {
  1224. struct pci_dev *pdev = ss->mgp->pdev;
  1225. struct myri10ge_tx_buf *tx = &ss->tx;
  1226. struct netdev_queue *dev_queue;
  1227. struct sk_buff *skb;
  1228. int idx, len;
  1229. while (tx->pkt_done != mcp_index) {
  1230. idx = tx->done & tx->mask;
  1231. skb = tx->info[idx].skb;
  1232. /* Mark as free */
  1233. tx->info[idx].skb = NULL;
  1234. if (tx->info[idx].last) {
  1235. tx->pkt_done++;
  1236. tx->info[idx].last = 0;
  1237. }
  1238. tx->done++;
  1239. len = dma_unmap_len(&tx->info[idx], len);
  1240. dma_unmap_len_set(&tx->info[idx], len, 0);
  1241. if (skb) {
  1242. ss->stats.tx_bytes += skb->len;
  1243. ss->stats.tx_packets++;
  1244. dev_kfree_skb_irq(skb);
  1245. if (len)
  1246. pci_unmap_single(pdev,
  1247. dma_unmap_addr(&tx->info[idx],
  1248. bus), len,
  1249. PCI_DMA_TODEVICE);
  1250. } else {
  1251. if (len)
  1252. pci_unmap_page(pdev,
  1253. dma_unmap_addr(&tx->info[idx],
  1254. bus), len,
  1255. PCI_DMA_TODEVICE);
  1256. }
  1257. }
  1258. dev_queue = netdev_get_tx_queue(ss->dev, ss - ss->mgp->ss);
  1259. /*
  1260. * Make a minimal effort to prevent the NIC from polling an
  1261. * idle tx queue. If we can't get the lock we leave the queue
  1262. * active. In this case, either a thread was about to start
  1263. * using the queue anyway, or we lost a race and the NIC will
  1264. * waste some of its resources polling an inactive queue for a
  1265. * while.
  1266. */
  1267. if ((ss->mgp->dev->real_num_tx_queues > 1) &&
  1268. __netif_tx_trylock(dev_queue)) {
  1269. if (tx->req == tx->done) {
  1270. tx->queue_active = 0;
  1271. put_be32(htonl(1), tx->send_stop);
  1272. mb();
  1273. mmiowb();
  1274. }
  1275. __netif_tx_unlock(dev_queue);
  1276. }
  1277. /* start the queue if we've stopped it */
  1278. if (netif_tx_queue_stopped(dev_queue) &&
  1279. tx->req - tx->done < (tx->mask >> 1)) {
  1280. tx->wake_queue++;
  1281. netif_tx_wake_queue(dev_queue);
  1282. }
  1283. }
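/*
 * NAPI receive processing: walk the rx_done completion ring until the
 * budget is exhausted or an empty entry is found, then restock the
 * small and big receive rings if their fill level has dropped below
 * myri10ge_fill_thresh.
 */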
  1284. static inline int
  1285. myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
  1286. {
  1287. struct myri10ge_rx_done *rx_done = &ss->rx_done;
  1288. struct myri10ge_priv *mgp = ss->mgp;
  1289. unsigned long rx_bytes = 0;
  1290. unsigned long rx_packets = 0;
  1291. unsigned long rx_ok;
  1292. int idx = rx_done->idx;
  1293. int cnt = rx_done->cnt;
  1294. int work_done = 0;
  1295. u16 length;
  1296. __wsum checksum;
  1297. /*
  1298. * Prevent compiler from generating more than one ->features memory
  1299. * access to avoid theoretical race condition with functions that
  1300. * change NETIF_F_LRO flag at runtime.
  1301. */
  1302. bool lro_enabled = ACCESS_ONCE(mgp->dev->features) & NETIF_F_LRO;
  1303. while (rx_done->entry[idx].length != 0 && work_done < budget) {
  1304. length = ntohs(rx_done->entry[idx].length);
  1305. rx_done->entry[idx].length = 0;
  1306. checksum = csum_unfold(rx_done->entry[idx].checksum);
  1307. rx_ok = myri10ge_rx_done(ss, length, checksum, lro_enabled);
  1308. rx_packets += rx_ok;
  1309. rx_bytes += rx_ok * (unsigned long)length;
  1310. cnt++;
  1311. idx = cnt & (mgp->max_intr_slots - 1);
  1312. work_done++;
  1313. }
  1314. rx_done->idx = idx;
  1315. rx_done->cnt = cnt;
  1316. ss->stats.rx_packets += rx_packets;
  1317. ss->stats.rx_bytes += rx_bytes;
  1318. if (lro_enabled)
  1319. lro_flush_all(&rx_done->lro_mgr);
  1320. /* restock receive rings if needed */
  1321. if (ss->rx_small.fill_cnt - ss->rx_small.cnt < myri10ge_fill_thresh)
  1322. myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
  1323. mgp->small_bytes + MXGEFW_PAD, 0);
  1324. if (ss->rx_big.fill_cnt - ss->rx_big.cnt < myri10ge_fill_thresh)
  1325. myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0);
  1326. return work_done;
  1327. }
  1328. static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp)
  1329. {
  1330. struct mcp_irq_data *stats = mgp->ss[0].fw_stats;
  1331. if (unlikely(stats->stats_updated)) {
  1332. unsigned link_up = ntohl(stats->link_up);
  1333. if (mgp->link_state != link_up) {
  1334. mgp->link_state = link_up;
  1335. if (mgp->link_state == MXGEFW_LINK_UP) {
  1336. if (netif_msg_link(mgp))
  1337. netdev_info(mgp->dev, "link up\n");
  1338. netif_carrier_on(mgp->dev);
  1339. mgp->link_changes++;
  1340. } else {
  1341. if (netif_msg_link(mgp))
  1342. netdev_info(mgp->dev, "link %s\n",
  1343. link_up == MXGEFW_LINK_MYRINET ?
  1344. "mismatch (Myrinet detected)" :
  1345. "down");
  1346. netif_carrier_off(mgp->dev);
  1347. mgp->link_changes++;
  1348. }
  1349. }
  1350. if (mgp->rdma_tags_available !=
  1351. ntohl(stats->rdma_tags_available)) {
  1352. mgp->rdma_tags_available =
  1353. ntohl(stats->rdma_tags_available);
  1354. netdev_warn(mgp->dev, "RDMA timed out! %d tags left\n",
  1355. mgp->rdma_tags_available);
  1356. }
  1357. mgp->down_cnt += stats->link_down;
  1358. if (stats->link_down)
  1359. wake_up(&mgp->down_wq);
  1360. }
  1361. }
  1362. static int myri10ge_poll(struct napi_struct *napi, int budget)
  1363. {
  1364. struct myri10ge_slice_state *ss =
  1365. container_of(napi, struct myri10ge_slice_state, napi);
  1366. int work_done;
  1367. #ifdef CONFIG_MYRI10GE_DCA
  1368. if (ss->mgp->dca_enabled)
  1369. myri10ge_update_dca(ss);
  1370. #endif
  1371. /* process as many rx events as NAPI will allow */
  1372. work_done = myri10ge_clean_rx_done(ss, budget);
  1373. if (work_done < budget) {
  1374. napi_complete(napi);
  1375. put_be32(htonl(3), ss->irq_claim);
  1376. }
  1377. return work_done;
  1378. }
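/*
 * Interrupt handler.  With MSI-X every slice has its own vector, so a
 * non-zero slice simply schedules NAPI.  For legacy INTx the handler
 * deasserts the interrupt via irq_deassert and then spins (bounded by
 * myri10ge_max_irq_loops) until the firmware clears stats->valid,
 * reclaiming transmit completions while it waits.  The final
 * put_be32(htonl(3), ss->irq_claim + 1) acknowledges the interrupt to
 * the firmware.
 */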
  1379. static irqreturn_t myri10ge_intr(int irq, void *arg)
  1380. {
  1381. struct myri10ge_slice_state *ss = arg;
  1382. struct myri10ge_priv *mgp = ss->mgp;
  1383. struct mcp_irq_data *stats = ss->fw_stats;
  1384. struct myri10ge_tx_buf *tx = &ss->tx;
  1385. u32 send_done_count;
  1386. int i;
  1387. /* an interrupt on a non-zero receive-only slice is implicitly
  1388. * valid since MSI-X irqs are not shared */
  1389. if ((mgp->dev->real_num_tx_queues == 1) && (ss != mgp->ss)) {
  1390. napi_schedule(&ss->napi);
  1391. return IRQ_HANDLED;
  1392. }
  1393. /* make sure it is our IRQ, and that the DMA has finished */
  1394. if (unlikely(!stats->valid))
  1395. return IRQ_NONE;
  1396. /* low bit indicates receives are present, so schedule
  1397. * napi poll handler */
  1398. if (stats->valid & 1)
  1399. napi_schedule(&ss->napi);
  1400. if (!mgp->msi_enabled && !mgp->msix_enabled) {
  1401. put_be32(0, mgp->irq_deassert);
  1402. if (!myri10ge_deassert_wait)
  1403. stats->valid = 0;
  1404. mb();
  1405. } else
  1406. stats->valid = 0;
  1407. /* Wait for IRQ line to go low, if using INTx */
  1408. i = 0;
  1409. while (1) {
  1410. i++;
  1411. /* check for transmit completes and receives */
  1412. send_done_count = ntohl(stats->send_done_count);
  1413. if (send_done_count != tx->pkt_done)
  1414. myri10ge_tx_done(ss, (int)send_done_count);
  1415. if (unlikely(i > myri10ge_max_irq_loops)) {
  1416. netdev_err(mgp->dev, "irq stuck?\n");
  1417. stats->valid = 0;
  1418. schedule_work(&mgp->watchdog_work);
  1419. }
  1420. if (likely(stats->valid == 0))
  1421. break;
  1422. cpu_relax();
  1423. barrier();
  1424. }
  1425. /* Only slice 0 updates stats */
  1426. if (ss == mgp->ss)
  1427. myri10ge_check_statblock(mgp);
  1428. put_be32(htonl(3), ss->irq_claim + 1);
  1429. return IRQ_HANDLED;
  1430. }
  1431. static int
  1432. myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
  1433. {
  1434. struct myri10ge_priv *mgp = netdev_priv(netdev);
  1435. char *ptr;
  1436. int i;
  1437. cmd->autoneg = AUTONEG_DISABLE;
  1438. ethtool_cmd_speed_set(cmd, SPEED_10000);
  1439. cmd->duplex = DUPLEX_FULL;
  1440. /*
1441. * parse the product code to determine the interface type
  1442. * (CX4, XFP, Quad Ribbon Fiber) by looking at the character
  1443. * after the 3rd dash in the driver's cached copy of the
  1444. * EEPROM's product code string.
  1445. */
  1446. ptr = mgp->product_code_string;
  1447. if (ptr == NULL) {
  1448. netdev_err(netdev, "Missing product code\n");
  1449. return 0;
  1450. }
  1451. for (i = 0; i < 3; i++, ptr++) {
  1452. ptr = strchr(ptr, '-');
  1453. if (ptr == NULL) {
  1454. netdev_err(netdev, "Invalid product code %s\n",
  1455. mgp->product_code_string);
  1456. return 0;
  1457. }
  1458. }
  1459. if (*ptr == '2')
  1460. ptr++;
  1461. if (*ptr == 'R' || *ptr == 'Q' || *ptr == 'S') {
  1462. /* We've found either an XFP, quad ribbon fiber, or SFP+ */
  1463. cmd->port = PORT_FIBRE;
  1464. cmd->supported |= SUPPORTED_FIBRE;
  1465. cmd->advertising |= ADVERTISED_FIBRE;
  1466. } else {
  1467. cmd->port = PORT_OTHER;
  1468. }
  1469. if (*ptr == 'R' || *ptr == 'S')
  1470. cmd->transceiver = XCVR_EXTERNAL;
  1471. else
  1472. cmd->transceiver = XCVR_INTERNAL;
  1473. return 0;
  1474. }
  1475. static void
  1476. myri10ge_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
  1477. {
  1478. struct myri10ge_priv *mgp = netdev_priv(netdev);
  1479. strlcpy(info->driver, "myri10ge", sizeof(info->driver));
  1480. strlcpy(info->version, MYRI10GE_VERSION_STR, sizeof(info->version));
  1481. strlcpy(info->fw_version, mgp->fw_version, sizeof(info->fw_version));
  1482. strlcpy(info->bus_info, pci_name(mgp->pdev), sizeof(info->bus_info));
  1483. }
  1484. static int
  1485. myri10ge_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal)
  1486. {
  1487. struct myri10ge_priv *mgp = netdev_priv(netdev);
  1488. coal->rx_coalesce_usecs = mgp->intr_coal_delay;
  1489. return 0;
  1490. }
  1491. static int
  1492. myri10ge_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal)
  1493. {
  1494. struct myri10ge_priv *mgp = netdev_priv(netdev);
  1495. mgp->intr_coal_delay = coal->rx_coalesce_usecs;
  1496. put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);
  1497. return 0;
  1498. }
  1499. static void
  1500. myri10ge_get_pauseparam(struct net_device *netdev,
  1501. struct ethtool_pauseparam *pause)
  1502. {
  1503. struct myri10ge_priv *mgp = netdev_priv(netdev);
  1504. pause->autoneg = 0;
  1505. pause->rx_pause = mgp->pause;
  1506. pause->tx_pause = mgp->pause;
  1507. }
  1508. static int
  1509. myri10ge_set_pauseparam(struct net_device *netdev,
  1510. struct ethtool_pauseparam *pause)
  1511. {
  1512. struct myri10ge_priv *mgp = netdev_priv(netdev);
  1513. if (pause->tx_pause != mgp->pause)
  1514. return myri10ge_change_pause(mgp, pause->tx_pause);
  1515. if (pause->rx_pause != mgp->pause)
  1516. return myri10ge_change_pause(mgp, pause->rx_pause);
  1517. if (pause->autoneg != 0)
  1518. return -EINVAL;
  1519. return 0;
  1520. }
  1521. static void
  1522. myri10ge_get_ringparam(struct net_device *netdev,
  1523. struct ethtool_ringparam *ring)
  1524. {
  1525. struct myri10ge_priv *mgp = netdev_priv(netdev);
  1526. ring->rx_mini_max_pending = mgp->ss[0].rx_small.mask + 1;
  1527. ring->rx_max_pending = mgp->ss[0].rx_big.mask + 1;
  1528. ring->rx_jumbo_max_pending = 0;
  1529. ring->tx_max_pending = mgp->ss[0].tx.mask + 1;
  1530. ring->rx_mini_pending = ring->rx_mini_max_pending;
  1531. ring->rx_pending = ring->rx_max_pending;
  1532. ring->rx_jumbo_pending = ring->rx_jumbo_max_pending;
  1533. ring->tx_pending = ring->tx_max_pending;
  1534. }
  1535. static const char myri10ge_gstrings_main_stats[][ETH_GSTRING_LEN] = {
  1536. "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
  1537. "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
  1538. "rx_length_errors", "rx_over_errors", "rx_crc_errors",
  1539. "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
  1540. "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
  1541. "tx_heartbeat_errors", "tx_window_errors",
  1542. /* device-specific stats */
  1543. "tx_boundary", "WC", "irq", "MSI", "MSIX",
  1544. "read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs",
  1545. "serial_number", "watchdog_resets",
  1546. #ifdef CONFIG_MYRI10GE_DCA
  1547. "dca_capable_firmware", "dca_device_present",
  1548. #endif
  1549. "link_changes", "link_up", "dropped_link_overflow",
  1550. "dropped_link_error_or_filtered",
  1551. "dropped_pause", "dropped_bad_phy", "dropped_bad_crc32",
  1552. "dropped_unicast_filtered", "dropped_multicast_filtered",
  1553. "dropped_runt", "dropped_overrun", "dropped_no_small_buffer",
  1554. "dropped_no_big_buffer"
  1555. };
  1556. static const char myri10ge_gstrings_slice_stats[][ETH_GSTRING_LEN] = {
  1557. "----------- slice ---------",
  1558. "tx_pkt_start", "tx_pkt_done", "tx_req", "tx_done",
  1559. "rx_small_cnt", "rx_big_cnt",
  1560. "wake_queue", "stop_queue", "tx_linearized", "LRO aggregated",
  1561. "LRO flushed",
  1562. "LRO avg aggr", "LRO no_desc"
  1563. };
  1564. #define MYRI10GE_NET_STATS_LEN 21
  1565. #define MYRI10GE_MAIN_STATS_LEN ARRAY_SIZE(myri10ge_gstrings_main_stats)
  1566. #define MYRI10GE_SLICE_STATS_LEN ARRAY_SIZE(myri10ge_gstrings_slice_stats)
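/* MYRI10GE_NET_STATS_LEN counts the leading strings above that mirror
 * the rtnl_link_stats64 fields copied verbatim in
 * myri10ge_get_ethtool_stats(); it must stay in sync with that list. */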
  1567. static void
  1568. myri10ge_get_strings(struct net_device *netdev, u32 stringset, u8 * data)
  1569. {
  1570. struct myri10ge_priv *mgp = netdev_priv(netdev);
  1571. int i;
  1572. switch (stringset) {
  1573. case ETH_SS_STATS:
  1574. memcpy(data, *myri10ge_gstrings_main_stats,
  1575. sizeof(myri10ge_gstrings_main_stats));
  1576. data += sizeof(myri10ge_gstrings_main_stats);
  1577. for (i = 0; i < mgp->num_slices; i++) {
  1578. memcpy(data, *myri10ge_gstrings_slice_stats,
  1579. sizeof(myri10ge_gstrings_slice_stats));
  1580. data += sizeof(myri10ge_gstrings_slice_stats);
  1581. }
  1582. break;
  1583. }
  1584. }
  1585. static int myri10ge_get_sset_count(struct net_device *netdev, int sset)
  1586. {
  1587. struct myri10ge_priv *mgp = netdev_priv(netdev);
  1588. switch (sset) {
  1589. case ETH_SS_STATS:
  1590. return MYRI10GE_MAIN_STATS_LEN +
  1591. mgp->num_slices * MYRI10GE_SLICE_STATS_LEN;
  1592. default:
  1593. return -EOPNOTSUPP;
  1594. }
  1595. }
  1596. static void
  1597. myri10ge_get_ethtool_stats(struct net_device *netdev,
  1598. struct ethtool_stats *stats, u64 * data)
  1599. {
  1600. struct myri10ge_priv *mgp = netdev_priv(netdev);
  1601. struct myri10ge_slice_state *ss;
  1602. struct rtnl_link_stats64 link_stats;
  1603. int slice;
  1604. int i;
  1605. /* force stats update */
  1606. (void)myri10ge_get_stats(netdev, &link_stats);
  1607. for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++)
  1608. data[i] = ((u64 *)&link_stats)[i];
  1609. data[i++] = (unsigned int)mgp->tx_boundary;
  1610. data[i++] = (unsigned int)mgp->wc_enabled;
  1611. data[i++] = (unsigned int)mgp->pdev->irq;
  1612. data[i++] = (unsigned int)mgp->msi_enabled;
  1613. data[i++] = (unsigned int)mgp->msix_enabled;
  1614. data[i++] = (unsigned int)mgp->read_dma;
  1615. data[i++] = (unsigned int)mgp->write_dma;
  1616. data[i++] = (unsigned int)mgp->read_write_dma;
  1617. data[i++] = (unsigned int)mgp->serial_number;
  1618. data[i++] = (unsigned int)mgp->watchdog_resets;
  1619. #ifdef CONFIG_MYRI10GE_DCA
  1620. data[i++] = (unsigned int)(mgp->ss[0].dca_tag != NULL);
  1621. data[i++] = (unsigned int)(mgp->dca_enabled);
  1622. #endif
  1623. data[i++] = (unsigned int)mgp->link_changes;
  1624. /* firmware stats are useful only in the first slice */
  1625. ss = &mgp->ss[0];
  1626. data[i++] = (unsigned int)ntohl(ss->fw_stats->link_up);
  1627. data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_link_overflow);
  1628. data[i++] =
  1629. (unsigned int)ntohl(ss->fw_stats->dropped_link_error_or_filtered);
  1630. data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_pause);
  1631. data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_phy);
  1632. data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_crc32);
  1633. data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_unicast_filtered);
  1634. data[i++] =
  1635. (unsigned int)ntohl(ss->fw_stats->dropped_multicast_filtered);
  1636. data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_runt);
  1637. data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_overrun);
  1638. data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_small_buffer);
  1639. data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_big_buffer);
  1640. for (slice = 0; slice < mgp->num_slices; slice++) {
  1641. ss = &mgp->ss[slice];
  1642. data[i++] = slice;
  1643. data[i++] = (unsigned int)ss->tx.pkt_start;
  1644. data[i++] = (unsigned int)ss->tx.pkt_done;
  1645. data[i++] = (unsigned int)ss->tx.req;
  1646. data[i++] = (unsigned int)ss->tx.done;
  1647. data[i++] = (unsigned int)ss->rx_small.cnt;
  1648. data[i++] = (unsigned int)ss->rx_big.cnt;
  1649. data[i++] = (unsigned int)ss->tx.wake_queue;
  1650. data[i++] = (unsigned int)ss->tx.stop_queue;
  1651. data[i++] = (unsigned int)ss->tx.linearized;
  1652. data[i++] = ss->rx_done.lro_mgr.stats.aggregated;
  1653. data[i++] = ss->rx_done.lro_mgr.stats.flushed;
  1654. if (ss->rx_done.lro_mgr.stats.flushed)
  1655. data[i++] = ss->rx_done.lro_mgr.stats.aggregated /
  1656. ss->rx_done.lro_mgr.stats.flushed;
  1657. else
  1658. data[i++] = 0;
  1659. data[i++] = ss->rx_done.lro_mgr.stats.no_desc;
  1660. }
  1661. }
  1662. static void myri10ge_set_msglevel(struct net_device *netdev, u32 value)
  1663. {
  1664. struct myri10ge_priv *mgp = netdev_priv(netdev);
  1665. mgp->msg_enable = value;
  1666. }
  1667. static u32 myri10ge_get_msglevel(struct net_device *netdev)
  1668. {
  1669. struct myri10ge_priv *mgp = netdev_priv(netdev);
  1670. return mgp->msg_enable;
  1671. }
  1672. static const struct ethtool_ops myri10ge_ethtool_ops = {
  1673. .get_settings = myri10ge_get_settings,
  1674. .get_drvinfo = myri10ge_get_drvinfo,
  1675. .get_coalesce = myri10ge_get_coalesce,
  1676. .set_coalesce = myri10ge_set_coalesce,
  1677. .get_pauseparam = myri10ge_get_pauseparam,
  1678. .set_pauseparam = myri10ge_set_pauseparam,
  1679. .get_ringparam = myri10ge_get_ringparam,
  1680. .get_link = ethtool_op_get_link,
  1681. .get_strings = myri10ge_get_strings,
  1682. .get_sset_count = myri10ge_get_sset_count,
  1683. .get_ethtool_stats = myri10ge_get_ethtool_stats,
  1684. .set_msglevel = myri10ge_set_msglevel,
  1685. .get_msglevel = myri10ge_get_msglevel,
  1686. };
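/*
 * Per-slice ring allocation: the send and receive ring sizes are
 * queried from the firmware, host-side shadow and info arrays are
 * allocated for the tx, rx_small and rx_big rings, and both receive
 * rings are pre-filled with pages.  On any failure the goto chain
 * unwinds the allocations in reverse order.
 */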
  1687. static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss)
  1688. {
  1689. struct myri10ge_priv *mgp = ss->mgp;
  1690. struct myri10ge_cmd cmd;
  1691. struct net_device *dev = mgp->dev;
  1692. int tx_ring_size, rx_ring_size;
  1693. int tx_ring_entries, rx_ring_entries;
  1694. int i, slice, status;
  1695. size_t bytes;
  1696. /* get ring sizes */
  1697. slice = ss - mgp->ss;
  1698. cmd.data0 = slice;
  1699. status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd, 0);
  1700. tx_ring_size = cmd.data0;
  1701. cmd.data0 = slice;
  1702. status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0);
  1703. if (status != 0)
  1704. return status;
  1705. rx_ring_size = cmd.data0;
  1706. tx_ring_entries = tx_ring_size / sizeof(struct mcp_kreq_ether_send);
  1707. rx_ring_entries = rx_ring_size / sizeof(struct mcp_dma_addr);
  1708. ss->tx.mask = tx_ring_entries - 1;
  1709. ss->rx_small.mask = ss->rx_big.mask = rx_ring_entries - 1;
  1710. status = -ENOMEM;
  1711. /* allocate the host shadow rings */
  1712. bytes = 8 + (MYRI10GE_MAX_SEND_DESC_TSO + 4)
  1713. * sizeof(*ss->tx.req_list);
  1714. ss->tx.req_bytes = kzalloc(bytes, GFP_KERNEL);
  1715. if (ss->tx.req_bytes == NULL)
  1716. goto abort_with_nothing;
  1717. /* ensure req_list entries are aligned to 8 bytes */
  1718. ss->tx.req_list = (struct mcp_kreq_ether_send *)
  1719. ALIGN((unsigned long)ss->tx.req_bytes, 8);
  1720. ss->tx.queue_active = 0;
  1721. bytes = rx_ring_entries * sizeof(*ss->rx_small.shadow);
  1722. ss->rx_small.shadow = kzalloc(bytes, GFP_KERNEL);
  1723. if (ss->rx_small.shadow == NULL)
  1724. goto abort_with_tx_req_bytes;
  1725. bytes = rx_ring_entries * sizeof(*ss->rx_big.shadow);
  1726. ss->rx_big.shadow = kzalloc(bytes, GFP_KERNEL);
  1727. if (ss->rx_big.shadow == NULL)
  1728. goto abort_with_rx_small_shadow;
  1729. /* allocate the host info rings */
  1730. bytes = tx_ring_entries * sizeof(*ss->tx.info);
  1731. ss->tx.info = kzalloc(bytes, GFP_KERNEL);
  1732. if (ss->tx.info == NULL)
  1733. goto abort_with_rx_big_shadow;
  1734. bytes = rx_ring_entries * sizeof(*ss->rx_small.info);
  1735. ss->rx_small.info = kzalloc(bytes, GFP_KERNEL);
  1736. if (ss->rx_small.info == NULL)
  1737. goto abort_with_tx_info;
  1738. bytes = rx_ring_entries * sizeof(*ss->rx_big.info);
  1739. ss->rx_big.info = kzalloc(bytes, GFP_KERNEL);
  1740. if (ss->rx_big.info == NULL)
  1741. goto abort_with_rx_small_info;
  1742. /* Fill the receive rings */
  1743. ss->rx_big.cnt = 0;
  1744. ss->rx_small.cnt = 0;
  1745. ss->rx_big.fill_cnt = 0;
  1746. ss->rx_small.fill_cnt = 0;
  1747. ss->rx_small.page_offset = MYRI10GE_ALLOC_SIZE;
  1748. ss->rx_big.page_offset = MYRI10GE_ALLOC_SIZE;
  1749. ss->rx_small.watchdog_needed = 0;
  1750. ss->rx_big.watchdog_needed = 0;
  1751. myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
  1752. mgp->small_bytes + MXGEFW_PAD, 0);
  1753. if (ss->rx_small.fill_cnt < ss->rx_small.mask + 1) {
  1754. netdev_err(dev, "slice-%d: alloced only %d small bufs\n",
  1755. slice, ss->rx_small.fill_cnt);
  1756. goto abort_with_rx_small_ring;
  1757. }
  1758. myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0);
  1759. if (ss->rx_big.fill_cnt < ss->rx_big.mask + 1) {
  1760. netdev_err(dev, "slice-%d: alloced only %d big bufs\n",
  1761. slice, ss->rx_big.fill_cnt);
  1762. goto abort_with_rx_big_ring;
  1763. }
  1764. return 0;
  1765. abort_with_rx_big_ring:
  1766. for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) {
  1767. int idx = i & ss->rx_big.mask;
  1768. myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx],
  1769. mgp->big_bytes);
  1770. put_page(ss->rx_big.info[idx].page);
  1771. }
  1772. abort_with_rx_small_ring:
  1773. for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) {
  1774. int idx = i & ss->rx_small.mask;
  1775. myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx],
  1776. mgp->small_bytes + MXGEFW_PAD);
  1777. put_page(ss->rx_small.info[idx].page);
  1778. }
  1779. kfree(ss->rx_big.info);
  1780. abort_with_rx_small_info:
  1781. kfree(ss->rx_small.info);
  1782. abort_with_tx_info:
  1783. kfree(ss->tx.info);
  1784. abort_with_rx_big_shadow:
  1785. kfree(ss->rx_big.shadow);
  1786. abort_with_rx_small_shadow:
  1787. kfree(ss->rx_small.shadow);
  1788. abort_with_tx_req_bytes:
  1789. kfree(ss->tx.req_bytes);
  1790. ss->tx.req_bytes = NULL;
  1791. ss->tx.req_list = NULL;
  1792. abort_with_nothing:
  1793. return status;
  1794. }
  1795. static void myri10ge_free_rings(struct myri10ge_slice_state *ss)
  1796. {
  1797. struct myri10ge_priv *mgp = ss->mgp;
  1798. struct sk_buff *skb;
  1799. struct myri10ge_tx_buf *tx;
  1800. int i, len, idx;
  1801. /* If not allocated, skip it */
  1802. if (ss->tx.req_list == NULL)
  1803. return;
  1804. for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) {
  1805. idx = i & ss->rx_big.mask;
  1806. if (i == ss->rx_big.fill_cnt - 1)
  1807. ss->rx_big.info[idx].page_offset = MYRI10GE_ALLOC_SIZE;
  1808. myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx],
  1809. mgp->big_bytes);
  1810. put_page(ss->rx_big.info[idx].page);
  1811. }
  1812. for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) {
  1813. idx = i & ss->rx_small.mask;
  1814. if (i == ss->rx_small.fill_cnt - 1)
  1815. ss->rx_small.info[idx].page_offset =
  1816. MYRI10GE_ALLOC_SIZE;
  1817. myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx],
  1818. mgp->small_bytes + MXGEFW_PAD);
  1819. put_page(ss->rx_small.info[idx].page);
  1820. }
  1821. tx = &ss->tx;
  1822. while (tx->done != tx->req) {
  1823. idx = tx->done & tx->mask;
  1824. skb = tx->info[idx].skb;
  1825. /* Mark as free */
  1826. tx->info[idx].skb = NULL;
  1827. tx->done++;
  1828. len = dma_unmap_len(&tx->info[idx], len);
  1829. dma_unmap_len_set(&tx->info[idx], len, 0);
  1830. if (skb) {
  1831. ss->stats.tx_dropped++;
  1832. dev_kfree_skb_any(skb);
  1833. if (len)
  1834. pci_unmap_single(mgp->pdev,
  1835. dma_unmap_addr(&tx->info[idx],
  1836. bus), len,
  1837. PCI_DMA_TODEVICE);
  1838. } else {
  1839. if (len)
  1840. pci_unmap_page(mgp->pdev,
  1841. dma_unmap_addr(&tx->info[idx],
  1842. bus), len,
  1843. PCI_DMA_TODEVICE);
  1844. }
  1845. }
  1846. kfree(ss->rx_big.info);
  1847. kfree(ss->rx_small.info);
  1848. kfree(ss->tx.info);
  1849. kfree(ss->rx_big.shadow);
  1850. kfree(ss->rx_small.shadow);
  1851. kfree(ss->tx.req_bytes);
  1852. ss->tx.req_bytes = NULL;
  1853. ss->tx.req_list = NULL;
  1854. }
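/*
 * Interrupt allocation policy: with more than one slice, MSI-X is
 * required (one vector per slice, named "<netdev>:slice-N"); otherwise
 * plain MSI is attempted, and if that fails the driver falls back to a
 * shared legacy INTx line handled by slice 0.
 */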
  1855. static int myri10ge_request_irq(struct myri10ge_priv *mgp)
  1856. {
  1857. struct pci_dev *pdev = mgp->pdev;
  1858. struct myri10ge_slice_state *ss;
  1859. struct net_device *netdev = mgp->dev;
  1860. int i;
  1861. int status;
  1862. mgp->msi_enabled = 0;
  1863. mgp->msix_enabled = 0;
  1864. status = 0;
  1865. if (myri10ge_msi) {
  1866. if (mgp->num_slices > 1) {
  1867. status =
  1868. pci_enable_msix(pdev, mgp->msix_vectors,
  1869. mgp->num_slices);
  1870. if (status == 0) {
  1871. mgp->msix_enabled = 1;
  1872. } else {
  1873. dev_err(&pdev->dev,
  1874. "Error %d setting up MSI-X\n", status);
  1875. return status;
  1876. }
  1877. }
  1878. if (mgp->msix_enabled == 0) {
  1879. status = pci_enable_msi(pdev);
  1880. if (status != 0) {
  1881. dev_err(&pdev->dev,
  1882. "Error %d setting up MSI; falling back to xPIC\n",
  1883. status);
  1884. } else {
  1885. mgp->msi_enabled = 1;
  1886. }
  1887. }
  1888. }
  1889. if (mgp->msix_enabled) {
  1890. for (i = 0; i < mgp->num_slices; i++) {
  1891. ss = &mgp->ss[i];
  1892. snprintf(ss->irq_desc, sizeof(ss->irq_desc),
  1893. "%s:slice-%d", netdev->name, i);
  1894. status = request_irq(mgp->msix_vectors[i].vector,
  1895. myri10ge_intr, 0, ss->irq_desc,
  1896. ss);
  1897. if (status != 0) {
  1898. dev_err(&pdev->dev,
  1899. "slice %d failed to allocate IRQ\n", i);
  1900. i--;
  1901. while (i >= 0) {
  1902. free_irq(mgp->msix_vectors[i].vector,
  1903. &mgp->ss[i]);
  1904. i--;
  1905. }
  1906. pci_disable_msix(pdev);
  1907. return status;
  1908. }
  1909. }
  1910. } else {
  1911. status = request_irq(pdev->irq, myri10ge_intr, IRQF_SHARED,
  1912. mgp->dev->name, &mgp->ss[0]);
  1913. if (status != 0) {
  1914. dev_err(&pdev->dev, "failed to allocate IRQ\n");
  1915. if (mgp->msi_enabled)
  1916. pci_disable_msi(pdev);
  1917. }
  1918. }
  1919. return status;
  1920. }
  1921. static void myri10ge_free_irq(struct myri10ge_priv *mgp)
  1922. {
  1923. struct pci_dev *pdev = mgp->pdev;
  1924. int i;
  1925. if (mgp->msix_enabled) {
  1926. for (i = 0; i < mgp->num_slices; i++)
  1927. free_irq(mgp->msix_vectors[i].vector, &mgp->ss[i]);
  1928. } else {
  1929. free_irq(pdev->irq, &mgp->ss[0]);
  1930. }
  1931. if (mgp->msi_enabled)
  1932. pci_disable_msi(pdev);
  1933. if (mgp->msix_enabled)
  1934. pci_disable_msix(pdev);
  1935. }
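/*
 * LRO header-parsing callback: only untagged or VLAN-tagged IPv4 TCP
 * frames are eligible.  For VLAN frames the partial checksum handed up
 * by the NIC starts ETH_HLEN bytes into the frame, so the VLAN header's
 * contribution is subtracted with csum_sub() before the pseudo-header
 * checksum is verified.
 */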
  1936. static int
  1937. myri10ge_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr,
  1938. void **ip_hdr, void **tcpudp_hdr,
  1939. u64 * hdr_flags, void *priv)
  1940. {
  1941. struct ethhdr *eh;
  1942. struct vlan_ethhdr *veh;
  1943. struct iphdr *iph;
  1944. u8 *va = page_address(frag->page) + frag->page_offset;
  1945. unsigned long ll_hlen;
  1946. /* passed opaque through lro_receive_frags() */
  1947. __wsum csum = (__force __wsum) (unsigned long)priv;
  1948. /* find the mac header, aborting if not IPv4 */
  1949. eh = (struct ethhdr *)va;
  1950. *mac_hdr = eh;
  1951. ll_hlen = ETH_HLEN;
  1952. if (eh->h_proto != htons(ETH_P_IP)) {
  1953. if (eh->h_proto == htons(ETH_P_8021Q)) {
  1954. veh = (struct vlan_ethhdr *)va;
  1955. if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP))
  1956. return -1;
  1957. ll_hlen += VLAN_HLEN;
  1958. /*
  1959. * HW checksum starts ETH_HLEN bytes into
  1960. * frame, so we must subtract off the VLAN
  1961. * header's checksum before csum can be used
  1962. */
  1963. csum = csum_sub(csum, csum_partial(va + ETH_HLEN,
  1964. VLAN_HLEN, 0));
  1965. } else {
  1966. return -1;
  1967. }
  1968. }
  1969. *hdr_flags = LRO_IPV4;
  1970. iph = (struct iphdr *)(va + ll_hlen);
  1971. *ip_hdr = iph;
  1972. if (iph->protocol != IPPROTO_TCP)
  1973. return -1;
  1974. if (iph->frag_off & htons(IP_MF | IP_OFFSET))
  1975. return -1;
  1976. *hdr_flags |= LRO_TCP;
  1977. *tcpudp_hdr = (u8 *) (*ip_hdr) + (iph->ihl << 2);
  1978. /* verify the IP checksum */
  1979. if (unlikely(ip_fast_csum((u8 *) iph, iph->ihl)))
  1980. return -1;
  1981. /* verify the checksum */
  1982. if (unlikely(csum_tcpudp_magic(iph->saddr, iph->daddr,
  1983. ntohs(iph->tot_len) - (iph->ihl << 2),
  1984. IPPROTO_TCP, csum)))
  1985. return -1;
  1986. return 0;
  1987. }
  1988. static int myri10ge_get_txrx(struct myri10ge_priv *mgp, int slice)
  1989. {
  1990. struct myri10ge_cmd cmd;
  1991. struct myri10ge_slice_state *ss;
  1992. int status;
  1993. ss = &mgp->ss[slice];
  1994. status = 0;
  1995. if (slice == 0 || (mgp->dev->real_num_tx_queues > 1)) {
  1996. cmd.data0 = slice;
  1997. status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET,
  1998. &cmd, 0);
  1999. ss->tx.lanai = (struct mcp_kreq_ether_send __iomem *)
  2000. (mgp->sram + cmd.data0);
  2001. }
  2002. cmd.data0 = slice;
  2003. status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SMALL_RX_OFFSET,
  2004. &cmd, 0);
  2005. ss->rx_small.lanai = (struct mcp_kreq_ether_recv __iomem *)
  2006. (mgp->sram + cmd.data0);
  2007. cmd.data0 = slice;
  2008. status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_BIG_RX_OFFSET, &cmd, 0);
  2009. ss->rx_big.lanai = (struct mcp_kreq_ether_recv __iomem *)
  2010. (mgp->sram + cmd.data0);
  2011. ss->tx.send_go = (__iomem __be32 *)
  2012. (mgp->sram + MXGEFW_ETH_SEND_GO + 64 * slice);
  2013. ss->tx.send_stop = (__iomem __be32 *)
  2014. (mgp->sram + MXGEFW_ETH_SEND_STOP + 64 * slice);
  2015. return status;
  2016. }
  2017. static int myri10ge_set_stats(struct myri10ge_priv *mgp, int slice)
  2018. {
  2019. struct myri10ge_cmd cmd;
  2020. struct myri10ge_slice_state *ss;
  2021. int status;
  2022. ss = &mgp->ss[slice];
  2023. cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->fw_stats_bus);
  2024. cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->fw_stats_bus);
  2025. cmd.data2 = sizeof(struct mcp_irq_data) | (slice << 16);
  2026. status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA_V2, &cmd, 0);
  2027. if (status == -ENOSYS) {
  2028. dma_addr_t bus = ss->fw_stats_bus;
  2029. if (slice != 0)
  2030. return -EINVAL;
  2031. bus += offsetof(struct mcp_irq_data, send_done_count);
  2032. cmd.data0 = MYRI10GE_LOWPART_TO_U32(bus);
  2033. cmd.data1 = MYRI10GE_HIGHPART_TO_U32(bus);
  2034. status = myri10ge_send_cmd(mgp,
  2035. MXGEFW_CMD_SET_STATS_DMA_OBSOLETE,
  2036. &cmd, 0);
  2037. /* Firmware cannot support multicast without STATS_DMA_V2 */
  2038. mgp->fw_multicast_support = 0;
  2039. } else {
  2040. mgp->fw_multicast_support = 1;
  2041. }
  2042. return 0;
  2043. }
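/*
 * Bring-up sequence: reset the NIC, program the RSS indirection table
 * when multiple slices are enabled, pick the small and big receive
 * buffer sizes, then set up rings, LRO and NAPI for every slice before
 * sending MXGEFW_CMD_ETHERNET_UP.  The big buffer size reported to the
 * firmware is rounded up to a power of two: with a 1500-byte MTU,
 * 1500 + ETH_HLEN + VLAN_HLEN + MXGEFW_PAD rounds up to 2048, while
 * big_bytes keeps the exact frame size for host-side accounting.
 */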
  2044. static int myri10ge_open(struct net_device *dev)
  2045. {
  2046. struct myri10ge_slice_state *ss;
  2047. struct myri10ge_priv *mgp = netdev_priv(dev);
  2048. struct myri10ge_cmd cmd;
  2049. int i, status, big_pow2, slice;
  2050. u8 *itable;
  2051. struct net_lro_mgr *lro_mgr;
  2052. if (mgp->running != MYRI10GE_ETH_STOPPED)
  2053. return -EBUSY;
  2054. mgp->running = MYRI10GE_ETH_STARTING;
  2055. status = myri10ge_reset(mgp);
  2056. if (status != 0) {
  2057. netdev_err(dev, "failed reset\n");
  2058. goto abort_with_nothing;
  2059. }
  2060. if (mgp->num_slices > 1) {
  2061. cmd.data0 = mgp->num_slices;
  2062. cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
  2063. if (mgp->dev->real_num_tx_queues > 1)
  2064. cmd.data1 |= MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES;
  2065. status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES,
  2066. &cmd, 0);
  2067. if (status != 0) {
  2068. netdev_err(dev, "failed to set number of slices\n");
  2069. goto abort_with_nothing;
  2070. }
  2071. /* setup the indirection table */
  2072. cmd.data0 = mgp->num_slices;
  2073. status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_TABLE_SIZE,
  2074. &cmd, 0);
  2075. status |= myri10ge_send_cmd(mgp,
  2076. MXGEFW_CMD_GET_RSS_TABLE_OFFSET,
  2077. &cmd, 0);
  2078. if (status != 0) {
  2079. netdev_err(dev, "failed to setup rss tables\n");
  2080. goto abort_with_nothing;
  2081. }
  2082. /* just enable an identity mapping */
  2083. itable = mgp->sram + cmd.data0;
  2084. for (i = 0; i < mgp->num_slices; i++)
  2085. __raw_writeb(i, &itable[i]);
  2086. cmd.data0 = 1;
  2087. cmd.data1 = myri10ge_rss_hash;
  2088. status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_ENABLE,
  2089. &cmd, 0);
  2090. if (status != 0) {
  2091. netdev_err(dev, "failed to enable slices\n");
  2092. goto abort_with_nothing;
  2093. }
  2094. }
  2095. status = myri10ge_request_irq(mgp);
  2096. if (status != 0)
  2097. goto abort_with_nothing;
  2098. /* decide what small buffer size to use. For good TCP rx
  2099. * performance, it is important to not receive 1514 byte
  2100. * frames into jumbo buffers, as it confuses the socket buffer
  2101. * accounting code, leading to drops and erratic performance.
  2102. */
  2103. if (dev->mtu <= ETH_DATA_LEN)
  2104. /* enough for a TCP header */
  2105. mgp->small_bytes = (128 > SMP_CACHE_BYTES)
  2106. ? (128 - MXGEFW_PAD)
  2107. : (SMP_CACHE_BYTES - MXGEFW_PAD);
  2108. else
  2109. /* enough for a vlan encapsulated ETH_DATA_LEN frame */
  2110. mgp->small_bytes = VLAN_ETH_FRAME_LEN;
  2111. /* Override the small buffer size? */
  2112. if (myri10ge_small_bytes > 0)
  2113. mgp->small_bytes = myri10ge_small_bytes;
2114. /* Firmware needs the big buffer size as a power of 2. Lie and
2115. * tell it the buffer is larger, because we only use 1
  2116. * buffer/pkt, and the mtu will prevent overruns.
  2117. */
  2118. big_pow2 = dev->mtu + ETH_HLEN + VLAN_HLEN + MXGEFW_PAD;
  2119. if (big_pow2 < MYRI10GE_ALLOC_SIZE / 2) {
  2120. while (!is_power_of_2(big_pow2))
  2121. big_pow2++;
  2122. mgp->big_bytes = dev->mtu + ETH_HLEN + VLAN_HLEN + MXGEFW_PAD;
  2123. } else {
  2124. big_pow2 = MYRI10GE_ALLOC_SIZE;
  2125. mgp->big_bytes = big_pow2;
  2126. }
  2127. /* setup the per-slice data structures */
  2128. for (slice = 0; slice < mgp->num_slices; slice++) {
  2129. ss = &mgp->ss[slice];
  2130. status = myri10ge_get_txrx(mgp, slice);
  2131. if (status != 0) {
  2132. netdev_err(dev, "failed to get ring sizes or locations\n");
  2133. goto abort_with_rings;
  2134. }
  2135. status = myri10ge_allocate_rings(ss);
  2136. if (status != 0)
  2137. goto abort_with_rings;
  2138. /* only firmware which supports multiple TX queues
  2139. * supports setting up the tx stats on non-zero
  2140. * slices */
  2141. if (slice == 0 || mgp->dev->real_num_tx_queues > 1)
  2142. status = myri10ge_set_stats(mgp, slice);
  2143. if (status) {
  2144. netdev_err(dev, "Couldn't set stats DMA\n");
  2145. goto abort_with_rings;
  2146. }
  2147. lro_mgr = &ss->rx_done.lro_mgr;
  2148. lro_mgr->dev = dev;
  2149. lro_mgr->features = LRO_F_NAPI;
  2150. lro_mgr->ip_summed = CHECKSUM_COMPLETE;
  2151. lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
  2152. lro_mgr->max_desc = MYRI10GE_MAX_LRO_DESCRIPTORS;
  2153. lro_mgr->lro_arr = ss->rx_done.lro_desc;
  2154. lro_mgr->get_frag_header = myri10ge_get_frag_header;
  2155. lro_mgr->max_aggr = myri10ge_lro_max_pkts;
  2156. lro_mgr->frag_align_pad = 2;
  2157. if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
  2158. lro_mgr->max_aggr = MAX_SKB_FRAGS;
  2159. /* must happen prior to any irq */
  2160. napi_enable(&(ss)->napi);
  2161. }
  2162. /* now give firmware buffers sizes, and MTU */
  2163. cmd.data0 = dev->mtu + ETH_HLEN + VLAN_HLEN;
  2164. status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_MTU, &cmd, 0);
  2165. cmd.data0 = mgp->small_bytes;
  2166. status |=
  2167. myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_SMALL_BUFFER_SIZE, &cmd, 0);
  2168. cmd.data0 = big_pow2;
  2169. status |=
  2170. myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_BIG_BUFFER_SIZE, &cmd, 0);
  2171. if (status) {
  2172. netdev_err(dev, "Couldn't set buffer sizes\n");
  2173. goto abort_with_rings;
  2174. }
  2175. /*
  2176. * Set Linux style TSO mode; this is needed only on newer
  2177. * firmware versions. Older versions default to Linux
  2178. * style TSO
  2179. */
  2180. cmd.data0 = 0;
  2181. status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_TSO_MODE, &cmd, 0);
  2182. if (status && status != -ENOSYS) {
  2183. netdev_err(dev, "Couldn't set TSO mode\n");
  2184. goto abort_with_rings;
  2185. }
  2186. mgp->link_state = ~0U;
  2187. mgp->rdma_tags_available = 15;
  2188. status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd, 0);
  2189. if (status) {
  2190. netdev_err(dev, "Couldn't bring up link\n");
  2191. goto abort_with_rings;
  2192. }
  2193. mgp->running = MYRI10GE_ETH_RUNNING;
  2194. mgp->watchdog_timer.expires = jiffies + myri10ge_watchdog_timeout * HZ;
  2195. add_timer(&mgp->watchdog_timer);
  2196. netif_tx_wake_all_queues(dev);
  2197. return 0;
  2198. abort_with_rings:
  2199. while (slice) {
  2200. slice--;
  2201. napi_disable(&mgp->ss[slice].napi);
  2202. }
  2203. for (i = 0; i < mgp->num_slices; i++)
  2204. myri10ge_free_rings(&mgp->ss[i]);
  2205. myri10ge_free_irq(mgp);
  2206. abort_with_nothing:
  2207. mgp->running = MYRI10GE_ETH_STOPPED;
  2208. return -ENOMEM;
  2209. }
  2210. static int myri10ge_close(struct net_device *dev)
  2211. {
  2212. struct myri10ge_priv *mgp = netdev_priv(dev);
  2213. struct myri10ge_cmd cmd;
  2214. int status, old_down_cnt;
  2215. int i;
  2216. if (mgp->running != MYRI10GE_ETH_RUNNING)
  2217. return 0;
  2218. if (mgp->ss[0].tx.req_bytes == NULL)
  2219. return 0;
  2220. del_timer_sync(&mgp->watchdog_timer);
  2221. mgp->running = MYRI10GE_ETH_STOPPING;
  2222. for (i = 0; i < mgp->num_slices; i++) {
  2223. napi_disable(&mgp->ss[i].napi);
  2224. }
  2225. netif_carrier_off(dev);
  2226. netif_tx_stop_all_queues(dev);
  2227. if (mgp->rebooted == 0) {
  2228. old_down_cnt = mgp->down_cnt;
  2229. mb();
  2230. status =
  2231. myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_DOWN, &cmd, 0);
  2232. if (status)
  2233. netdev_err(dev, "Couldn't bring down link\n");
  2234. wait_event_timeout(mgp->down_wq, old_down_cnt != mgp->down_cnt,
  2235. HZ);
  2236. if (old_down_cnt == mgp->down_cnt)
  2237. netdev_err(dev, "never got down irq\n");
  2238. }
  2239. netif_tx_disable(dev);
  2240. myri10ge_free_irq(mgp);
  2241. for (i = 0; i < mgp->num_slices; i++)
  2242. myri10ge_free_rings(&mgp->ss[i]);
  2243. mgp->running = MYRI10GE_ETH_STOPPED;
  2244. return 0;
  2245. }
  2246. /* copy an array of struct mcp_kreq_ether_send's to the mcp. Copy
  2247. * backwards one at a time and handle ring wraps */
  2248. static inline void
  2249. myri10ge_submit_req_backwards(struct myri10ge_tx_buf *tx,
  2250. struct mcp_kreq_ether_send *src, int cnt)
  2251. {
  2252. int idx, starting_slot;
  2253. starting_slot = tx->req;
  2254. while (cnt > 1) {
  2255. cnt--;
  2256. idx = (starting_slot + cnt) & tx->mask;
  2257. myri10ge_pio_copy(&tx->lanai[idx], &src[cnt], sizeof(*src));
  2258. mb();
  2259. }
  2260. }
  2261. /*
  2262. * copy an array of struct mcp_kreq_ether_send's to the mcp. Copy
  2263. * at most 32 bytes at a time, so as to avoid involving the software
  2264. * pio handler in the nic. We re-write the first segment's flags
  2265. * to mark them valid only after writing the entire chain.
  2266. */
  2267. static inline void
  2268. myri10ge_submit_req(struct myri10ge_tx_buf *tx, struct mcp_kreq_ether_send *src,
  2269. int cnt)
  2270. {
  2271. int idx, i;
  2272. struct mcp_kreq_ether_send __iomem *dstp, *dst;
  2273. struct mcp_kreq_ether_send *srcp;
  2274. u8 last_flags;
  2275. idx = tx->req & tx->mask;
  2276. last_flags = src->flags;
  2277. src->flags = 0;
  2278. mb();
  2279. dst = dstp = &tx->lanai[idx];
  2280. srcp = src;
  2281. if ((idx + cnt) < tx->mask) {
  2282. for (i = 0; i < (cnt - 1); i += 2) {
  2283. myri10ge_pio_copy(dstp, srcp, 2 * sizeof(*src));
  2284. mb(); /* force write every 32 bytes */
  2285. srcp += 2;
  2286. dstp += 2;
  2287. }
  2288. } else {
  2289. /* submit all but the first request, and ensure
  2290. * that it is submitted below */
  2291. myri10ge_submit_req_backwards(tx, src, cnt);
  2292. i = 0;
  2293. }
  2294. if (i < cnt) {
  2295. /* submit the first request */
  2296. myri10ge_pio_copy(dstp, srcp, sizeof(*src));
  2297. mb(); /* barrier before setting valid flag */
  2298. }
  2299. /* re-write the last 32-bits with the valid flags */
  2300. src->flags = last_flags;
  2301. put_be32(*((__be32 *) src + 3), (__be32 __iomem *) dst + 3);
  2302. tx->req += cnt;
  2303. mb();
  2304. }
  2305. /*
  2306. * Transmit a packet. We need to split the packet so that a single
  2307. * segment does not cross myri10ge->tx_boundary, so this makes segment
  2308. * counting tricky. So rather than try to count segments up front, we
  2309. * just give up if there are too few segments to hold a reasonably
  2310. * fragmented packet currently available. If we run
  2311. * out of segments while preparing a packet for DMA, we just linearize
  2312. * it and try again.
  2313. */
  2314. static netdev_tx_t myri10ge_xmit(struct sk_buff *skb,
  2315. struct net_device *dev)
  2316. {
  2317. struct myri10ge_priv *mgp = netdev_priv(dev);
  2318. struct myri10ge_slice_state *ss;
  2319. struct mcp_kreq_ether_send *req;
  2320. struct myri10ge_tx_buf *tx;
  2321. struct skb_frag_struct *frag;
  2322. struct netdev_queue *netdev_queue;
  2323. dma_addr_t bus;
  2324. u32 low;
  2325. __be32 high_swapped;
  2326. unsigned int len;
  2327. int idx, last_idx, avail, frag_cnt, frag_idx, count, mss, max_segments;
  2328. u16 pseudo_hdr_offset, cksum_offset, queue;
  2329. int cum_len, seglen, boundary, rdma_count;
  2330. u8 flags, odd_flag;
  2331. queue = skb_get_queue_mapping(skb);
  2332. ss = &mgp->ss[queue];
  2333. netdev_queue = netdev_get_tx_queue(mgp->dev, queue);
  2334. tx = &ss->tx;
  2335. again:
  2336. req = tx->req_list;
  2337. avail = tx->mask - 1 - (tx->req - tx->done);
  2338. mss = 0;
  2339. max_segments = MXGEFW_MAX_SEND_DESC;
  2340. if (skb_is_gso(skb)) {
  2341. mss = skb_shinfo(skb)->gso_size;
  2342. max_segments = MYRI10GE_MAX_SEND_DESC_TSO;
  2343. }
  2344. if ((unlikely(avail < max_segments))) {
  2345. /* we are out of transmit resources */
  2346. tx->stop_queue++;
  2347. netif_tx_stop_queue(netdev_queue);
  2348. return NETDEV_TX_BUSY;
  2349. }
  2350. /* Setup checksum offloading, if needed */
  2351. cksum_offset = 0;
  2352. pseudo_hdr_offset = 0;
  2353. odd_flag = 0;
  2354. flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST);
  2355. if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
  2356. cksum_offset = skb_checksum_start_offset(skb);
  2357. pseudo_hdr_offset = cksum_offset + skb->csum_offset;
  2358. /* If the headers are excessively large, then we must
  2359. * fall back to a software checksum */
  2360. if (unlikely(!mss && (cksum_offset > 255 ||
  2361. pseudo_hdr_offset > 127))) {
  2362. if (skb_checksum_help(skb))
  2363. goto drop;
  2364. cksum_offset = 0;
  2365. pseudo_hdr_offset = 0;
  2366. } else {
  2367. odd_flag = MXGEFW_FLAGS_ALIGN_ODD;
  2368. flags |= MXGEFW_FLAGS_CKSUM;
  2369. }
  2370. }
  2371. cum_len = 0;
  2372. if (mss) { /* TSO */
  2373. /* this removes any CKSUM flag from before */
  2374. flags = (MXGEFW_FLAGS_TSO_HDR | MXGEFW_FLAGS_FIRST);
  2375. /* negative cum_len signifies to the
  2376. * send loop that we are still in the
  2377. * header portion of the TSO packet.
  2378. * TSO header can be at most 1KB long */
  2379. cum_len = -(skb_transport_offset(skb) + tcp_hdrlen(skb));
  2380. /* for IPv6 TSO, the checksum offset stores the
  2381. * TCP header length, to save the firmware from
  2382. * the need to parse the headers */
  2383. if (skb_is_gso_v6(skb)) {
  2384. cksum_offset = tcp_hdrlen(skb);
  2385. /* Can only handle headers <= max_tso6 long */
  2386. if (unlikely(-cum_len > mgp->max_tso6))
  2387. return myri10ge_sw_tso(skb, dev);
  2388. }
  2389. /* for TSO, pseudo_hdr_offset holds mss.
  2390. * The firmware figures out where to put
  2391. * the checksum by parsing the header. */
  2392. pseudo_hdr_offset = mss;
  2393. } else
  2394. /* Mark small packets, and pad out tiny packets */
  2395. if (skb->len <= MXGEFW_SEND_SMALL_SIZE) {
  2396. flags |= MXGEFW_FLAGS_SMALL;
  2397. /* pad frames to at least ETH_ZLEN bytes */
  2398. if (unlikely(skb->len < ETH_ZLEN)) {
  2399. if (skb_padto(skb, ETH_ZLEN)) {
  2400. /* The packet is gone, so we must
  2401. * return 0 */
  2402. ss->stats.tx_dropped += 1;
  2403. return NETDEV_TX_OK;
  2404. }
  2405. /* adjust the len to account for the zero pad
  2406. * so that the nic can know how long it is */
  2407. skb->len = ETH_ZLEN;
  2408. }
  2409. }
  2410. /* map the skb for DMA */
  2411. len = skb_headlen(skb);
  2412. idx = tx->req & tx->mask;
  2413. tx->info[idx].skb = skb;
  2414. bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE);
  2415. dma_unmap_addr_set(&tx->info[idx], bus, bus);
  2416. dma_unmap_len_set(&tx->info[idx], len, len);
  2417. frag_cnt = skb_shinfo(skb)->nr_frags;
  2418. frag_idx = 0;
  2419. count = 0;
  2420. rdma_count = 0;
  2421. /* "rdma_count" is the number of RDMAs belonging to the
  2422. * current packet BEFORE the current send request. For
  2423. * non-TSO packets, this is equal to "count".
  2424. * For TSO packets, rdma_count needs to be reset
  2425. * to 0 after a segment cut.
  2426. *
  2427. * The rdma_count field of the send request is
  2428. * the number of RDMAs of the packet starting at
2429. * that request. For TSO send requests with one or more cuts
  2430. * in the middle, this is the number of RDMAs starting
  2431. * after the last cut in the request. All previous
  2432. * segments before the last cut implicitly have 1 RDMA.
  2433. *
  2434. * Since the number of RDMAs is not known beforehand,
  2435. * it must be filled-in retroactively - after each
  2436. * segmentation cut or at the end of the entire packet.
  2437. */
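/* Example of the retroactive fill for a non-TSO packet built from
 * three send requests: each request is first written with
 * rdma_count = 1, and the final "(req - rdma_count)->rdma_count =
 * rdma_count" store rewrites the first request's count to 3 once the
 * whole chain is known. */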
  2438. while (1) {
  2439. /* Break the SKB or Fragment up into pieces which
  2440. * do not cross mgp->tx_boundary */
  2441. low = MYRI10GE_LOWPART_TO_U32(bus);
  2442. high_swapped = htonl(MYRI10GE_HIGHPART_TO_U32(bus));
  2443. while (len) {
  2444. u8 flags_next;
  2445. int cum_len_next;
  2446. if (unlikely(count == max_segments))
  2447. goto abort_linearize;
  2448. boundary =
  2449. (low + mgp->tx_boundary) & ~(mgp->tx_boundary - 1);
  2450. seglen = boundary - low;
  2451. if (seglen > len)
  2452. seglen = len;
  2453. flags_next = flags & ~MXGEFW_FLAGS_FIRST;
  2454. cum_len_next = cum_len + seglen;
  2455. if (mss) { /* TSO */
  2456. (req - rdma_count)->rdma_count = rdma_count + 1;
  2457. if (likely(cum_len >= 0)) { /* payload */
  2458. int next_is_first, chop;
  2459. chop = (cum_len_next > mss);
  2460. cum_len_next = cum_len_next % mss;
  2461. next_is_first = (cum_len_next == 0);
  2462. flags |= chop * MXGEFW_FLAGS_TSO_CHOP;
  2463. flags_next |= next_is_first *
  2464. MXGEFW_FLAGS_FIRST;
  2465. rdma_count |= -(chop | next_is_first);
  2466. rdma_count += chop & !next_is_first;
  2467. } else if (likely(cum_len_next >= 0)) { /* header ends */
  2468. int small;
  2469. rdma_count = -1;
  2470. cum_len_next = 0;
  2471. seglen = -cum_len;
  2472. small = (mss <= MXGEFW_SEND_SMALL_SIZE);
  2473. flags_next = MXGEFW_FLAGS_TSO_PLD |
  2474. MXGEFW_FLAGS_FIRST |
  2475. (small * MXGEFW_FLAGS_SMALL);
  2476. }
  2477. }
  2478. req->addr_high = high_swapped;
  2479. req->addr_low = htonl(low);
  2480. req->pseudo_hdr_offset = htons(pseudo_hdr_offset);
  2481. req->pad = 0; /* complete solid 16-byte block; does this matter? */
  2482. req->rdma_count = 1;
  2483. req->length = htons(seglen);
  2484. req->cksum_offset = cksum_offset;
  2485. req->flags = flags | ((cum_len & 1) * odd_flag);
  2486. low += seglen;
  2487. len -= seglen;
  2488. cum_len = cum_len_next;
  2489. flags = flags_next;
  2490. req++;
  2491. count++;
  2492. rdma_count++;
  2493. if (cksum_offset != 0 && !(mss && skb_is_gso_v6(skb))) {
  2494. if (unlikely(cksum_offset > seglen))
  2495. cksum_offset -= seglen;
  2496. else
  2497. cksum_offset = 0;
  2498. }
  2499. }
  2500. if (frag_idx == frag_cnt)
  2501. break;
  2502. /* map next fragment for DMA */
  2503. idx = (count + tx->req) & tx->mask;
  2504. frag = &skb_shinfo(skb)->frags[frag_idx];
  2505. frag_idx++;
  2506. len = frag->size;
  2507. bus = pci_map_page(mgp->pdev, frag->page, frag->page_offset,
  2508. len, PCI_DMA_TODEVICE);
  2509. dma_unmap_addr_set(&tx->info[idx], bus, bus);
  2510. dma_unmap_len_set(&tx->info[idx], len, len);
  2511. }
  2512. (req - rdma_count)->rdma_count = rdma_count;
  2513. if (mss)
  2514. do {
  2515. req--;
  2516. req->flags |= MXGEFW_FLAGS_TSO_LAST;
  2517. } while (!(req->flags & (MXGEFW_FLAGS_TSO_CHOP |
  2518. MXGEFW_FLAGS_FIRST)));
  2519. idx = ((count - 1) + tx->req) & tx->mask;
  2520. tx->info[idx].last = 1;
  2521. myri10ge_submit_req(tx, tx->req_list, count);
  2522. /* if using multiple tx queues, make sure NIC polls the
  2523. * current slice */
  2524. if ((mgp->dev->real_num_tx_queues > 1) && tx->queue_active == 0) {
  2525. tx->queue_active = 1;
  2526. put_be32(htonl(1), tx->send_go);
  2527. mb();
  2528. mmiowb();
  2529. }
  2530. tx->pkt_start++;
  2531. if ((avail - count) < MXGEFW_MAX_SEND_DESC) {
  2532. tx->stop_queue++;
  2533. netif_tx_stop_queue(netdev_queue);
  2534. }
  2535. return NETDEV_TX_OK;
  2536. abort_linearize:
  2537. /* Free any DMA resources we've alloced and clear out the skb
  2538. * slot so as to not trip up assertions, and to avoid a
  2539. * double-free if linearizing fails */
  2540. last_idx = (idx + 1) & tx->mask;
  2541. idx = tx->req & tx->mask;
  2542. tx->info[idx].skb = NULL;
  2543. do {
  2544. len = dma_unmap_len(&tx->info[idx], len);
  2545. if (len) {
  2546. if (tx->info[idx].skb != NULL)
  2547. pci_unmap_single(mgp->pdev,
  2548. dma_unmap_addr(&tx->info[idx],
  2549. bus), len,
  2550. PCI_DMA_TODEVICE);
  2551. else
  2552. pci_unmap_page(mgp->pdev,
  2553. dma_unmap_addr(&tx->info[idx],
  2554. bus), len,
  2555. PCI_DMA_TODEVICE);
  2556. dma_unmap_len_set(&tx->info[idx], len, 0);
  2557. tx->info[idx].skb = NULL;
  2558. }
  2559. idx = (idx + 1) & tx->mask;
  2560. } while (idx != last_idx);
  2561. if (skb_is_gso(skb)) {
  2562. netdev_err(mgp->dev, "TSO but wanted to linearize?!?!?\n");
  2563. goto drop;
  2564. }
  2565. if (skb_linearize(skb))
  2566. goto drop;
  2567. tx->linearized++;
  2568. goto again;
  2569. drop:
  2570. dev_kfree_skb_any(skb);
  2571. ss->stats.tx_dropped += 1;
  2572. return NETDEV_TX_OK;
  2573. }
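/*
 * Software TSO fallback, used when an IPv6 TSO header is longer than
 * the firmware's max_tso6 limit: the skb is segmented with
 * skb_gso_segment() (TSO6 masked out of the feature set) and each
 * resulting segment is handed back to myri10ge_xmit() individually.
 */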
  2574. static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
  2575. struct net_device *dev)
  2576. {
  2577. struct sk_buff *segs, *curr;
  2578. struct myri10ge_priv *mgp = netdev_priv(dev);
  2579. struct myri10ge_slice_state *ss;
  2580. netdev_tx_t status;
  2581. segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO6);
  2582. if (IS_ERR(segs))
  2583. goto drop;
  2584. while (segs) {
  2585. curr = segs;
  2586. segs = segs->next;
  2587. curr->next = NULL;
  2588. status = myri10ge_xmit(curr, dev);
  2589. if (status != 0) {
  2590. dev_kfree_skb_any(curr);
  2591. if (segs != NULL) {
  2592. curr = segs;
  2593. segs = segs->next;
  2594. curr->next = NULL;
  2595. dev_kfree_skb_any(segs);
  2596. }
  2597. goto drop;
  2598. }
  2599. }
  2600. dev_kfree_skb_any(skb);
  2601. return NETDEV_TX_OK;
  2602. drop:
  2603. ss = &mgp->ss[skb_get_queue_mapping(skb)];
  2604. dev_kfree_skb_any(skb);
  2605. ss->stats.tx_dropped += 1;
  2606. return NETDEV_TX_OK;
  2607. }
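/*
 * Fill a rtnl_link_stats64 by summing the per-slice counters; the
 * stats_lock serializes concurrent readers while the totals are
 * accumulated.
 */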
  2608. static struct rtnl_link_stats64 *myri10ge_get_stats(struct net_device *dev,
  2609. struct rtnl_link_stats64 *stats)
  2610. {
  2611. struct myri10ge_priv *mgp = netdev_priv(dev);
  2612. struct myri10ge_slice_netstats *slice_stats;
  2613. int i;
  2614. spin_lock(&mgp->stats_lock);
  2615. memset(stats, 0, sizeof(*stats));
  2616. for (i = 0; i < mgp->num_slices; i++) {
  2617. slice_stats = &mgp->ss[i].stats;
  2618. stats->rx_packets += slice_stats->rx_packets;
  2619. stats->tx_packets += slice_stats->tx_packets;
  2620. stats->rx_bytes += slice_stats->rx_bytes;
  2621. stats->tx_bytes += slice_stats->tx_bytes;
  2622. stats->rx_dropped += slice_stats->rx_dropped;
  2623. stats->tx_dropped += slice_stats->tx_dropped;
  2624. }
  2625. spin_unlock(&mgp->stats_lock);
  2626. return stats;
  2627. }
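/*
 * Program the multicast filter. The sequence is: temporarily enable
 * ALLMULTI, flush the firmware's filter list, join each address on
 * the device's mc list, then disable ALLMULTI so filtering takes
 * effect again. On any command failure we simply return, leaving the
 * NIC in ALLMULTI mode so that no multicast traffic is lost.
 */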
  2628. static void myri10ge_set_multicast_list(struct net_device *dev)
  2629. {
  2630. struct myri10ge_priv *mgp = netdev_priv(dev);
  2631. struct myri10ge_cmd cmd;
  2632. struct netdev_hw_addr *ha;
  2633. __be32 data[2] = { 0, 0 };
  2634. int err;
  2635. /* can be called from atomic contexts,
  2636. * pass 1 to force atomicity in myri10ge_send_cmd() */
  2637. myri10ge_change_promisc(mgp, dev->flags & IFF_PROMISC, 1);
  2638. /* This firmware is known to not support multicast */
  2639. if (!mgp->fw_multicast_support)
  2640. return;
  2641. /* Disable multicast filtering */
  2642. err = myri10ge_send_cmd(mgp, MXGEFW_ENABLE_ALLMULTI, &cmd, 1);
  2643. if (err != 0) {
  2644. netdev_err(dev, "Failed MXGEFW_ENABLE_ALLMULTI, error status: %d\n",
  2645. err);
  2646. goto abort;
  2647. }
  2648. if ((dev->flags & IFF_ALLMULTI) || mgp->adopted_rx_filter_bug) {
  2649. /* request to disable multicast filtering, so quit here */
  2650. return;
  2651. }
  2652. /* Flush the filters */
  2653. err = myri10ge_send_cmd(mgp, MXGEFW_LEAVE_ALL_MULTICAST_GROUPS,
  2654. &cmd, 1);
  2655. if (err != 0) {
  2656. netdev_err(dev, "Failed MXGEFW_LEAVE_ALL_MULTICAST_GROUPS, error status: %d\n",
  2657. err);
  2658. goto abort;
  2659. }
  2660. /* Walk the multicast list, and add each address */
  2661. netdev_for_each_mc_addr(ha, dev) {
  2662. memcpy(data, &ha->addr, 6);
  2663. cmd.data0 = ntohl(data[0]);
  2664. cmd.data1 = ntohl(data[1]);
  2665. err = myri10ge_send_cmd(mgp, MXGEFW_JOIN_MULTICAST_GROUP,
  2666. &cmd, 1);
  2667. if (err != 0) {
  2668. netdev_err(dev, "Failed MXGEFW_JOIN_MULTICAST_GROUP, error status:%d %pM\n",
  2669. err, ha->addr);
  2670. goto abort;
  2671. }
  2672. }
  2673. /* Enable multicast filtering */
  2674. err = myri10ge_send_cmd(mgp, MXGEFW_DISABLE_ALLMULTI, &cmd, 1);
  2675. if (err != 0) {
  2676. netdev_err(dev, "Failed MXGEFW_DISABLE_ALLMULTI, error status: %d\n",
  2677. err);
  2678. goto abort;
  2679. }
  2680. return;
  2681. abort:
  2682. return;
  2683. }
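/*
 * Update the MAC address in firmware first; dev->dev_addr is only
 * touched once the command has succeeded.
 */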
  2684. static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
  2685. {
  2686. struct sockaddr *sa = addr;
  2687. struct myri10ge_priv *mgp = netdev_priv(dev);
  2688. int status;
  2689. if (!is_valid_ether_addr(sa->sa_data))
  2690. return -EADDRNOTAVAIL;
  2691. status = myri10ge_update_mac_address(mgp, sa->sa_data);
  2692. if (status != 0) {
  2693. netdev_err(dev, "changing mac address failed with %d\n",
  2694. status);
  2695. return status;
  2696. }
  2697. /* change the dev structure */
  2698. memcpy(dev->dev_addr, sa->sa_data, 6);
  2699. return 0;
  2700. }
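/* LRO depends on receive checksum offload, so drop it when RXCSUM is cleared. */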
  2701. static u32 myri10ge_fix_features(struct net_device *dev, u32 features)
  2702. {
  2703. if (!(features & NETIF_F_RXCSUM))
  2704. features &= ~NETIF_F_LRO;
  2705. return features;
  2706. }
  2707. static int myri10ge_change_mtu(struct net_device *dev, int new_mtu)
  2708. {
  2709. struct myri10ge_priv *mgp = netdev_priv(dev);
  2710. int error = 0;
  2711. if ((new_mtu < 68) || (ETH_HLEN + new_mtu > MYRI10GE_MAX_ETHER_MTU)) {
  2712. netdev_err(dev, "new mtu (%d) is not valid\n", new_mtu);
  2713. return -EINVAL;
  2714. }
  2715. netdev_info(dev, "changing mtu from %d to %d\n", dev->mtu, new_mtu);
  2716. if (mgp->running) {
  2717. /* if we change the mtu on an active device, we must
  2718. * reset the device so the firmware sees the change */
  2719. myri10ge_close(dev);
  2720. dev->mtu = new_mtu;
  2721. myri10ge_open(dev);
  2722. } else
  2723. dev->mtu = new_mtu;
  2724. return error;
  2725. }
  2726. /*
  2727. * Enable ECRC to align PCI-E Completion packets on an 8-byte boundary.
  2728. * Only do it if the bridge is a root port since we don't want to disturb
  2729. * any other device, except if forced with myri10ge_ecrc_enable > 1.
  2730. */
  2731. static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp)
  2732. {
  2733. struct pci_dev *bridge = mgp->pdev->bus->self;
  2734. struct device *dev = &mgp->pdev->dev;
  2735. unsigned cap;
  2736. unsigned err_cap;
  2737. u16 val;
  2738. u8 ext_type;
  2739. int ret;
  2740. if (!myri10ge_ecrc_enable || !bridge)
  2741. return;
  2742. /* check that the bridge is a root port */
  2743. cap = pci_find_capability(bridge, PCI_CAP_ID_EXP);
  2744. pci_read_config_word(bridge, cap + PCI_CAP_FLAGS, &val);
  2745. ext_type = (val & PCI_EXP_FLAGS_TYPE) >> 4;
  2746. if (ext_type != PCI_EXP_TYPE_ROOT_PORT) {
  2747. if (myri10ge_ecrc_enable > 1) {
  2748. struct pci_dev *prev_bridge, *old_bridge = bridge;
  2749. /* Walk the hierarchy up to the root port
  2750. * where ECRC has to be enabled */
  2751. do {
  2752. prev_bridge = bridge;
  2753. bridge = bridge->bus->self;
  2754. if (!bridge || prev_bridge == bridge) {
  2755. dev_err(dev,
  2756. "Failed to find root port"
  2757. " to force ECRC\n");
  2758. return;
  2759. }
  2760. cap =
  2761. pci_find_capability(bridge, PCI_CAP_ID_EXP);
  2762. pci_read_config_word(bridge,
  2763. cap + PCI_CAP_FLAGS, &val);
  2764. ext_type = (val & PCI_EXP_FLAGS_TYPE) >> 4;
  2765. } while (ext_type != PCI_EXP_TYPE_ROOT_PORT);
  2766. dev_info(dev,
  2767. "Forcing ECRC on non-root port %s"
  2768. " (enabling on root port %s)\n",
  2769. pci_name(old_bridge), pci_name(bridge));
  2770. } else {
  2771. dev_err(dev,
  2772. "Not enabling ECRC on non-root port %s\n",
  2773. pci_name(bridge));
  2774. return;
  2775. }
  2776. }
  2777. cap = pci_find_ext_capability(bridge, PCI_EXT_CAP_ID_ERR);
  2778. if (!cap)
  2779. return;
  2780. ret = pci_read_config_dword(bridge, cap + PCI_ERR_CAP, &err_cap);
  2781. if (ret) {
  2782. dev_err(dev, "failed reading ext-conf-space of %s\n",
  2783. pci_name(bridge));
  2784. dev_err(dev, "\t pci=nommconf in use? "
  2785. "or buggy/incomplete/absent ACPI MCFG attr?\n");
  2786. return;
  2787. }
  2788. if (!(err_cap & PCI_ERR_CAP_ECRC_GENC))
  2789. return;
  2790. err_cap |= PCI_ERR_CAP_ECRC_GENE;
  2791. pci_write_config_dword(bridge, cap + PCI_ERR_CAP, err_cap);
  2792. dev_info(dev, "Enabled ECRC on upstream bridge %s\n", pci_name(bridge));
  2793. }
  2794. /*
  2795. * The Lanai Z8E PCI-E interface achieves higher Read-DMA throughput
  2796. * when the PCI-E Completion packets are aligned on an 8-byte
  2797. * boundary. Some PCI-E chip sets always align Completion packets; on
  2798. * the ones that do not, the alignment can be enforced by enabling
  2799. * ECRC generation (if supported).
  2800. *
  2801. * When PCI-E Completion packets are not aligned, it is actually more
  2802. * efficient to limit Read-DMA transactions to 2KB, rather than 4KB.
  2803. *
  2804. * If the driver can neither enable ECRC nor verify that it has
  2805. * already been enabled, then it must use a firmware image which works
  2806. * around unaligned completion packets (myri10ge_rss_ethp_z8e.dat), and it
  2807. * should also ensure that it never gives the device a Read-DMA which is
  2808. * larger than 2KB by setting the tx_boundary to 2KB. If ECRC is
  2809. * enabled, then the driver should use the aligned (myri10ge_rss_eth_z8e.dat)
  2810. * firmware image, and set tx_boundary to 4KB.
  2811. */
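/*
 * Sketch of the probe below:
 *   1) assume aligned completions: set tx_boundary to 4096 and load
 *      the aligned firmware;
 *   2) enable ECRC on the root port if possible;
 *   3) run the firmware's unaligned-completion DMA test;
 *   4) keep the aligned firmware if the test passes, otherwise fall
 *      back to the unaligned firmware and a 2048-byte tx_boundary.
 */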
  2812. static void myri10ge_firmware_probe(struct myri10ge_priv *mgp)
  2813. {
  2814. struct pci_dev *pdev = mgp->pdev;
  2815. struct device *dev = &pdev->dev;
  2816. int status;
  2817. mgp->tx_boundary = 4096;
  2818. /*
  2819. * Verify the max read request size was set to 4KB
  2820. * before trying the test with 4KB.
  2821. */
  2822. status = pcie_get_readrq(pdev);
  2823. if (status < 0) {
  2824. dev_err(dev, "Couldn't read max read req size: %d\n", status);
  2825. goto abort;
  2826. }
  2827. if (status != 4096) {
  2828. dev_warn(dev, "Max Read Request size != 4096 (%d)\n", status);
  2829. mgp->tx_boundary = 2048;
  2830. }
  2831. /*
  2832. * load the optimized firmware (which assumes aligned PCIe
  2833. * completions) in order to see if it works on this host.
  2834. */
  2835. set_fw_name(mgp, myri10ge_fw_aligned, false);
  2836. status = myri10ge_load_firmware(mgp, 1);
  2837. if (status != 0) {
  2838. goto abort;
  2839. }
  2840. /*
  2841. * Enable ECRC if possible
  2842. */
  2843. myri10ge_enable_ecrc(mgp);
  2844. /*
  2845. * Run a DMA test which watches for unaligned completions and
  2846. * aborts on the first one seen.
  2847. */
  2848. status = myri10ge_dma_test(mgp, MXGEFW_CMD_UNALIGNED_TEST);
  2849. if (status == 0)
  2850. return; /* keep the aligned firmware */
  2851. if (status != -E2BIG)
  2852. dev_warn(dev, "DMA test failed: %d\n", status);
  2853. if (status == -ENOSYS)
  2854. dev_warn(dev, "Falling back to ethp! "
  2855. "Please install up to date fw\n");
  2856. abort:
  2857. /* fall back to using the unaligned firmware */
  2858. mgp->tx_boundary = 2048;
  2859. set_fw_name(mgp, myri10ge_fw_unaligned, false);
  2860. }
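/*
 * Firmware selection order: with myri10ge_force_firmware == 0 the
 * choice is made from the PCIe link width (narrow links always get
 * the aligned image) or from the DMA probe above; 1 forces the
 * aligned image and any other non-zero value forces the unaligned
 * image. The myri10ge_fw_name and per-board myri10ge_fw_names module
 * parameters override whatever was chosen. Illustrative usage only:
 *   modprobe myri10ge myri10ge_force_firmware=1
 */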
  2861. static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
  2862. {
  2863. int overridden = 0;
  2864. if (myri10ge_force_firmware == 0) {
  2865. int link_width, exp_cap;
  2866. u16 lnk;
  2867. exp_cap = pci_find_capability(mgp->pdev, PCI_CAP_ID_EXP);
  2868. pci_read_config_word(mgp->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);
  2869. link_width = (lnk >> 4) & 0x3f;
  2870. /* Check to see if the link width is less than 8 lanes or if the
  2871. * upstream bridge is known to provide aligned
  2872. * completions */
  2873. if (link_width < 8) {
  2874. dev_info(&mgp->pdev->dev, "PCIE x%d Link\n",
  2875. link_width);
  2876. mgp->tx_boundary = 4096;
  2877. set_fw_name(mgp, myri10ge_fw_aligned, false);
  2878. } else {
  2879. myri10ge_firmware_probe(mgp);
  2880. }
  2881. } else {
  2882. if (myri10ge_force_firmware == 1) {
  2883. dev_info(&mgp->pdev->dev,
  2884. "Assuming aligned completions (forced)\n");
  2885. mgp->tx_boundary = 4096;
  2886. set_fw_name(mgp, myri10ge_fw_aligned, false);
  2887. } else {
  2888. dev_info(&mgp->pdev->dev,
  2889. "Assuming unaligned completions (forced)\n");
  2890. mgp->tx_boundary = 2048;
  2891. set_fw_name(mgp, myri10ge_fw_unaligned, false);
  2892. }
  2893. }
  2894. kparam_block_sysfs_write(myri10ge_fw_name);
  2895. if (myri10ge_fw_name != NULL) {
  2896. char *fw_name = kstrdup(myri10ge_fw_name, GFP_KERNEL);
  2897. if (fw_name) {
  2898. overridden = 1;
  2899. set_fw_name(mgp, fw_name, true);
  2900. }
  2901. }
  2902. kparam_unblock_sysfs_write(myri10ge_fw_name);
  2903. if (mgp->board_number < MYRI10GE_MAX_BOARDS &&
  2904. myri10ge_fw_names[mgp->board_number] != NULL &&
  2905. strlen(myri10ge_fw_names[mgp->board_number])) {
  2906. set_fw_name(mgp, myri10ge_fw_names[mgp->board_number], false);
  2907. overridden = 1;
  2908. }
  2909. if (overridden)
  2910. dev_info(&mgp->pdev->dev, "overriding firmware to %s\n",
  2911. mgp->fw_name);
  2912. }
  2913. #ifdef CONFIG_PM
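/*
 * Power management: suspend detaches the netdev, closes it if it was
 * running, quiesces the firmware with myri10ge_dummy_rdma() and saves
 * PCI config space before powering down; resume restores config
 * space, resets the NIC and re-opens the interface.
 */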
  2914. static int myri10ge_suspend(struct pci_dev *pdev, pm_message_t state)
  2915. {
  2916. struct myri10ge_priv *mgp;
  2917. struct net_device *netdev;
  2918. mgp = pci_get_drvdata(pdev);
  2919. if (mgp == NULL)
  2920. return -EINVAL;
  2921. netdev = mgp->dev;
  2922. netif_device_detach(netdev);
  2923. if (netif_running(netdev)) {
  2924. netdev_info(netdev, "closing\n");
  2925. rtnl_lock();
  2926. myri10ge_close(netdev);
  2927. rtnl_unlock();
  2928. }
  2929. myri10ge_dummy_rdma(mgp, 0);
  2930. pci_save_state(pdev);
  2931. pci_disable_device(pdev);
  2932. return pci_set_power_state(pdev, pci_choose_state(pdev, state));
  2933. }
  2934. static int myri10ge_resume(struct pci_dev *pdev)
  2935. {
  2936. struct myri10ge_priv *mgp;
  2937. struct net_device *netdev;
  2938. int status;
  2939. u16 vendor;
  2940. mgp = pci_get_drvdata(pdev);
  2941. if (mgp == NULL)
  2942. return -EINVAL;
  2943. netdev = mgp->dev;
  2944. pci_set_power_state(pdev, 0); /* zeros conf space as a side effect */
  2945. msleep(5); /* give card time to respond */
  2946. pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor);
  2947. if (vendor == 0xffff) {
  2948. netdev_err(mgp->dev, "device disappeared!\n");
  2949. return -EIO;
  2950. }
  2951. pci_restore_state(pdev);
  2952. status = pci_enable_device(pdev);
  2953. if (status) {
  2954. dev_err(&pdev->dev, "failed to enable device\n");
  2955. return status;
  2956. }
  2957. pci_set_master(pdev);
  2958. myri10ge_reset(mgp);
  2959. myri10ge_dummy_rdma(mgp, 1);
  2960. /* Save configuration space to be restored if the
  2961. * nic resets due to a parity error */
  2962. pci_save_state(pdev);
  2963. if (netif_running(netdev)) {
  2964. rtnl_lock();
  2965. status = myri10ge_open(netdev);
  2966. rtnl_unlock();
  2967. if (status != 0)
  2968. goto abort_with_enabled;
  2969. }
  2970. netif_device_attach(netdev);
  2971. return 0;
  2972. abort_with_enabled:
  2973. pci_disable_device(pdev);
  2974. return -EIO;
  2975. }
  2976. #endif /* CONFIG_PM */
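/*
 * Read the NIC's REBOOT_STATUS word through the vendor-specific PCI
 * capability: offset 0x10 selects read32 mode, 0x18 takes the target
 * address and 0x14 returns the data.
 */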
  2977. static u32 myri10ge_read_reboot(struct myri10ge_priv *mgp)
  2978. {
  2979. struct pci_dev *pdev = mgp->pdev;
  2980. int vs = mgp->vendor_specific_offset;
  2981. u32 reboot;
  2982. /* enter read32 mode */
  2983. pci_write_config_byte(pdev, vs + 0x10, 0x3);
  2984. /* read REBOOT_STATUS (0xfffffff0) */
  2985. pci_write_config_dword(pdev, vs + 0x18, 0xfffffff0);
  2986. pci_read_config_dword(pdev, vs + 0x14, &reboot);
  2987. return reboot;
  2988. }
  2989. /*
  2990. * This watchdog is used to check whether the board has suffered
  2991. * from a parity error and needs to be recovered.
  2992. */
  2993. static void myri10ge_watchdog(struct work_struct *work)
  2994. {
  2995. struct myri10ge_priv *mgp =
  2996. container_of(work, struct myri10ge_priv, watchdog_work);
  2997. struct myri10ge_tx_buf *tx;
  2998. u32 reboot;
  2999. int status, rebooted;
  3000. int i;
  3001. u16 cmd, vendor;
  3002. mgp->watchdog_resets++;
  3003. pci_read_config_word(mgp->pdev, PCI_COMMAND, &cmd);
  3004. rebooted = 0;
  3005. if ((cmd & PCI_COMMAND_MASTER) == 0) {
  3006. /* Bus master DMA disabled? Check to see
  3007. * if the card rebooted due to a parity error.
  3008. * Report it, and recover below if myri10ge_reset_recover allows */
  3009. reboot = myri10ge_read_reboot(mgp);
  3010. netdev_err(mgp->dev, "NIC rebooted (0x%x),%s resetting\n",
  3011. reboot,
  3012. myri10ge_reset_recover ? "" : " not");
  3013. if (myri10ge_reset_recover == 0)
  3014. return;
  3015. rtnl_lock();
  3016. mgp->rebooted = 1;
  3017. rebooted = 1;
  3018. myri10ge_close(mgp->dev);
  3019. myri10ge_reset_recover--;
  3020. mgp->rebooted = 0;
  3021. /*
  3022. * A rebooted nic will come back with config space as
  3023. * it was after power was applied to PCIe bus.
  3024. * Attempt to restore config space which was saved
  3025. * when the driver was loaded, or the last time the
  3026. * nic was resumed from power saving mode.
  3027. */
  3028. pci_restore_state(mgp->pdev);
  3029. /* save state again for accounting reasons */
  3030. pci_save_state(mgp->pdev);
  3031. } else {
  3032. /* if we get back -1's from our slot, perhaps somebody
  3033. * powered off our card. Don't try to reset it in
  3034. * this case */
  3035. if (cmd == 0xffff) {
  3036. pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor);
  3037. if (vendor == 0xffff) {
  3038. netdev_err(mgp->dev, "device disappeared!\n");
  3039. return;
  3040. }
  3041. }
  3042. /* Perhaps it is a software error. Try to reset */
  3043. netdev_err(mgp->dev, "device timeout, resetting\n");
  3044. for (i = 0; i < mgp->num_slices; i++) {
  3045. tx = &mgp->ss[i].tx;
  3046. netdev_err(mgp->dev, "(%d): %d %d %d %d %d %d\n",
  3047. i, tx->queue_active, tx->req,
  3048. tx->done, tx->pkt_start, tx->pkt_done,
  3049. (int)ntohl(mgp->ss[i].fw_stats->
  3050. send_done_count));
  3051. msleep(2000);
  3052. netdev_info(mgp->dev, "(%d): %d %d %d %d %d %d\n",
  3053. i, tx->queue_active, tx->req,
  3054. tx->done, tx->pkt_start, tx->pkt_done,
  3055. (int)ntohl(mgp->ss[i].fw_stats->
  3056. send_done_count));
  3057. }
  3058. }
  3059. if (!rebooted) {
  3060. rtnl_lock();
  3061. myri10ge_close(mgp->dev);
  3062. }
  3063. status = myri10ge_load_firmware(mgp, 1);
  3064. if (status != 0)
  3065. netdev_err(mgp->dev, "failed to load firmware\n");
  3066. else
  3067. myri10ge_open(mgp->dev);
  3068. rtnl_unlock();
  3069. }
  3070. /*
  3071. * We use our own timer routine rather than relying upon
  3072. * netdev->tx_timeout because we have a very large hardware transmit
  3073. * queue. Due to the large queue, the netdev->tx_timeout function
  3074. * cannot detect a NIC with a parity error in a timely fashion if the
  3075. * NIC is lightly loaded.
  3076. */
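/*
 * Each timer tick refills any receive rings flagged watchdog_needed,
 * checks every slice for a transmit queue that has made no progress,
 * and, when no slice has moved at all, reads PCI_COMMAND to verify
 * the NIC is still bus master. If anything looks wrong, the heavier
 * myri10ge_watchdog() work is scheduled to reset the device.
 */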
  3077. static void myri10ge_watchdog_timer(unsigned long arg)
  3078. {
  3079. struct myri10ge_priv *mgp;
  3080. struct myri10ge_slice_state *ss;
  3081. int i, reset_needed, busy_slice_cnt;
  3082. u32 rx_pause_cnt;
  3083. u16 cmd;
  3084. mgp = (struct myri10ge_priv *)arg;
  3085. rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause);
  3086. busy_slice_cnt = 0;
  3087. for (i = 0, reset_needed = 0;
  3088. i < mgp->num_slices && reset_needed == 0; ++i) {
  3089. ss = &mgp->ss[i];
  3090. if (ss->rx_small.watchdog_needed) {
  3091. myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
  3092. mgp->small_bytes + MXGEFW_PAD,
  3093. 1);
  3094. if (ss->rx_small.fill_cnt - ss->rx_small.cnt >=
  3095. myri10ge_fill_thresh)
  3096. ss->rx_small.watchdog_needed = 0;
  3097. }
  3098. if (ss->rx_big.watchdog_needed) {
  3099. myri10ge_alloc_rx_pages(mgp, &ss->rx_big,
  3100. mgp->big_bytes, 1);
  3101. if (ss->rx_big.fill_cnt - ss->rx_big.cnt >=
  3102. myri10ge_fill_thresh)
  3103. ss->rx_big.watchdog_needed = 0;
  3104. }
  3105. if (ss->tx.req != ss->tx.done &&
  3106. ss->tx.done == ss->watchdog_tx_done &&
  3107. ss->watchdog_tx_req != ss->watchdog_tx_done) {
  3108. /* nic seems like it might be stuck.. */
  3109. if (rx_pause_cnt != mgp->watchdog_pause) {
  3110. if (net_ratelimit())
  3111. netdev_err(mgp->dev, "slice %d: TX paused, check link partner\n",
  3112. i);
  3113. } else {
  3114. netdev_warn(mgp->dev, "slice %d stuck:", i);
  3115. reset_needed = 1;
  3116. }
  3117. }
  3118. if (ss->watchdog_tx_done != ss->tx.done ||
  3119. ss->watchdog_rx_done != ss->rx_done.cnt) {
  3120. busy_slice_cnt++;
  3121. }
  3122. ss->watchdog_tx_done = ss->tx.done;
  3123. ss->watchdog_tx_req = ss->tx.req;
  3124. ss->watchdog_rx_done = ss->rx_done.cnt;
  3125. }
  3126. /* if we've sent or received no traffic, poll the NIC to
  3127. * ensure it is still there. Otherwise, we risk not noticing
  3128. * an error in a timely fashion */
  3129. if (busy_slice_cnt == 0) {
  3130. pci_read_config_word(mgp->pdev, PCI_COMMAND, &cmd);
  3131. if ((cmd & PCI_COMMAND_MASTER) == 0) {
  3132. reset_needed = 1;
  3133. }
  3134. }
  3135. mgp->watchdog_pause = rx_pause_cnt;
  3136. if (reset_needed) {
  3137. schedule_work(&mgp->watchdog_work);
  3138. } else {
  3139. /* rearm timer */
  3140. mod_timer(&mgp->watchdog_timer,
  3141. jiffies + myri10ge_watchdog_timeout * HZ);
  3142. }
  3143. }
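/*
 * Per-slice resources: each slice owns a DMA-coherent receive
 * completion ring (rx_done), a DMA-coherent firmware stats block and
 * a NAPI context. The free/alloc pair below manages exactly those.
 */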
  3144. static void myri10ge_free_slices(struct myri10ge_priv *mgp)
  3145. {
  3146. struct myri10ge_slice_state *ss;
  3147. struct pci_dev *pdev = mgp->pdev;
  3148. size_t bytes;
  3149. int i;
  3150. if (mgp->ss == NULL)
  3151. return;
  3152. for (i = 0; i < mgp->num_slices; i++) {
  3153. ss = &mgp->ss[i];
  3154. if (ss->rx_done.entry != NULL) {
  3155. bytes = mgp->max_intr_slots *
  3156. sizeof(*ss->rx_done.entry);
  3157. dma_free_coherent(&pdev->dev, bytes,
  3158. ss->rx_done.entry, ss->rx_done.bus);
  3159. ss->rx_done.entry = NULL;
  3160. }
  3161. if (ss->fw_stats != NULL) {
  3162. bytes = sizeof(*ss->fw_stats);
  3163. dma_free_coherent(&pdev->dev, bytes,
  3164. ss->fw_stats, ss->fw_stats_bus);
  3165. ss->fw_stats = NULL;
  3166. netif_napi_del(&ss->napi);
  3167. }
  3168. }
  3169. kfree(mgp->ss);
  3170. mgp->ss = NULL;
  3171. }
  3172. static int myri10ge_alloc_slices(struct myri10ge_priv *mgp)
  3173. {
  3174. struct myri10ge_slice_state *ss;
  3175. struct pci_dev *pdev = mgp->pdev;
  3176. size_t bytes;
  3177. int i;
  3178. bytes = sizeof(*mgp->ss) * mgp->num_slices;
  3179. mgp->ss = kzalloc(bytes, GFP_KERNEL);
  3180. if (mgp->ss == NULL) {
  3181. return -ENOMEM;
  3182. }
  3183. for (i = 0; i < mgp->num_slices; i++) {
  3184. ss = &mgp->ss[i];
  3185. bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry);
  3186. ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes,
  3187. &ss->rx_done.bus,
  3188. GFP_KERNEL);
  3189. if (ss->rx_done.entry == NULL)
  3190. goto abort;
  3191. memset(ss->rx_done.entry, 0, bytes);
  3192. bytes = sizeof(*ss->fw_stats);
  3193. ss->fw_stats = dma_alloc_coherent(&pdev->dev, bytes,
  3194. &ss->fw_stats_bus,
  3195. GFP_KERNEL);
  3196. if (ss->fw_stats == NULL)
  3197. goto abort;
  3198. ss->mgp = mgp;
  3199. ss->dev = mgp->dev;
  3200. netif_napi_add(ss->dev, &ss->napi, myri10ge_poll,
  3201. myri10ge_napi_weight);
  3202. }
  3203. return 0;
  3204. abort:
  3205. myri10ge_free_slices(mgp);
  3206. return -ENOMEM;
  3207. }
  3208. /*
  3209. * This function determines the number of slices supported.
  3210. * The number of slices is the minimum of the number of CPUs,
  3211. * the number of MSI-X irqs supported, and the number of slices
  3212. * supported by the firmware.
  3213. */
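/*
 * To discover that limit, the slice-aware (RSS) firmware is loaded
 * temporarily and queried with MXGEFW_CMD_GET_MAX_RSS_QUEUES; the
 * result is rounded down to a power of two and further capped by how
 * many MSI-X vectors can actually be allocated. On any failure the
 * driver falls back to a single slice and restores the original
 * firmware.
 */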
  3214. static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
  3215. {
  3216. struct myri10ge_cmd cmd;
  3217. struct pci_dev *pdev = mgp->pdev;
  3218. char *old_fw;
  3219. bool old_allocated;
  3220. int i, status, ncpus, msix_cap;
  3221. mgp->num_slices = 1;
  3222. msix_cap = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
  3223. ncpus = num_online_cpus();
  3224. if (myri10ge_max_slices == 1 || msix_cap == 0 ||
  3225. (myri10ge_max_slices == -1 && ncpus < 2))
  3226. return;
  3227. /* try to load the slice-aware RSS firmware */
  3228. old_fw = mgp->fw_name;
  3229. old_allocated = mgp->fw_name_allocated;
  3230. /* don't free old_fw if we override it. */
  3231. mgp->fw_name_allocated = false;
  3232. if (myri10ge_fw_name != NULL) {
  3233. dev_info(&mgp->pdev->dev, "overriding rss firmware to %s\n",
  3234. myri10ge_fw_name);
  3235. set_fw_name(mgp, myri10ge_fw_name, false);
  3236. } else if (old_fw == myri10ge_fw_aligned)
  3237. set_fw_name(mgp, myri10ge_fw_rss_aligned, false);
  3238. else
  3239. set_fw_name(mgp, myri10ge_fw_rss_unaligned, false);
  3240. status = myri10ge_load_firmware(mgp, 0);
  3241. if (status != 0) {
  3242. dev_info(&pdev->dev, "Rss firmware not found\n");
  3243. if (old_allocated)
  3244. kfree(old_fw);
  3245. return;
  3246. }
  3247. /* hit the board with a reset to ensure it is alive */
  3248. memset(&cmd, 0, sizeof(cmd));
  3249. status = myri10ge_send_cmd(mgp, MXGEFW_CMD_RESET, &cmd, 0);
  3250. if (status != 0) {
  3251. dev_err(&mgp->pdev->dev, "failed reset\n");
  3252. goto abort_with_fw;
  3253. }
  3254. mgp->max_intr_slots = cmd.data0 / sizeof(struct mcp_slot);
  3255. /* tell it the size of the interrupt queues */
  3256. cmd.data0 = mgp->max_intr_slots * sizeof(struct mcp_slot);
  3257. status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0);
  3258. if (status != 0) {
  3259. dev_err(&mgp->pdev->dev, "failed MXGEFW_CMD_SET_INTRQ_SIZE\n");
  3260. goto abort_with_fw;
  3261. }
  3262. /* ask the firmware for the maximum number of slices it supports */
  3263. status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_RSS_QUEUES, &cmd, 0);
  3264. if (status != 0)
  3265. goto abort_with_fw;
  3266. else
  3267. mgp->num_slices = cmd.data0;
  3268. /* Only allow multiple slices if MSI-X is usable */
  3269. if (!myri10ge_msi) {
  3270. goto abort_with_fw;
  3271. }
  3272. /* if the admin did not specify a limit to how many
  3273. * slices we should use, cap it automatically to the
  3274. * number of CPUs currently online */
  3275. if (myri10ge_max_slices == -1)
  3276. myri10ge_max_slices = ncpus;
  3277. if (mgp->num_slices > myri10ge_max_slices)
  3278. mgp->num_slices = myri10ge_max_slices;
  3279. /* Now try to allocate as many MSI-X vectors as we have
  3280. * slices. We give up on MSI-X if we can only get a single
  3281. * vector. */
  3282. mgp->msix_vectors = kcalloc(mgp->num_slices, sizeof(*mgp->msix_vectors),
  3283. GFP_KERNEL);
  3284. if (mgp->msix_vectors == NULL)
  3285. goto disable_msix;
  3286. for (i = 0; i < mgp->num_slices; i++) {
  3287. mgp->msix_vectors[i].entry = i;
  3288. }
  3289. while (mgp->num_slices > 1) {
  3290. /* make sure it is a power of two */
  3291. while (!is_power_of_2(mgp->num_slices))
  3292. mgp->num_slices--;
  3293. if (mgp->num_slices == 1)
  3294. goto disable_msix;
  3295. status = pci_enable_msix(pdev, mgp->msix_vectors,
  3296. mgp->num_slices);
  3297. if (status == 0) {
  3298. pci_disable_msix(pdev);
  3299. if (old_allocated)
  3300. kfree(old_fw);
  3301. return;
  3302. }
  3303. if (status > 0)
  3304. mgp->num_slices = status;
  3305. else
  3306. goto disable_msix;
  3307. }
  3308. disable_msix:
  3309. if (mgp->msix_vectors != NULL) {
  3310. kfree(mgp->msix_vectors);
  3311. mgp->msix_vectors = NULL;
  3312. }
  3313. abort_with_fw:
  3314. mgp->num_slices = 1;
  3315. set_fw_name(mgp, old_fw, old_allocated);
  3316. myri10ge_load_firmware(mgp, 0);
  3317. }
  3318. static const struct net_device_ops myri10ge_netdev_ops = {
  3319. .ndo_open = myri10ge_open,
  3320. .ndo_stop = myri10ge_close,
  3321. .ndo_start_xmit = myri10ge_xmit,
  3322. .ndo_get_stats64 = myri10ge_get_stats,
  3323. .ndo_validate_addr = eth_validate_addr,
  3324. .ndo_change_mtu = myri10ge_change_mtu,
  3325. .ndo_fix_features = myri10ge_fix_features,
  3326. .ndo_set_multicast_list = myri10ge_set_multicast_list,
  3327. .ndo_set_mac_address = myri10ge_set_mac_address,
  3328. };
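/*
 * myri10ge_probe: bring up one NIC. Rough order: enable the PCI
 * device, set a 4KB max read request and the DMA masks, map the
 * board's SRAM, read the MAC address from the EEPROM strings, select
 * and load firmware, size and allocate the slices, then register the
 * net_device. Each error label unwinds only what was set up before it.
 */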
  3329. static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
  3330. {
  3331. struct net_device *netdev;
  3332. struct myri10ge_priv *mgp;
  3333. struct device *dev = &pdev->dev;
  3334. int i;
  3335. int status = -ENXIO;
  3336. int dac_enabled;
  3337. unsigned hdr_offset, ss_offset;
  3338. static int board_number;
  3339. netdev = alloc_etherdev_mq(sizeof(*mgp), MYRI10GE_MAX_SLICES);
  3340. if (netdev == NULL) {
  3341. dev_err(dev, "Could not allocate ethernet device\n");
  3342. return -ENOMEM;
  3343. }
  3344. SET_NETDEV_DEV(netdev, &pdev->dev);
  3345. mgp = netdev_priv(netdev);
  3346. mgp->dev = netdev;
  3347. mgp->pdev = pdev;
  3348. mgp->pause = myri10ge_flow_control;
  3349. mgp->intr_coal_delay = myri10ge_intr_coal_delay;
  3350. mgp->msg_enable = netif_msg_init(myri10ge_debug, MYRI10GE_MSG_DEFAULT);
  3351. mgp->board_number = board_number;
  3352. init_waitqueue_head(&mgp->down_wq);
  3353. if (pci_enable_device(pdev)) {
  3354. dev_err(&pdev->dev, "pci_enable_device call failed\n");
  3355. status = -ENODEV;
  3356. goto abort_with_netdev;
  3357. }
  3358. /* Find the vendor-specific cap so we can check
  3359. * the reboot register later on */
  3360. mgp->vendor_specific_offset
  3361. = pci_find_capability(pdev, PCI_CAP_ID_VNDR);
  3362. /* Set our max read request to 4KB */
  3363. status = pcie_set_readrq(pdev, 4096);
  3364. if (status != 0) {
  3365. dev_err(&pdev->dev, "Error %d writing PCI_EXP_DEVCTL\n",
  3366. status);
  3367. goto abort_with_enabled;
  3368. }
  3369. pci_set_master(pdev);
  3370. dac_enabled = 1;
  3371. status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
  3372. if (status != 0) {
  3373. dac_enabled = 0;
  3374. dev_err(&pdev->dev,
  3375. "64-bit pci address mask was refused, "
  3376. "trying 32-bit\n");
  3377. status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
  3378. }
  3379. if (status != 0) {
  3380. dev_err(&pdev->dev, "Error %d setting DMA mask\n", status);
  3381. goto abort_with_enabled;
  3382. }
  3383. (void)pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
  3384. mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd),
  3385. &mgp->cmd_bus, GFP_KERNEL);
  3386. if (mgp->cmd == NULL)
  3387. goto abort_with_enabled;
  3388. mgp->board_span = pci_resource_len(pdev, 0);
  3389. mgp->iomem_base = pci_resource_start(pdev, 0);
  3390. mgp->mtrr = -1;
  3391. mgp->wc_enabled = 0;
  3392. #ifdef CONFIG_MTRR
  3393. mgp->mtrr = mtrr_add(mgp->iomem_base, mgp->board_span,
  3394. MTRR_TYPE_WRCOMB, 1);
  3395. if (mgp->mtrr >= 0)
  3396. mgp->wc_enabled = 1;
  3397. #endif
  3398. mgp->sram = ioremap_wc(mgp->iomem_base, mgp->board_span);
  3399. if (mgp->sram == NULL) {
  3400. dev_err(&pdev->dev, "ioremap failed for %ld bytes at 0x%lx\n",
  3401. mgp->board_span, mgp->iomem_base);
  3402. status = -ENXIO;
  3403. goto abort_with_mtrr;
  3404. }
  3405. hdr_offset =
  3406. ntohl(__raw_readl(mgp->sram + MCP_HEADER_PTR_OFFSET)) & 0xffffc;
  3407. ss_offset = hdr_offset + offsetof(struct mcp_gen_header, string_specs);
  3408. mgp->sram_size = ntohl(__raw_readl(mgp->sram + ss_offset));
  3409. if (mgp->sram_size > mgp->board_span ||
  3410. mgp->sram_size <= MYRI10GE_FW_OFFSET) {
  3411. dev_err(&pdev->dev,
  3412. "invalid sram_size %dB or board span %ldB\n",
  3413. mgp->sram_size, mgp->board_span);
  3414. goto abort_with_ioremap;
  3415. }
  3416. memcpy_fromio(mgp->eeprom_strings,
  3417. mgp->sram + mgp->sram_size, MYRI10GE_EEPROM_STRINGS_SIZE);
  3418. memset(mgp->eeprom_strings + MYRI10GE_EEPROM_STRINGS_SIZE - 2, 0, 2);
  3419. status = myri10ge_read_mac_addr(mgp);
  3420. if (status)
  3421. goto abort_with_ioremap;
  3422. for (i = 0; i < ETH_ALEN; i++)
  3423. netdev->dev_addr[i] = mgp->mac_addr[i];
  3424. myri10ge_select_firmware(mgp);
  3425. status = myri10ge_load_firmware(mgp, 1);
  3426. if (status != 0) {
  3427. dev_err(&pdev->dev, "failed to load firmware\n");
  3428. goto abort_with_ioremap;
  3429. }
  3430. myri10ge_probe_slices(mgp);
  3431. status = myri10ge_alloc_slices(mgp);
  3432. if (status != 0) {
  3433. dev_err(&pdev->dev, "failed to alloc slice state\n");
  3434. goto abort_with_firmware;
  3435. }
  3436. netif_set_real_num_tx_queues(netdev, mgp->num_slices);
  3437. netif_set_real_num_rx_queues(netdev, mgp->num_slices);
  3438. status = myri10ge_reset(mgp);
  3439. if (status != 0) {
  3440. dev_err(&pdev->dev, "failed reset\n");
  3441. goto abort_with_slices;
  3442. }
  3443. #ifdef CONFIG_MYRI10GE_DCA
  3444. myri10ge_setup_dca(mgp);
  3445. #endif
  3446. pci_set_drvdata(pdev, mgp);
  3447. if ((myri10ge_initial_mtu + ETH_HLEN) > MYRI10GE_MAX_ETHER_MTU)
  3448. myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
  3449. if ((myri10ge_initial_mtu + ETH_HLEN) < 68)
  3450. myri10ge_initial_mtu = 68;
  3451. netdev->netdev_ops = &myri10ge_netdev_ops;
  3452. netdev->mtu = myri10ge_initial_mtu;
  3453. netdev->base_addr = mgp->iomem_base;
  3454. netdev->hw_features = mgp->features | NETIF_F_LRO | NETIF_F_RXCSUM;
  3455. netdev->features = netdev->hw_features;
  3456. if (dac_enabled)
  3457. netdev->features |= NETIF_F_HIGHDMA;
  3458. netdev->vlan_features |= mgp->features;
  3459. if (mgp->fw_ver_tiny < 37)
  3460. netdev->vlan_features &= ~NETIF_F_TSO6;
  3461. if (mgp->fw_ver_tiny < 32)
  3462. netdev->vlan_features &= ~NETIF_F_TSO;
  3463. /* make sure we can get an irq, and that MSI can be
  3464. * set up (if available). Also ensure netdev->irq
  3465. * is set to the correct value if MSI is enabled */
  3466. status = myri10ge_request_irq(mgp);
  3467. if (status != 0)
  3468. goto abort_with_firmware;
  3469. netdev->irq = pdev->irq;
  3470. myri10ge_free_irq(mgp);
  3471. /* Save configuration space to be restored if the
  3472. * nic resets due to a parity error */
  3473. pci_save_state(pdev);
  3474. /* Setup the watchdog timer */
  3475. setup_timer(&mgp->watchdog_timer, myri10ge_watchdog_timer,
  3476. (unsigned long)mgp);
  3477. spin_lock_init(&mgp->stats_lock);
  3478. SET_ETHTOOL_OPS(netdev, &myri10ge_ethtool_ops);
  3479. INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog);
  3480. status = register_netdev(netdev);
  3481. if (status != 0) {
  3482. dev_err(&pdev->dev, "register_netdev failed: %d\n", status);
  3483. goto abort_with_state;
  3484. }
  3485. if (mgp->msix_enabled)
  3486. dev_info(dev, "%d MSI-X IRQs, tx bndry %d, fw %s, WC %s\n",
  3487. mgp->num_slices, mgp->tx_boundary, mgp->fw_name,
  3488. (mgp->wc_enabled ? "Enabled" : "Disabled"));
  3489. else
  3490. dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n",
  3491. mgp->msi_enabled ? "MSI" : "xPIC",
  3492. netdev->irq, mgp->tx_boundary, mgp->fw_name,
  3493. (mgp->wc_enabled ? "Enabled" : "Disabled"));
  3494. board_number++;
  3495. return 0;
  3496. abort_with_state:
  3497. pci_restore_state(pdev);
  3498. abort_with_slices:
  3499. myri10ge_free_slices(mgp);
  3500. abort_with_firmware:
  3501. myri10ge_dummy_rdma(mgp, 0);
  3502. abort_with_ioremap:
  3503. if (mgp->mac_addr_string != NULL)
  3504. dev_err(&pdev->dev,
  3505. "myri10ge_probe() failed: MAC=%s, SN=%ld\n",
  3506. mgp->mac_addr_string, mgp->serial_number);
  3507. iounmap(mgp->sram);
  3508. abort_with_mtrr:
  3509. #ifdef CONFIG_MTRR
  3510. if (mgp->mtrr >= 0)
  3511. mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
  3512. #endif
  3513. dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
  3514. mgp->cmd, mgp->cmd_bus);
  3515. abort_with_enabled:
  3516. pci_disable_device(pdev);
  3517. abort_with_netdev:
  3518. set_fw_name(mgp, NULL, false);
  3519. free_netdev(netdev);
  3520. return status;
  3521. }
  3522. /*
  3523. * myri10ge_remove
  3524. *
  3525. * Does what is necessary to shutdown one Myrinet device. Called
  3526. * once for each Myrinet card by the kernel when a module is
  3527. * unloaded.
  3528. */
  3529. static void myri10ge_remove(struct pci_dev *pdev)
  3530. {
  3531. struct myri10ge_priv *mgp;
  3532. struct net_device *netdev;
  3533. mgp = pci_get_drvdata(pdev);
  3534. if (mgp == NULL)
  3535. return;
  3536. cancel_work_sync(&mgp->watchdog_work);
  3537. netdev = mgp->dev;
  3538. unregister_netdev(netdev);
  3539. #ifdef CONFIG_MYRI10GE_DCA
  3540. myri10ge_teardown_dca(mgp);
  3541. #endif
  3542. myri10ge_dummy_rdma(mgp, 0);
  3543. /* avoid a memory leak */
  3544. pci_restore_state(pdev);
  3545. iounmap(mgp->sram);
  3546. #ifdef CONFIG_MTRR
  3547. if (mgp->mtrr >= 0)
  3548. mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
  3549. #endif
  3550. myri10ge_free_slices(mgp);
  3551. if (mgp->msix_vectors != NULL)
  3552. kfree(mgp->msix_vectors);
  3553. dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
  3554. mgp->cmd, mgp->cmd_bus);
  3555. set_fw_name(mgp, NULL, false);
  3556. free_netdev(netdev);
  3557. pci_disable_device(pdev);
  3558. pci_set_drvdata(pdev, NULL);
  3559. }
  3560. #define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E 0x0008
  3561. #define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9 0x0009
  3562. static DEFINE_PCI_DEVICE_TABLE(myri10ge_pci_tbl) = {
  3563. {PCI_DEVICE(PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E)},
  3564. {PCI_DEVICE
  3565. (PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9)},
  3566. {0},
  3567. };
  3568. MODULE_DEVICE_TABLE(pci, myri10ge_pci_tbl);
  3569. static struct pci_driver myri10ge_driver = {
  3570. .name = "myri10ge",
  3571. .probe = myri10ge_probe,
  3572. .remove = myri10ge_remove,
  3573. .id_table = myri10ge_pci_tbl,
  3574. #ifdef CONFIG_PM
  3575. .suspend = myri10ge_suspend,
  3576. .resume = myri10ge_resume,
  3577. #endif
  3578. };
  3579. #ifdef CONFIG_MYRI10GE_DCA
  3580. static int
  3581. myri10ge_notify_dca(struct notifier_block *nb, unsigned long event, void *p)
  3582. {
  3583. int err = driver_for_each_device(&myri10ge_driver.driver,
  3584. NULL, &event,
  3585. myri10ge_notify_dca_device);
  3586. if (err)
  3587. return NOTIFY_BAD;
  3588. return NOTIFY_DONE;
  3589. }
  3590. static struct notifier_block myri10ge_dca_notifier = {
  3591. .notifier_call = myri10ge_notify_dca,
  3592. .next = NULL,
  3593. .priority = 0,
  3594. };
  3595. #endif /* CONFIG_MYRI10GE_DCA */
  3596. static __init int myri10ge_init_module(void)
  3597. {
  3598. pr_info("Version %s\n", MYRI10GE_VERSION_STR);
  3599. if (myri10ge_rss_hash > MXGEFW_RSS_HASH_TYPE_MAX) {
  3600. pr_err("Illegal rssh hash type %d, defaulting to source port\n",
  3601. myri10ge_rss_hash);
  3602. myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_PORT;
  3603. }
  3604. #ifdef CONFIG_MYRI10GE_DCA
  3605. dca_register_notify(&myri10ge_dca_notifier);
  3606. #endif
  3607. if (myri10ge_max_slices > MYRI10GE_MAX_SLICES)
  3608. myri10ge_max_slices = MYRI10GE_MAX_SLICES;
  3609. return pci_register_driver(&myri10ge_driver);
  3610. }
  3611. module_init(myri10ge_init_module);
  3612. static __exit void myri10ge_cleanup_module(void)
  3613. {
  3614. #ifdef CONFIG_MYRI10GE_DCA
  3615. dca_unregister_notify(&myri10ge_dca_notifier);
  3616. #endif
  3617. pci_unregister_driver(&myri10ge_driver);
  3618. }
  3619. module_exit(myri10ge_cleanup_module);