vmxnet3_drv.c
/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 *
 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
 *
 */

#include <linux/module.h>
#include <net/ip6_checksum.h>

#include "vmxnet3_int.h"

char vmxnet3_driver_name[] = "vmxnet3";
#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"

/*
 * PCI Device ID Table
 * Last entry must be all 0s
 */
static DEFINE_PCI_DEVICE_TABLE(vmxnet3_pciid_table) = {
	{PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
	{0}
};

MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);

static atomic_t devices_found;

#define VMXNET3_MAX_DEVICES 10
static int enable_mq = 1;
static int irq_share_mode;

static void
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);


/*
 * Enable/Disable the given intr
 */
static void
vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
}


static void
vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
}


/*
 * Enable/Disable all intrs used by the device
 */
static void
vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_enable_intr(adapter, i);
	adapter->shared->devRead.intrConf.intrCtrl &=
					cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
}


static void
vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	adapter->shared->devRead.intrConf.intrCtrl |=
					cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_disable_intr(adapter, i);
}


static void
vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
{
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
}


static bool
vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	return tq->stopped;
}


static void
vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
}


static void
vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}


static void
vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = true;
	tq->num_stop++;
	netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}


/*
 * Check the link state. This may start or stop the tx queue.
 */
static void
vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
{
	u32 ret;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	adapter->link_speed = ret >> 16;
	if (ret & 1) { /* Link is up. */
		printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",
		       adapter->netdev->name, adapter->link_speed);
		if (!netif_carrier_ok(adapter->netdev))
			netif_carrier_on(adapter->netdev);

		if (affectTxQueue) {
			for (i = 0; i < adapter->num_tx_queues; i++)
				vmxnet3_tq_start(&adapter->tx_queue[i],
						 adapter);
		}
	} else {
		printk(KERN_INFO "%s: NIC Link is Down\n",
		       adapter->netdev->name);
		if (netif_carrier_ok(adapter->netdev))
			netif_carrier_off(adapter->netdev);

		if (affectTxQueue) {
			for (i = 0; i < adapter->num_tx_queues; i++)
				vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
		}
	}
}


static void
vmxnet3_process_events(struct vmxnet3_adapter *adapter)
{
	int i;
	unsigned long flags;
	u32 events = le32_to_cpu(adapter->shared->ecr);

	if (!events)
		return;

	vmxnet3_ack_events(adapter, events);

	/* Check if link state has changed */
	if (events & VMXNET3_ECR_LINK)
		vmxnet3_check_link(adapter, true);

	/* Check if there is an error on xmit/recv queues */
	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_GET_QUEUE_STATUS);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);

		for (i = 0; i < adapter->num_tx_queues; i++)
			if (adapter->tqd_start[i].status.stopped)
				dev_err(&adapter->netdev->dev,
					"%s: tq[%d] error 0x%x\n",
					adapter->netdev->name, i, le32_to_cpu(
					adapter->tqd_start[i].status.error));
		for (i = 0; i < adapter->num_rx_queues; i++)
			if (adapter->rqd_start[i].status.stopped)
				dev_err(&adapter->netdev->dev,
					"%s: rq[%d] error 0x%x\n",
					adapter->netdev->name, i,
					adapter->rqd_start[i].status.error);

		schedule_work(&adapter->work);
	}
}

#ifdef __BIG_ENDIAN_BITFIELD
/*
 * The device expects the bitfields in shared structures to be written in
 * little endian. When the CPU is big endian, the following routines are used
 * to correctly read from and write into the ABI.
 * The general technique used here is: double word bitfields are defined in
 * the opposite order for big endian architectures. Then, before reading them
 * in the driver, the complete double word is translated using le32_to_cpu.
 * Similarly, after the driver writes into bitfields, cpu_to_le32 is used to
 * translate the double words into the required format.
 * In order to avoid touching bits in the shared structure more than once,
 * temporary descriptors are used. These are passed as srcDesc to the
 * following functions.
 */
static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
				struct Vmxnet3_RxDesc *dstDesc)
{
	u32 *src = (u32 *)srcDesc + 2;
	u32 *dst = (u32 *)dstDesc + 2;

	dstDesc->addr = le64_to_cpu(srcDesc->addr);
	*dst = le32_to_cpu(*src);
	dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
}


static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
			       struct Vmxnet3_TxDesc *dstDesc)
{
	int i;
	u32 *src = (u32 *)(srcDesc + 1);
	u32 *dst = (u32 *)(dstDesc + 1);

	/* Working backwards so that the gen bit is set at the end. */
	for (i = 2; i > 0; i--) {
		src--;
		dst--;
		*dst = cpu_to_le32(*src);
	}
}


static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
				struct Vmxnet3_RxCompDesc *dstDesc)
{
	int i = 0;
	u32 *src = (u32 *)srcDesc;
	u32 *dst = (u32 *)dstDesc;

	for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
		*dst = le32_to_cpu(*src);
		src++;
		dst++;
	}
}


/* Used to read bitfield values from double words. */
static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
{
	u32 temp = le32_to_cpu(*bitfield);
	u32 mask = ((1 << size) - 1) << pos;

	temp &= mask;
	temp >>= pos;
	return temp;
}

#endif  /* __BIG_ENDIAN_BITFIELD */
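
/*
 * Illustration (not part of the driver): on a big-endian build, the gen bit
 * of a Tx descriptor can be read via get_bitfield32(), e.g.
 *
 *	u32 gen = get_bitfield32((const __le32 *)txdesc +
 *				 VMXNET3_TXD_GEN_DWORD_SHIFT,
 *				 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE);
 *
 * which is what the VMXNET3_TXDESC_GET_GEN() macro below expands to. On
 * little-endian builds the macro reads the bitfield directly.
 */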
#ifdef __BIG_ENDIAN_BITFIELD

#  define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
			VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
#  define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
			VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
#  define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
			VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
			VMXNET3_TCD_GEN_SIZE)
#  define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
			VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
#  define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
			(dstrcd) = (tmp); \
			vmxnet3_RxCompToCPU((rcd), (tmp)); \
		} while (0)
#  define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
			(dstrxd) = (tmp); \
			vmxnet3_RxDescToCPU((rxd), (tmp)); \
		} while (0)

#else

#  define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
#  define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
#  define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
#  define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
#  define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
#  define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)

#endif /* __BIG_ENDIAN_BITFIELD */
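
/*
 * Usage sketch (illustrative): callers keep a scratch descriptor on the
 * stack so a shared ring entry is read only once per access:
 *
 *	struct Vmxnet3_RxCompDesc rxComp;	scratch copy (big endian only)
 *	struct Vmxnet3_RxCompDesc *rcd;
 *
 *	vmxnet3_getRxComp(rcd, &rq->comp_ring.base[idx].rcd, &rxComp);
 *
 * Afterwards rcd points at the CPU-endian scratch copy on big-endian
 * builds, or directly at the ring entry on little-endian builds, where the
 * macro degenerates to a plain assignment. vmxnet3_rq_rx_complete() below
 * follows this pattern.
 */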
static void
vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
		     struct pci_dev *pdev)
{
	if (tbi->map_type == VMXNET3_MAP_SINGLE)
		pci_unmap_single(pdev, tbi->dma_addr, tbi->len,
				 PCI_DMA_TODEVICE);
	else if (tbi->map_type == VMXNET3_MAP_PAGE)
		pci_unmap_page(pdev, tbi->dma_addr, tbi->len,
			       PCI_DMA_TODEVICE);
	else
		BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);

	tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
}


static int
vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
		  struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
{
	struct sk_buff *skb;
	int entries = 0;

	/* no out of order completion */
	BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
	BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);

	skb = tq->buf_info[eop_idx].skb;
	BUG_ON(skb == NULL);
	tq->buf_info[eop_idx].skb = NULL;

	VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);

	while (tq->tx_ring.next2comp != eop_idx) {
		vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
				     pdev);

		/* update next2comp w/o tx_lock. Since we are marking more,
		 * instead of less, tx ring entries avail, the worst case is
		 * that the tx routine incorrectly re-queues a pkt due to
		 * insufficient tx ring entries.
		 */
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
		entries++;
	}

	dev_kfree_skb_any(skb);
	return entries;
}


static int
vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
		       struct vmxnet3_adapter *adapter)
{
	int completed = 0;
	union Vmxnet3_GenericDesc *gdesc;

	gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
		completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
					       &gdesc->tcd), tq, adapter->pdev,
					       adapter);

		vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
		gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	}

	if (completed) {
		spin_lock(&tq->tx_lock);
		if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
			     vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
			     VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
			     netif_carrier_ok(adapter->netdev))) {
			vmxnet3_tq_wake(tq, adapter);
		}
		spin_unlock(&tq->tx_lock);
	}
	return completed;
}


static void
vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	int i;

	while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
		struct vmxnet3_tx_buf_info *tbi;

		tbi = tq->buf_info + tq->tx_ring.next2comp;

		vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
		if (tbi->skb) {
			dev_kfree_skb_any(tbi->skb);
			tbi->skb = NULL;
		}
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
	}

	/* sanity check, verify all buffers are indeed unmapped and freed */
	for (i = 0; i < tq->tx_ring.size; i++) {
		BUG_ON(tq->buf_info[i].skb != NULL ||
		       tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
	}

	tq->tx_ring.gen = VMXNET3_INIT_GEN;
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;

	tq->comp_ring.gen = VMXNET3_INIT_GEN;
	tq->comp_ring.next2proc = 0;
}


static void
vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	if (tq->tx_ring.base) {
		pci_free_consistent(adapter->pdev, tq->tx_ring.size *
				    sizeof(struct Vmxnet3_TxDesc),
				    tq->tx_ring.base, tq->tx_ring.basePA);
		tq->tx_ring.base = NULL;
	}
	if (tq->data_ring.base) {
		pci_free_consistent(adapter->pdev, tq->data_ring.size *
				    sizeof(struct Vmxnet3_TxDataDesc),
				    tq->data_ring.base, tq->data_ring.basePA);
		tq->data_ring.base = NULL;
	}
	if (tq->comp_ring.base) {
		pci_free_consistent(adapter->pdev, tq->comp_ring.size *
				    sizeof(struct Vmxnet3_TxCompDesc),
				    tq->comp_ring.base, tq->comp_ring.basePA);
		tq->comp_ring.base = NULL;
	}
	kfree(tq->buf_info);
	tq->buf_info = NULL;
}


/* Destroy all tx queues */
void
vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
}


static void
vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter)
{
	int i;

	/* reset the tx ring contents to 0 and reset the tx ring states */
	memset(tq->tx_ring.base, 0, tq->tx_ring.size *
	       sizeof(struct Vmxnet3_TxDesc));
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
	tq->tx_ring.gen = VMXNET3_INIT_GEN;

	memset(tq->data_ring.base, 0, tq->data_ring.size *
	       sizeof(struct Vmxnet3_TxDataDesc));

	/* reset the tx comp ring contents to 0 and reset comp ring states */
	memset(tq->comp_ring.base, 0, tq->comp_ring.size *
	       sizeof(struct Vmxnet3_TxCompDesc));
	tq->comp_ring.next2proc = 0;
	tq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset the bookkeeping data */
	memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
	for (i = 0; i < tq->tx_ring.size; i++)
		tq->buf_info[i].map_type = VMXNET3_MAP_NONE;

	/* stats are not reset */
}


static int
vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
		  struct vmxnet3_adapter *adapter)
{
	BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
	       tq->comp_ring.base || tq->buf_info);

	tq->tx_ring.base = pci_alloc_consistent(adapter->pdev, tq->tx_ring.size
			   * sizeof(struct Vmxnet3_TxDesc),
			   &tq->tx_ring.basePA);
	if (!tq->tx_ring.base) {
		printk(KERN_ERR "%s: failed to allocate tx ring\n",
		       adapter->netdev->name);
		goto err;
	}

	tq->data_ring.base = pci_alloc_consistent(adapter->pdev,
			     tq->data_ring.size *
			     sizeof(struct Vmxnet3_TxDataDesc),
			     &tq->data_ring.basePA);
	if (!tq->data_ring.base) {
		printk(KERN_ERR "%s: failed to allocate data ring\n",
		       adapter->netdev->name);
		goto err;
	}

	tq->comp_ring.base = pci_alloc_consistent(adapter->pdev,
			     tq->comp_ring.size *
			     sizeof(struct Vmxnet3_TxCompDesc),
			     &tq->comp_ring.basePA);
	if (!tq->comp_ring.base) {
		printk(KERN_ERR "%s: failed to allocate tx comp ring\n",
		       adapter->netdev->name);
		goto err;
	}

	tq->buf_info = kcalloc(tq->tx_ring.size, sizeof(tq->buf_info[0]),
			       GFP_KERNEL);
	if (!tq->buf_info)
		goto err;

	return 0;

err:
	vmxnet3_tq_destroy(tq, adapter);
	return -ENOMEM;
}


static void
vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
}


/*
 * starting from ring->next2fill, allocate rx buffers for the given ring
 * of the rx queue and update the rx desc. stop after @num_to_alloc buffers
 * are allocated or allocation fails
 */
static int
vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
			int num_to_alloc, struct vmxnet3_adapter *adapter)
{
	int num_allocated = 0;
	struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
	struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
	u32 val;

	while (num_allocated <= num_to_alloc) {
		struct vmxnet3_rx_buf_info *rbi;
		union Vmxnet3_GenericDesc *gd;

		rbi = rbi_base + ring->next2fill;
		gd = ring->base + ring->next2fill;

		if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
			if (rbi->skb == NULL) {
				rbi->skb = dev_alloc_skb(rbi->len +
							 NET_IP_ALIGN);
				if (unlikely(rbi->skb == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
				rbi->skb->dev = adapter->netdev;

				skb_reserve(rbi->skb, NET_IP_ALIGN);
				rbi->dma_addr = pci_map_single(adapter->pdev,
						rbi->skb->data, rbi->len,
						PCI_DMA_FROMDEVICE);
			} else {
				/* rx buffer skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
		} else {
			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
			       rbi->len != PAGE_SIZE);
			if (rbi->page == NULL) {
				rbi->page = alloc_page(GFP_ATOMIC);
				if (unlikely(rbi->page == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
				rbi->dma_addr = pci_map_page(adapter->pdev,
						rbi->page, 0, PAGE_SIZE,
						PCI_DMA_FROMDEVICE);
			} else {
				/* rx buffers skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
		}

		BUG_ON(rbi->dma_addr == 0);
		gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
		gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
					   | val | rbi->len);

		/* Fill the last buffer but don't mark it ready, or else the
		 * device will think that the queue is full */
		if (num_allocated == num_to_alloc)
			break;

		gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
		num_allocated++;
		vmxnet3_cmd_ring_adv_next2fill(ring);
	}
	rq->uncommitted[ring_idx] += num_allocated;

	dev_dbg(&adapter->netdev->dev,
		"alloc_rx_buf: %d allocated, next2fill %u, next2comp "
		"%u, uncommitted %u\n", num_allocated, ring->next2fill,
		ring->next2comp, rq->uncommitted[ring_idx]);

	/* so that the device can distinguish a full ring and an empty ring */
	BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);

	return num_allocated;
}
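
/*
 * Note on the gen bit (informal sketch): a descriptor belongs to the device
 * only while its gen bit matches ring->gen. The function above therefore
 * first publishes each descriptor with the inverted gen,
 *
 *	gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
 *				   | val | rbi->len);
 *
 * which the device ignores, and only then flips it live with
 *
 *	gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
 *
 * Leaving the last filled descriptor un-flipped is what lets the device
 * tell a full ring apart from an empty one.
 */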
static void
vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
		    struct vmxnet3_rx_buf_info *rbi)
{
	struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
		skb_shinfo(skb)->nr_frags;

	BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);

	__skb_frag_set_page(frag, rbi->page);
	frag->page_offset = 0;
	skb_frag_size_set(frag, rcd->len);
	skb->data_len += rcd->len;
	skb->truesize += PAGE_SIZE;
	skb_shinfo(skb)->nr_frags++;
}


static void
vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
		struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
		struct vmxnet3_adapter *adapter)
{
	u32 dw2, len;
	unsigned long buf_offset;
	int i;
	union Vmxnet3_GenericDesc *gdesc;
	struct vmxnet3_tx_buf_info *tbi = NULL;

	BUG_ON(ctx->copy_size > skb_headlen(skb));

	/* use the previous gen bit for the SOP desc */
	dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;

	ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
	gdesc = ctx->sop_txd; /* both loops below can be skipped */

	/* no need to map the buffer if headers are copied */
	if (ctx->copy_size) {
		ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
					tq->tx_ring.next2fill *
					sizeof(struct Vmxnet3_TxDataDesc));
		ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
		ctx->sop_txd->dword[3] = 0;

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_NONE;

		dev_dbg(&adapter->netdev->dev,
			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
			tq->tx_ring.next2fill,
			le64_to_cpu(ctx->sop_txd->txd.addr),
			ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);

		/* use the right gen for non-SOP desc */
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
	}

	/* linear part can use multiple tx desc if it's big */
	len = skb_headlen(skb) - ctx->copy_size;
	buf_offset = ctx->copy_size;
	while (len) {
		u32 buf_size;

		if (len < VMXNET3_MAX_TX_BUF_SIZE) {
			buf_size = len;
			dw2 |= len;
		} else {
			buf_size = VMXNET3_MAX_TX_BUF_SIZE;
			/* spec says that for TxDesc.len, 0 == 2^14 */
		}

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_SINGLE;
		tbi->dma_addr = pci_map_single(adapter->pdev,
				skb->data + buf_offset, buf_size,
				PCI_DMA_TODEVICE);

		tbi->len = buf_size;

		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

		gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
		gdesc->dword[2] = cpu_to_le32(dw2);
		gdesc->dword[3] = 0;

		dev_dbg(&adapter->netdev->dev,
			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
			tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
			le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;

		len -= buf_size;
		buf_offset += buf_size;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_PAGE;
		tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
						 0, skb_frag_size(frag),
						 DMA_TO_DEVICE);

		tbi->len = skb_frag_size(frag);

		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

		gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
		gdesc->dword[2] = cpu_to_le32(dw2 | skb_frag_size(frag));
		gdesc->dword[3] = 0;

		dev_dbg(&adapter->netdev->dev,
			"txd[%u]: 0x%llu %u %u\n",
			tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
			le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
	}

	ctx->eop_txd = gdesc;
	/* set the last buf_info for the pkt */
	tbi->skb = skb;
	tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
}


/* Init all tx queues */
static void
vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
}


/*
 * parse and copy relevant protocol headers:
 *   For a tso pkt, relevant headers are L2/3/4 including options
 *   For a pkt requesting csum offloading, they are L2/3 and may include L4
 *   if it's a TCP/UDP pkt
 *
 * Returns:
 *   -1:  error happens during parsing
 *    0:  protocol headers parsed, but too big to be copied
 *    1:  protocol headers parsed and copied
 *
 * Other effects:
 *    1. related *ctx fields are updated.
 *    2. ctx->copy_size is # of bytes copied
 *    3. the portion copied is guaranteed to be in the linear part
 *
 */
static int
vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
			   struct vmxnet3_tx_ctx *ctx,
			   struct vmxnet3_adapter *adapter)
{
	struct Vmxnet3_TxDataDesc *tdd;

	if (ctx->mss) {	/* TSO */
		ctx->eth_ip_hdr_size = skb_transport_offset(skb);
		ctx->l4_hdr_size = tcp_hdrlen(skb);
		ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
	} else {
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);

			if (ctx->ipv4) {
				const struct iphdr *iph = ip_hdr(skb);

				if (iph->protocol == IPPROTO_TCP)
					ctx->l4_hdr_size = tcp_hdrlen(skb);
				else if (iph->protocol == IPPROTO_UDP)
					ctx->l4_hdr_size = sizeof(struct udphdr);
				else
					ctx->l4_hdr_size = 0;
			} else {
				/* for simplicity, don't copy L4 headers */
				ctx->l4_hdr_size = 0;
			}
			ctx->copy_size = min(ctx->eth_ip_hdr_size +
					     ctx->l4_hdr_size, skb->len);
		} else {
			ctx->eth_ip_hdr_size = 0;
			ctx->l4_hdr_size = 0;
			/* copy as much as allowed */
			ctx->copy_size = min((unsigned int)VMXNET3_HDR_COPY_SIZE,
					     skb_headlen(skb));
		}

		/* make sure headers are accessible directly */
		if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
			goto err;
	}

	if (unlikely(ctx->copy_size > VMXNET3_HDR_COPY_SIZE)) {
		tq->stats.oversized_hdr++;
		ctx->copy_size = 0;
		return 0;
	}

	tdd = tq->data_ring.base + tq->tx_ring.next2fill;

	memcpy(tdd->data, skb->data, ctx->copy_size);
	dev_dbg(&adapter->netdev->dev,
		"copy %u bytes to dataRing[%u]\n",
		ctx->copy_size, tq->tx_ring.next2fill);
	return 1;

err:
	return -1;
}
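
/*
 * Callers treat the return value as a tri-state. A minimal sketch of the
 * pattern used by vmxnet3_tq_xmit() below:
 *
 *	ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
 *	if (ret < 0)
 *		drop the packet (parse error);
 *	else if (ret == 0)
 *		headers were too big to copy (ctx.copy_size was reset to 0);
 *	else
 *		headers now live in tq->data_ring;
 */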
static void
vmxnet3_prepare_tso(struct sk_buff *skb,
		    struct vmxnet3_tx_ctx *ctx)
{
	struct tcphdr *tcph = tcp_hdr(skb);

	if (ctx->ipv4) {
		struct iphdr *iph = ip_hdr(skb);

		iph->check = 0;
		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
						 IPPROTO_TCP, 0);
	} else {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
					       IPPROTO_TCP, 0);
	}
}
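
/*
 * Note (informal): the stores above seed tcph->check with the complement of
 * the pseudo-header sum over the addresses and IPPROTO_TCP, with the length
 * field deliberately passed as 0:
 *
 *	tcph->check = ~csum_tcpudp_magic(saddr, daddr, 0, IPPROTO_TCP, 0);
 *
 * The device folds in the per-segment TCP length and payload when it
 * segments the packet, so no full checksum is computed here.
 */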
/*
 * Transmits a pkt through a given tq
 * Returns:
 *    NETDEV_TX_OK:      descriptors are set up successfully
 *    NETDEV_TX_OK:      error occurred, the pkt is dropped
 *    NETDEV_TX_BUSY:    tx ring is full, queue is stopped
 *
 * Side-effects:
 *    1. tx ring may be changed
 *    2. tq stats may be updated accordingly
 *    3. shared->txNumDeferred may be updated
 */
static int
vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter, struct net_device *netdev)
{
	int ret;
	u32 count;
	unsigned long flags;
	struct vmxnet3_tx_ctx ctx;
	union Vmxnet3_GenericDesc *gdesc;
#ifdef __BIG_ENDIAN_BITFIELD
	/* Use temporary descriptor to avoid touching bits multiple times */
	union Vmxnet3_GenericDesc tempTxDesc;
#endif

	/* conservatively estimate # of descriptors to use */
	count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) +
		skb_shinfo(skb)->nr_frags + 1;

	ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));

	ctx.mss = skb_shinfo(skb)->gso_size;
	if (ctx.mss) {
		if (skb_header_cloned(skb)) {
			if (unlikely(pskb_expand_head(skb, 0, 0,
						      GFP_ATOMIC) != 0)) {
				tq->stats.drop_tso++;
				goto drop_pkt;
			}
			tq->stats.copy_skb_header++;
		}
		vmxnet3_prepare_tso(skb, &ctx);
	} else {
		if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {

			/* non-tso pkts must not use more than
			 * VMXNET3_MAX_TXD_PER_PKT entries
			 */
			if (skb_linearize(skb) != 0) {
				tq->stats.drop_too_many_frags++;
				goto drop_pkt;
			}
			tq->stats.linearized++;

			/* recalculate the # of descriptors to use */
			count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
		}
	}

	spin_lock_irqsave(&tq->tx_lock, flags);

	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
		tq->stats.tx_ring_full++;
		dev_dbg(&adapter->netdev->dev,
			"tx queue stopped on %s, next2comp %u"
			" next2fill %u\n", adapter->netdev->name,
			tq->tx_ring.next2comp, tq->tx_ring.next2fill);

		vmxnet3_tq_stop(tq, adapter);
		spin_unlock_irqrestore(&tq->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
	if (ret >= 0) {
		BUG_ON(ret <= 0 && ctx.copy_size != 0);
		/* hdrs parsed, check against other limits */
		if (ctx.mss) {
			if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
				     VMXNET3_MAX_TX_BUF_SIZE)) {
				goto hdr_too_big;
			}
		} else {
			if (skb->ip_summed == CHECKSUM_PARTIAL) {
				if (unlikely(ctx.eth_ip_hdr_size +
					     skb->csum_offset >
					     VMXNET3_MAX_CSUM_OFFSET)) {
					goto hdr_too_big;
				}
			}
		}
	} else {
		tq->stats.drop_hdr_inspect_err++;
		goto unlock_drop_pkt;
	}

	/* fill tx descs related to addr & len */
	vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);

	/* setup the EOP desc */
	ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);

	/* setup the SOP desc */
#ifdef __BIG_ENDIAN_BITFIELD
	gdesc = &tempTxDesc;
	gdesc->dword[2] = ctx.sop_txd->dword[2];
	gdesc->dword[3] = ctx.sop_txd->dword[3];
#else
	gdesc = ctx.sop_txd;
#endif
	if (ctx.mss) {
		gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
		gdesc->txd.om = VMXNET3_OM_TSO;
		gdesc->txd.msscof = ctx.mss;
		le32_add_cpu(&tq->shared->txNumDeferred, (skb->len -
			     gdesc->txd.hlen + ctx.mss - 1) / ctx.mss);
	} else {
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			gdesc->txd.hlen = ctx.eth_ip_hdr_size;
			gdesc->txd.om = VMXNET3_OM_CSUM;
			gdesc->txd.msscof = ctx.eth_ip_hdr_size +
					    skb->csum_offset;
		} else {
			gdesc->txd.om = 0;
			gdesc->txd.msscof = 0;
		}
		le32_add_cpu(&tq->shared->txNumDeferred, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		gdesc->txd.ti = 1;
		gdesc->txd.tci = vlan_tx_tag_get(skb);
	}

	/* finally flips the GEN bit of the SOP desc. */
	gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
						  VMXNET3_TXD_GEN);
#ifdef __BIG_ENDIAN_BITFIELD
	/* Finished updating in bitfields of Tx Desc, so write them in original
	 * place.
	 */
	vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
			   (struct Vmxnet3_TxDesc *)ctx.sop_txd);
	gdesc = ctx.sop_txd;
#endif
	dev_dbg(&adapter->netdev->dev,
		"txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
		(u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd -
		tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
		le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));

	spin_unlock_irqrestore(&tq->tx_lock, flags);

	if (le32_to_cpu(tq->shared->txNumDeferred) >=
					le32_to_cpu(tq->shared->txThreshold)) {
		tq->shared->txNumDeferred = 0;
		VMXNET3_WRITE_BAR0_REG(adapter,
				       VMXNET3_REG_TXPROD + tq->qid * 8,
				       tq->tx_ring.next2fill);
	}

	return NETDEV_TX_OK;

hdr_too_big:
	tq->stats.drop_oversized_hdr++;
unlock_drop_pkt:
	spin_unlock_irqrestore(&tq->tx_lock, flags);
drop_pkt:
	tq->stats.drop_total++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}


static netdev_tx_t
vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
	return vmxnet3_tq_xmit(skb,
			       &adapter->tx_queue[skb->queue_mapping],
			       adapter, netdev);
}
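
/*
 * Note: vmxnet3_xmit_frame() is the driver's ndo_start_xmit handler (it is
 * wired into the net_device_ops table elsewhere in this file), so the stack
 * has already selected the subqueue and the handler simply indexes
 * tx_queue[] with skb->queue_mapping, e.g.
 *
 *	.ndo_start_xmit = vmxnet3_xmit_frame,
 */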
  892. static void
  893. vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
  894. struct sk_buff *skb,
  895. union Vmxnet3_GenericDesc *gdesc)
  896. {
  897. if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
  898. /* typical case: TCP/UDP over IP and both csums are correct */
  899. if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) ==
  900. VMXNET3_RCD_CSUM_OK) {
  901. skb->ip_summed = CHECKSUM_UNNECESSARY;
  902. BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
  903. BUG_ON(!(gdesc->rcd.v4 || gdesc->rcd.v6));
  904. BUG_ON(gdesc->rcd.frg);
  905. } else {
  906. if (gdesc->rcd.csum) {
  907. skb->csum = htons(gdesc->rcd.csum);
  908. skb->ip_summed = CHECKSUM_PARTIAL;
  909. } else {
  910. skb_checksum_none_assert(skb);
  911. }
  912. }
  913. } else {
  914. skb_checksum_none_assert(skb);
  915. }
  916. }
  917. static void
  918. vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
  919. struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter)
  920. {
  921. rq->stats.drop_err++;
  922. if (!rcd->fcs)
  923. rq->stats.drop_fcs++;
  924. rq->stats.drop_total++;
  925. /*
  926. * We do not unmap and chain the rx buffer to the skb.
  927. * We basically pretend this buffer is not used and will be recycled
  928. * by vmxnet3_rq_alloc_rx_buf()
  929. */
  930. /*
  931. * ctx->skb may be NULL if this is the first and the only one
  932. * desc for the pkt
  933. */
  934. if (ctx->skb)
  935. dev_kfree_skb_irq(ctx->skb);
  936. ctx->skb = NULL;
  937. }
  938. static int
  939. vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
  940. struct vmxnet3_adapter *adapter, int quota)
  941. {
  942. static const u32 rxprod_reg[2] = {
  943. VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
  944. };
  945. u32 num_rxd = 0;
  946. bool skip_page_frags = false;
  947. struct Vmxnet3_RxCompDesc *rcd;
  948. struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
  949. #ifdef __BIG_ENDIAN_BITFIELD
  950. struct Vmxnet3_RxDesc rxCmdDesc;
  951. struct Vmxnet3_RxCompDesc rxComp;
  952. #endif
  953. vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
  954. &rxComp);
  955. while (rcd->gen == rq->comp_ring.gen) {
  956. struct vmxnet3_rx_buf_info *rbi;
  957. struct sk_buff *skb, *new_skb = NULL;
  958. struct page *new_page = NULL;
  959. int num_to_alloc;
  960. struct Vmxnet3_RxDesc *rxd;
  961. u32 idx, ring_idx;
  962. struct vmxnet3_cmd_ring *ring = NULL;
  963. if (num_rxd >= quota) {
  964. /* we may stop even before we see the EOP desc of
  965. * the current pkt
  966. */
  967. break;
  968. }
  969. num_rxd++;
  970. BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
  971. idx = rcd->rxdIdx;
  972. ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
  973. ring = rq->rx_ring + ring_idx;
  974. vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
  975. &rxCmdDesc);
  976. rbi = rq->buf_info[ring_idx] + idx;
  977. BUG_ON(rxd->addr != rbi->dma_addr ||
  978. rxd->len != rbi->len);
  979. if (unlikely(rcd->eop && rcd->err)) {
  980. vmxnet3_rx_error(rq, rcd, ctx, adapter);
  981. goto rcd_done;
  982. }
  983. if (rcd->sop) { /* first buf of the pkt */
  984. BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
  985. rcd->rqID != rq->qid);
  986. BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
  987. BUG_ON(ctx->skb != NULL || rbi->skb == NULL);
  988. if (unlikely(rcd->len == 0)) {
  989. /* Pretend the rx buffer is skipped. */
  990. BUG_ON(!(rcd->sop && rcd->eop));
  991. dev_dbg(&adapter->netdev->dev,
  992. "rxRing[%u][%u] 0 length\n",
  993. ring_idx, idx);
  994. goto rcd_done;
  995. }
  996. skip_page_frags = false;
  997. ctx->skb = rbi->skb;
  998. new_skb = dev_alloc_skb(rbi->len + NET_IP_ALIGN);
  999. if (new_skb == NULL) {
  1000. /* Skb allocation failed, do not handover this
  1001. * skb to stack. Reuse it. Drop the existing pkt
  1002. */
  1003. rq->stats.rx_buf_alloc_failure++;
  1004. ctx->skb = NULL;
  1005. rq->stats.drop_total++;
  1006. skip_page_frags = true;
  1007. goto rcd_done;
  1008. }
  1009. pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len,
  1010. PCI_DMA_FROMDEVICE);
  1011. skb_put(ctx->skb, rcd->len);
  1012. /* Immediate refill */
  1013. new_skb->dev = adapter->netdev;
  1014. skb_reserve(new_skb, NET_IP_ALIGN);
  1015. rbi->skb = new_skb;
  1016. rbi->dma_addr = pci_map_single(adapter->pdev,
  1017. rbi->skb->data, rbi->len,
  1018. PCI_DMA_FROMDEVICE);
  1019. rxd->addr = cpu_to_le64(rbi->dma_addr);
  1020. rxd->len = rbi->len;
  1021. } else {
  1022. BUG_ON(ctx->skb == NULL && !skip_page_frags);
  1023. /* non SOP buffer must be type 1 in most cases */
  1024. BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
  1025. BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);
  1026. /* If an sop buffer was dropped, skip all
  1027. * following non-sop fragments. They will be reused.
  1028. */
  1029. if (skip_page_frags)
  1030. goto rcd_done;
  1031. new_page = alloc_page(GFP_ATOMIC);
  1032. if (unlikely(new_page == NULL)) {
  1033. /* Replacement page frag could not be allocated.
  1034. * Reuse this page. Drop the pkt and free the
  1035. * skb which contained this page as a frag. Skip
  1036. * processing all the following non-sop frags.
  1037. */
  1038. rq->stats.rx_buf_alloc_failure++;
  1039. dev_kfree_skb(ctx->skb);
  1040. ctx->skb = NULL;
  1041. skip_page_frags = true;
  1042. goto rcd_done;
  1043. }
  1044. if (rcd->len) {
  1045. pci_unmap_page(adapter->pdev,
  1046. rbi->dma_addr, rbi->len,
  1047. PCI_DMA_FROMDEVICE);
  1048. vmxnet3_append_frag(ctx->skb, rcd, rbi);
  1049. }
  1050. /* Immediate refill */
  1051. rbi->page = new_page;
  1052. rbi->dma_addr = pci_map_page(adapter->pdev, rbi->page,
  1053. 0, PAGE_SIZE,
  1054. PCI_DMA_FROMDEVICE);
  1055. rxd->addr = cpu_to_le64(rbi->dma_addr);
  1056. rxd->len = rbi->len;
  1057. }
  1058. skb = ctx->skb;
  1059. if (rcd->eop) {
  1060. skb->len += skb->data_len;
  1061. vmxnet3_rx_csum(adapter, skb,
  1062. (union Vmxnet3_GenericDesc *)rcd);
  1063. skb->protocol = eth_type_trans(skb, adapter->netdev);
  1064. if (unlikely(rcd->ts))
  1065. __vlan_hwaccel_put_tag(skb, rcd->tci);
  1066. if (adapter->netdev->features & NETIF_F_LRO)
  1067. netif_receive_skb(skb);
  1068. else
  1069. napi_gro_receive(&rq->napi, skb);
  1070. ctx->skb = NULL;
  1071. }
  1072. rcd_done:
  1073. /* device may have skipped some rx descs */
  1074. ring->next2comp = idx;
  1075. num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
  1076. ring = rq->rx_ring + ring_idx;
  1077. while (num_to_alloc) {
  1078. vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
  1079. &rxCmdDesc);
  1080. BUG_ON(!rxd->addr);
  1081. /* Recv desc is ready to be used by the device */
  1082. rxd->gen = ring->gen;
  1083. vmxnet3_cmd_ring_adv_next2fill(ring);
  1084. num_to_alloc--;
  1085. }
  1086. /* if needed, update the register */
  1087. if (unlikely(rq->shared->updateRxProd)) {
  1088. VMXNET3_WRITE_BAR0_REG(adapter,
  1089. rxprod_reg[ring_idx] + rq->qid * 8,
  1090. ring->next2fill);
  1091. rq->uncommitted[ring_idx] = 0;
  1092. }
  1093. vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
  1094. vmxnet3_getRxComp(rcd,
  1095. &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
  1096. }
  1097. return num_rxd;
  1098. }
  1099. static void
  1100. vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
  1101. struct vmxnet3_adapter *adapter)
  1102. {
  1103. u32 i, ring_idx;
  1104. struct Vmxnet3_RxDesc *rxd;
  1105. for (ring_idx = 0; ring_idx < 2; ring_idx++) {
  1106. for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
  1107. #ifdef __BIG_ENDIAN_BITFIELD
  1108. struct Vmxnet3_RxDesc rxDesc;
  1109. #endif
  1110. vmxnet3_getRxDesc(rxd,
  1111. &rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);
  1112. if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
  1113. rq->buf_info[ring_idx][i].skb) {
  1114. pci_unmap_single(adapter->pdev, rxd->addr,
  1115. rxd->len, PCI_DMA_FROMDEVICE);
  1116. dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
  1117. rq->buf_info[ring_idx][i].skb = NULL;
  1118. } else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
  1119. rq->buf_info[ring_idx][i].page) {
  1120. pci_unmap_page(adapter->pdev, rxd->addr,
  1121. rxd->len, PCI_DMA_FROMDEVICE);
  1122. put_page(rq->buf_info[ring_idx][i].page);
  1123. rq->buf_info[ring_idx][i].page = NULL;
  1124. }
  1125. }
  1126. rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
  1127. rq->rx_ring[ring_idx].next2fill =
  1128. rq->rx_ring[ring_idx].next2comp = 0;
  1129. rq->uncommitted[ring_idx] = 0;
  1130. }
  1131. rq->comp_ring.gen = VMXNET3_INIT_GEN;
  1132. rq->comp_ring.next2proc = 0;
  1133. }
  1134. static void
  1135. vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
  1136. {
  1137. int i;
  1138. for (i = 0; i < adapter->num_rx_queues; i++)
  1139. vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
  1140. }
  1141. void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
  1142. struct vmxnet3_adapter *adapter)
  1143. {
  1144. int i;
  1145. int j;
  1146. /* all rx buffers must have already been freed */
  1147. for (i = 0; i < 2; i++) {
  1148. if (rq->buf_info[i]) {
  1149. for (j = 0; j < rq->rx_ring[i].size; j++)
  1150. BUG_ON(rq->buf_info[i][j].page != NULL);
  1151. }
  1152. }
  1153. kfree(rq->buf_info[0]);
  1154. for (i = 0; i < 2; i++) {
  1155. if (rq->rx_ring[i].base) {
  1156. pci_free_consistent(adapter->pdev, rq->rx_ring[i].size
  1157. * sizeof(struct Vmxnet3_RxDesc),
  1158. rq->rx_ring[i].base,
  1159. rq->rx_ring[i].basePA);
  1160. rq->rx_ring[i].base = NULL;
  1161. }
  1162. rq->buf_info[i] = NULL;
  1163. }
  1164. if (rq->comp_ring.base) {
  1165. pci_free_consistent(adapter->pdev, rq->comp_ring.size *
  1166. sizeof(struct Vmxnet3_RxCompDesc),
  1167. rq->comp_ring.base, rq->comp_ring.basePA);
  1168. rq->comp_ring.base = NULL;
  1169. }
  1170. }
static int
vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
		struct vmxnet3_adapter *adapter)
{
	int i;

	/* initialize buf_info */
	for (i = 0; i < rq->rx_ring[0].size; i++) {

		/* 1st buf for a pkt is skbuff */
		if (i % adapter->rx_buf_per_pkt == 0) {
			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
			rq->buf_info[0][i].len = adapter->skb_buf_size;
		} else { /* subsequent bufs for a pkt are frags */
			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
			rq->buf_info[0][i].len = PAGE_SIZE;
		}
	}
	for (i = 0; i < rq->rx_ring[1].size; i++) {
		rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
		rq->buf_info[1][i].len = PAGE_SIZE;
	}

	/* reset internal state and allocate buffers for both rings */
	for (i = 0; i < 2; i++) {
		rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
		rq->uncommitted[i] = 0;

		memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
		       sizeof(struct Vmxnet3_RxDesc));
		rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
	}
	if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
				    adapter) == 0) {
		/* we need at least one rx buffer for the 1st ring */
		return -ENOMEM;
	}
	vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);

	/* reset the comp ring */
	rq->comp_ring.next2proc = 0;
	memset(rq->comp_ring.base, 0, rq->comp_ring.size *
	       sizeof(struct Vmxnet3_RxCompDesc));
	rq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset rxctx */
	rq->rx_ctx.skb = NULL;

	/* stats are not reset */
	return 0;
}

static int
vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
		if (unlikely(err)) {
			dev_err(&adapter->netdev->dev, "%s: failed to "
				"initialize rx queue%i\n",
				adapter->netdev->name, i);
			break;
		}
	}
	return err;
}
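
/*
 * Allocate the DMA-coherent descriptor rings (two rx rings plus the
 * completion ring) and the buf_info array shared by both rx rings.
 */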
static int
vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
{
	int i;
	size_t sz;
	struct vmxnet3_rx_buf_info *bi;

	for (i = 0; i < 2; i++) {
		sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
		rq->rx_ring[i].base = pci_alloc_consistent(adapter->pdev, sz,
							&rq->rx_ring[i].basePA);
		if (!rq->rx_ring[i].base) {
			printk(KERN_ERR "%s: failed to allocate rx ring %d\n",
			       adapter->netdev->name, i);
			goto err;
		}
	}

	sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
	rq->comp_ring.base = pci_alloc_consistent(adapter->pdev, sz,
						  &rq->comp_ring.basePA);
	if (!rq->comp_ring.base) {
		printk(KERN_ERR "%s: failed to allocate rx comp ring\n",
		       adapter->netdev->name);
		goto err;
	}

	sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
						   rq->rx_ring[1].size);
	bi = kzalloc(sz, GFP_KERNEL);
	if (!bi)
		goto err;

	rq->buf_info[0] = bi;
	rq->buf_info[1] = bi + rq->rx_ring[0].size;

	return 0;

err:
	vmxnet3_rq_destroy(rq, adapter);
	return -ENOMEM;
}

static int
vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
		if (unlikely(err)) {
			dev_err(&adapter->netdev->dev,
				"%s: failed to create rx queue%i\n",
				adapter->netdev->name, i);
			goto err_out;
		}
	}
	return err;
err_out:
	vmxnet3_rq_destroy_all(adapter);
	return err;
}

/* Multiple queue aware polling function for tx and rx */
static int
vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
{
	int rcd_done = 0, i;

	if (unlikely(adapter->shared->ecr))
		vmxnet3_process_events(adapter);
	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);

	for (i = 0; i < adapter->num_rx_queues; i++)
		rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
						   adapter, budget);
	return rcd_done;
}
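
/*
 * NAPI polling function used when all queues share a single interrupt
 * (INTx/MSI); services every tx and rx queue from one napi context.
 */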
static int
vmxnet3_poll(struct napi_struct *napi, int budget)
{
	struct vmxnet3_rx_queue *rx_queue = container_of(napi,
					  struct vmxnet3_rx_queue, napi);
	int rxd_done;

	rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);

	if (rxd_done < budget) {
		napi_complete(napi);
		vmxnet3_enable_all_intrs(rx_queue->adapter);
	}
	return rxd_done;
}

/*
 * NAPI polling function for MSI-X mode with multiple Rx queues
 * Returns the # of the NAPI credit consumed (# of rx descriptors processed)
 */
static int
vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
{
	struct vmxnet3_rx_queue *rq = container_of(napi,
						struct vmxnet3_rx_queue, napi);
	struct vmxnet3_adapter *adapter = rq->adapter;
	int rxd_done;

	/* When sharing interrupt with corresponding tx queue, process
	 * tx completions in that queue as well
	 */
	if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
		struct vmxnet3_tx_queue *tq =
				&adapter->tx_queue[rq - adapter->rx_queue];
		vmxnet3_tq_tx_complete(tq, adapter);
	}

	rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);

	if (rxd_done < budget) {
		napi_complete(napi);
		vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
	}
	return rxd_done;
}

#ifdef CONFIG_PCI_MSI

/*
 * Handle completion interrupts on tx queues
 * Returns whether or not the intr is handled
 */
static irqreturn_t
vmxnet3_msix_tx(int irq, void *data)
{
	struct vmxnet3_tx_queue *tq = data;
	struct vmxnet3_adapter *adapter = tq->adapter;

	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);

	/* Handle the case where only one irq is allocated for all tx queues */
	if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
		int i;
		for (i = 0; i < adapter->num_tx_queues; i++) {
			struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
			vmxnet3_tq_tx_complete(txq, adapter);
		}
	} else {
		vmxnet3_tq_tx_complete(tq, adapter);
	}
	vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);

	return IRQ_HANDLED;
}

/*
 * Handle completion interrupts on rx queues. Returns whether or not the
 * intr is handled
 */
static irqreturn_t
vmxnet3_msix_rx(int irq, void *data)
{
	struct vmxnet3_rx_queue *rq = data;
	struct vmxnet3_adapter *adapter = rq->adapter;

	/* disable intr if needed */
	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
	napi_schedule(&rq->napi);

	return IRQ_HANDLED;
}

/*
 *----------------------------------------------------------------------------
 *
 * vmxnet3_msix_event --
 *
 *    vmxnet3 msix event intr handler
 *
 * Result:
 *    whether or not the intr is handled
 *
 *----------------------------------------------------------------------------
 */
static irqreturn_t
vmxnet3_msix_event(int irq, void *data)
{
	struct net_device *dev = data;
	struct vmxnet3_adapter *adapter = netdev_priv(dev);

	/* disable intr if needed */
	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);

	if (adapter->shared->ecr)
		vmxnet3_process_events(adapter);

	vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);

	return IRQ_HANDLED;
}

#endif /* CONFIG_PCI_MSI */

/* Interrupt handler for vmxnet3 */
static irqreturn_t
vmxnet3_intr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct vmxnet3_adapter *adapter = netdev_priv(dev);

	if (adapter->intr.type == VMXNET3_IT_INTX) {
		u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
		if (unlikely(icr == 0))
			/* not ours */
			return IRQ_NONE;
	}

	/* disable intr if needed */
	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_all_intrs(adapter);

	napi_schedule(&adapter->rx_queue[0].napi);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER

/* netpoll callback. */
static void
vmxnet3_netpoll(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_all_intrs(adapter);

	vmxnet3_do_poll(adapter, adapter->rx_queue[0].rx_ring[0].size);
	vmxnet3_enable_all_intrs(adapter);
}
#endif	/* CONFIG_NET_POLL_CONTROLLER */
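
/*
 * Request one irq per allocated vector: tx queues, rx queues (unless they
 * buddy-share a vector with tx) and the event interrupt, recording which
 * intr index each completion ring uses. Falls back to a single shared
 * handler for MSI and INTx.
 */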
static int
vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
{
	struct vmxnet3_intr *intr = &adapter->intr;
	int err = 0, i;
	int vector = 0;

#ifdef CONFIG_PCI_MSI
	if (adapter->intr.type == VMXNET3_IT_MSIX) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
				sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
					adapter->netdev->name, vector);
				err = request_irq(
					      intr->msix_entries[vector].vector,
					      vmxnet3_msix_tx, 0,
					      adapter->tx_queue[i].name,
					      &adapter->tx_queue[i]);
			} else {
				sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
					adapter->netdev->name, vector);
			}
			if (err) {
				dev_err(&adapter->netdev->dev,
					"Failed to request irq for MSIX, %s, "
					"error %d\n",
					adapter->tx_queue[i].name, err);
				return err;
			}

			/* Handle the case where only 1 MSIx was allocated for
			 * all tx queues */
			if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
				for (; i < adapter->num_tx_queues; i++)
					adapter->tx_queue[i].comp_ring.intr_idx
								= vector;
				vector++;
				break;
			} else {
				adapter->tx_queue[i].comp_ring.intr_idx
								= vector++;
			}
		}
		if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
			vector = 0;

		for (i = 0; i < adapter->num_rx_queues; i++) {
			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
				sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
					adapter->netdev->name, vector);
			else
				sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
					adapter->netdev->name, vector);
			err = request_irq(intr->msix_entries[vector].vector,
					  vmxnet3_msix_rx, 0,
					  adapter->rx_queue[i].name,
					  &(adapter->rx_queue[i]));
			if (err) {
				printk(KERN_ERR "Failed to request irq for MSIX"
				       ", %s, error %d\n",
				       adapter->rx_queue[i].name, err);
				return err;
			}

			adapter->rx_queue[i].comp_ring.intr_idx = vector++;
		}

		sprintf(intr->event_msi_vector_name, "%s-event-%d",
			adapter->netdev->name, vector);
		err = request_irq(intr->msix_entries[vector].vector,
				  vmxnet3_msix_event, 0,
				  intr->event_msi_vector_name, adapter->netdev);
		intr->event_intr_idx = vector;

	} else if (intr->type == VMXNET3_IT_MSI) {
		adapter->num_rx_queues = 1;
		err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
				  adapter->netdev->name, adapter->netdev);
	} else {
#endif
		adapter->num_rx_queues = 1;
		err = request_irq(adapter->pdev->irq, vmxnet3_intr,
				  IRQF_SHARED, adapter->netdev->name,
				  adapter->netdev);
#ifdef CONFIG_PCI_MSI
	}
#endif
	intr->num_intrs = vector + 1;
	if (err) {
		printk(KERN_ERR "Failed to request irq %s (intr type:%d), error"
		       ":%d\n", adapter->netdev->name, intr->type, err);
	} else {
		/* Number of rx queues will not change after this */
		for (i = 0; i < adapter->num_rx_queues; i++) {
			struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
			rq->qid = i;
			rq->qid2 = i + adapter->num_rx_queues;
		}

		/* init our intr settings */
		for (i = 0; i < intr->num_intrs; i++)
			intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
		if (adapter->intr.type != VMXNET3_IT_MSIX) {
			adapter->intr.event_intr_idx = 0;
			for (i = 0; i < adapter->num_tx_queues; i++)
				adapter->tx_queue[i].comp_ring.intr_idx = 0;
			adapter->rx_queue[0].comp_ring.intr_idx = 0;
		}

		printk(KERN_INFO "%s: intr type %u, mode %u, %u vectors "
		       "allocated\n", adapter->netdev->name, intr->type,
		       intr->mask_mode, intr->num_intrs);
	}

	return err;
}
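
/*
 * Release every irq acquired in vmxnet3_request_irqs(), walking the same
 * vector layout (tx vectors unless buddy-shared, rx vectors, event vector).
 */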
static void
vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
{
	struct vmxnet3_intr *intr = &adapter->intr;
	BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);

	switch (intr->type) {
#ifdef CONFIG_PCI_MSI
	case VMXNET3_IT_MSIX:
	{
		int i, vector = 0;

		if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
			for (i = 0; i < adapter->num_tx_queues; i++) {
				free_irq(intr->msix_entries[vector++].vector,
					 &(adapter->tx_queue[i]));
				if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
					break;
			}
		}

		for (i = 0; i < adapter->num_rx_queues; i++) {
			free_irq(intr->msix_entries[vector++].vector,
				 &(adapter->rx_queue[i]));
		}

		free_irq(intr->msix_entries[vector].vector,
			 adapter->netdev);
		BUG_ON(vector >= intr->num_intrs);
		break;
	}
#endif
	case VMXNET3_IT_MSI:
		free_irq(adapter->pdev->irq, adapter->netdev);
		break;
	case VMXNET3_IT_INTX:
		free_irq(adapter->pdev->irq, adapter->netdev);
		break;
	default:
		BUG_ON(true);
	}
}

static void
vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
{
	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
	u16 vid;

	/* allow untagged pkts */
	VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
}

static int
vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (!(netdev->flags & IFF_PROMISC)) {
		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
		unsigned long flags;

		VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	}

	set_bit(vid, adapter->active_vlans);

	return 0;
}


static int
vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (!(netdev->flags & IFF_PROMISC)) {
		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
		unsigned long flags;

		VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	}

	clear_bit(vid, adapter->active_vlans);

	return 0;
}
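
/*
 * Copy the device's multicast list into a flat buffer that can be handed
 * to the hypervisor; returns NULL if the list is too large or on OOM.
 */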
static u8 *
vmxnet3_copy_mc(struct net_device *netdev)
{
	u8 *buf = NULL;
	u32 sz = netdev_mc_count(netdev) * ETH_ALEN;

	/* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
	if (sz <= 0xffff) {
		/* We may be called with BH disabled */
		buf = kmalloc(sz, GFP_ATOMIC);
		if (buf) {
			struct netdev_hw_addr *ha;
			int i = 0;

			netdev_for_each_mc_addr(ha, netdev)
				memcpy(buf + i++ * ETH_ALEN, ha->addr,
				       ETH_ALEN);
		}
	}
	return buf;
}
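
/*
 * Recompute the rx mode (unicast/broadcast/multicast/promiscuous) from the
 * netdev flags and push the new mode, mac filters and vlan filters to the
 * device.
 */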
static void
vmxnet3_set_mc(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;
	struct Vmxnet3_RxFilterConf *rxConf =
					&adapter->shared->devRead.rxFilterConf;
	u8 *new_table = NULL;
	u32 new_mode = VMXNET3_RXM_UCAST;

	if (netdev->flags & IFF_PROMISC) {
		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
		memset(vfTable, 0, VMXNET3_VFT_SIZE * sizeof(*vfTable));

		new_mode |= VMXNET3_RXM_PROMISC;
	} else {
		vmxnet3_restore_vlan(adapter);
	}

	if (netdev->flags & IFF_BROADCAST)
		new_mode |= VMXNET3_RXM_BCAST;

	if (netdev->flags & IFF_ALLMULTI)
		new_mode |= VMXNET3_RXM_ALL_MULTI;
	else
		if (!netdev_mc_empty(netdev)) {
			new_table = vmxnet3_copy_mc(netdev);
			if (new_table) {
				new_mode |= VMXNET3_RXM_MCAST;
				rxConf->mfTableLen = cpu_to_le16(
					netdev_mc_count(netdev) * ETH_ALEN);
				rxConf->mfTablePA = cpu_to_le64(virt_to_phys(
						    new_table));
			} else {
				printk(KERN_INFO "%s: failed to copy mcast list"
				       ", setting ALL_MULTI\n", netdev->name);
				new_mode |= VMXNET3_RXM_ALL_MULTI;
			}
		}

	if (!(new_mode & VMXNET3_RXM_MCAST)) {
		rxConf->mfTableLen = 0;
		rxConf->mfTablePA = 0;
	}

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	if (new_mode != rxConf->rxMode) {
		rxConf->rxMode = cpu_to_le32(new_mode);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_RX_MODE);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
	}

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_MAC_FILTERS);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	kfree(new_table);
}

void
vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
}

/*
 * Set up driver_shared based on settings in adapter.
 */
static void
vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
{
	struct Vmxnet3_DriverShared *shared = adapter->shared;
	struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
	struct Vmxnet3_TxQueueConf *tqc;
	struct Vmxnet3_RxQueueConf *rqc;
	int i;

	memset(shared, 0, sizeof(*shared));

	/* driver settings */
	shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
	devRead->misc.driverInfo.version = cpu_to_le32(
						VMXNET3_DRIVER_VERSION_NUM);
	devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
				VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
	devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
	*((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
				*((u32 *)&devRead->misc.driverInfo.gos));
	devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
	devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);

	devRead->misc.ddPA = cpu_to_le64(virt_to_phys(adapter));
	devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));

	/* set up feature flags */
	if (adapter->netdev->features & NETIF_F_RXCSUM)
		devRead->misc.uptFeatures |= UPT1_F_RXCSUM;

	if (adapter->netdev->features & NETIF_F_LRO) {
		devRead->misc.uptFeatures |= UPT1_F_LRO;
		devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
	}
	if (adapter->netdev->features & NETIF_F_HW_VLAN_RX)
		devRead->misc.uptFeatures |= UPT1_F_RXVLAN;

	devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
	devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
	devRead->misc.queueDescLen = cpu_to_le32(
		adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
		adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));

	/* tx queue settings */
	devRead->misc.numTxQueues = adapter->num_tx_queues;
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
		BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
		tqc = &adapter->tqd_start[i].conf;
		tqc->txRingBasePA   = cpu_to_le64(tq->tx_ring.basePA);
		tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
		tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
		tqc->ddPA           = cpu_to_le64(virt_to_phys(tq->buf_info));
		tqc->txRingSize     = cpu_to_le32(tq->tx_ring.size);
		tqc->dataRingSize   = cpu_to_le32(tq->data_ring.size);
		tqc->compRingSize   = cpu_to_le32(tq->comp_ring.size);
		tqc->ddLen          = cpu_to_le32(
					sizeof(struct vmxnet3_tx_buf_info) *
					tqc->txRingSize);
		tqc->intrIdx        = tq->comp_ring.intr_idx;
	}

	/* rx queue settings */
	devRead->misc.numRxQueues = adapter->num_rx_queues;
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
		rqc = &adapter->rqd_start[i].conf;
		rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
		rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
		rqc->compRingBasePA  = cpu_to_le64(rq->comp_ring.basePA);
		rqc->ddPA            = cpu_to_le64(virt_to_phys(rq->buf_info));
		rqc->rxRingSize[0]   = cpu_to_le32(rq->rx_ring[0].size);
		rqc->rxRingSize[1]   = cpu_to_le32(rq->rx_ring[1].size);
		rqc->compRingSize    = cpu_to_le32(rq->comp_ring.size);
		rqc->ddLen           = cpu_to_le32(
					sizeof(struct vmxnet3_rx_buf_info) *
					(rqc->rxRingSize[0] +
					 rqc->rxRingSize[1]));
		rqc->intrIdx         = rq->comp_ring.intr_idx;
	}

#ifdef VMXNET3_RSS
	memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));

	if (adapter->rss) {
		struct UPT1_RSSConf *rssConf = adapter->rss_conf;
		devRead->misc.uptFeatures |= UPT1_F_RSS;
		devRead->misc.numRxQueues = adapter->num_rx_queues;
		rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
				    UPT1_RSS_HASH_TYPE_IPV4 |
				    UPT1_RSS_HASH_TYPE_TCP_IPV6 |
				    UPT1_RSS_HASH_TYPE_IPV6;
		rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
		rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
		rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
		get_random_bytes(&rssConf->hashKey[0], rssConf->hashKeySize);

		for (i = 0; i < rssConf->indTableSize; i++)
			rssConf->indTable[i] = ethtool_rxfh_indir_default(
				i, adapter->num_rx_queues);

		devRead->rssConfDesc.confVer = 1;
		devRead->rssConfDesc.confLen = sizeof(*rssConf);
		devRead->rssConfDesc.confPA  = virt_to_phys(rssConf);
	}

#endif /* VMXNET3_RSS */

	/* intr settings */
	devRead->intrConf.autoMask = adapter->intr.mask_mode ==
				     VMXNET3_IMM_AUTO;
	devRead->intrConf.numIntrs = adapter->intr.num_intrs;
	for (i = 0; i < adapter->intr.num_intrs; i++)
		devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];

	devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
	devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);

	/* rx filter settings */
	devRead->rxFilterConf.rxMode = 0;
	vmxnet3_restore_vlan(adapter);
	vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);

	/* the rest are already zeroed */
}
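
/*
 * Bring the device up: initialize the queues, request irqs, hand the
 * shared-memory layout to the device, issue ACTIVATE_DEV and, on success,
 * prime the rx rings and enable napi and interrupts.
 */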
int
vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
{
	int err, i;
	u32 ret;
	unsigned long flags;

	dev_dbg(&adapter->netdev->dev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
		" ring sizes %u %u %u\n", adapter->netdev->name,
		adapter->skb_buf_size, adapter->rx_buf_per_pkt,
		adapter->tx_queue[0].tx_ring.size,
		adapter->rx_queue[0].rx_ring[0].size,
		adapter->rx_queue[0].rx_ring[1].size);

	vmxnet3_tq_init_all(adapter);
	err = vmxnet3_rq_init_all(adapter);
	if (err) {
		printk(KERN_ERR "Failed to init rx queue for %s: error %d\n",
		       adapter->netdev->name, err);
		goto rq_err;
	}

	err = vmxnet3_request_irqs(adapter);
	if (err) {
		printk(KERN_ERR "Failed to setup irq for %s: error %d\n",
		       adapter->netdev->name, err);
		goto irq_err;
	}

	vmxnet3_setup_driver_shared(adapter);

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
			       adapter->shared_pa));
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
			       adapter->shared_pa));
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_ACTIVATE_DEV);
	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	if (ret != 0) {
		printk(KERN_ERR "Failed to activate dev %s: error %u\n",
		       adapter->netdev->name, ret);
		err = -EINVAL;
		goto activate_err;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		VMXNET3_WRITE_BAR0_REG(adapter,
				VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN,
				adapter->rx_queue[i].rx_ring[0].next2fill);
		VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 +
				(i * VMXNET3_REG_ALIGN)),
				adapter->rx_queue[i].rx_ring[1].next2fill);
	}

	/* Apply the rx filter settings last. */
	vmxnet3_set_mc(adapter->netdev);

	/*
	 * Check link state when first activating device. It will start the
	 * tx queue if the link is up.
	 */
	vmxnet3_check_link(adapter, true);
	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_enable(&adapter->rx_queue[i].napi);
	vmxnet3_enable_all_intrs(adapter);
	clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
	return 0;

activate_err:
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
	vmxnet3_free_irqs(adapter);
irq_err:
rq_err:
	/* free up buffers we allocated */
	vmxnet3_rq_cleanup_all(adapter);
	return err;
}

void
vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
{
	unsigned long flags;
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}
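
/*
 * Quiesce the device: stop napi and the tx queues, drop the link, clean up
 * all queues and release irqs. Safe to call more than once; subsequent
 * calls are no-ops while the QUIESCED bit is set.
 */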
int
vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
{
	int i;
	unsigned long flags;
	if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
		return 0;

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_QUIESCE_DEV);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	vmxnet3_disable_all_intrs(adapter);

	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_disable(&adapter->rx_queue[i].napi);
	netif_tx_disable(adapter->netdev);
	adapter->link_speed = 0;
	netif_carrier_off(adapter->netdev);

	vmxnet3_tq_cleanup_all(adapter);
	vmxnet3_rq_cleanup_all(adapter);
	vmxnet3_free_irqs(adapter);
	return 0;
}

static void
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
{
	u32 tmp;

	tmp = *(u32 *)mac;
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);

	tmp = (mac[5] << 8) | mac[4];
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
}


static int
vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
{
	struct sockaddr *addr = p;
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	vmxnet3_write_mac_addr(adapter, addr->sa_data);

	return 0;
}

/* ==================== initialization and cleanup routines ============ */
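
/*
 * Enable the PCI device, pick a 64- or 32-bit DMA mask, claim the BAR
 * regions and map BAR0 (rings/producer registers) and BAR1 (command and
 * configuration registers).
 */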
static int
vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
{
	int err;
	unsigned long mmio_start, mmio_len;
	struct pci_dev *pdev = adapter->pdev;

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR "Failed to enable adapter %s: error %d\n",
		       pci_name(pdev), err);
		return err;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			printk(KERN_ERR "pci_set_consistent_dma_mask failed "
			       "for adapter %s\n", pci_name(pdev));
			err = -EIO;
			goto err_set_mask;
		}
		*dma64 = true;
	} else {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
			printk(KERN_ERR "pci_set_dma_mask failed for adapter "
			       "%s\n", pci_name(pdev));
			err = -EIO;
			goto err_set_mask;
		}
		*dma64 = false;
	}

	err = pci_request_selected_regions(pdev, (1 << 2) - 1,
					   vmxnet3_driver_name);
	if (err) {
		printk(KERN_ERR "Failed to request region for adapter %s: "
		       "error %d\n", pci_name(pdev), err);
		goto err_set_mask;
	}

	pci_set_master(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
	if (!adapter->hw_addr0) {
		printk(KERN_ERR "Failed to map bar0 for adapter %s\n",
		       pci_name(pdev));
		err = -EIO;
		goto err_ioremap;
	}

	mmio_start = pci_resource_start(pdev, 1);
	mmio_len = pci_resource_len(pdev, 1);
	adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
	if (!adapter->hw_addr1) {
		printk(KERN_ERR "Failed to map bar1 for adapter %s\n",
		       pci_name(pdev));
		err = -EIO;
		goto err_bar1;
	}
	return 0;

err_bar1:
	iounmap(adapter->hw_addr0);
err_ioremap:
	pci_release_selected_regions(pdev, (1 << 2) - 1);
err_set_mask:
	pci_disable_device(pdev);
	return err;
}

static void
vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
{
	BUG_ON(!adapter->pdev);

	iounmap(adapter->hw_addr0);
	iounmap(adapter->hw_addr1);
	pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
	pci_disable_device(adapter->pdev);
}
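
/*
 * Derive skb_buf_size and rx_buf_per_pkt from the MTU, then round the
 * ring 0 size up to a multiple of rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
 * and apply the resulting sizes to every rx queue.
 */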
static void
vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
{
	size_t sz, i, ring0_size, ring1_size, comp_size;
	struct vmxnet3_rx_queue *rq = &adapter->rx_queue[0];

	if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
				    VMXNET3_MAX_ETH_HDR_SIZE) {
		adapter->skb_buf_size = adapter->netdev->mtu +
					VMXNET3_MAX_ETH_HDR_SIZE;
		if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
			adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;

		adapter->rx_buf_per_pkt = 1;
	} else {
		adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
		sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
					    VMXNET3_MAX_ETH_HDR_SIZE;
		adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
	}

	/*
	 * for simplicity, force the ring0 size to be a multiple of
	 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
	 */
	sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
	ring0_size = adapter->rx_queue[0].rx_ring[0].size;
	ring0_size = (ring0_size + sz - 1) / sz * sz;
	ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
			   sz * sz);
	ring1_size = adapter->rx_queue[0].rx_ring[1].size;
	comp_size = ring0_size + ring1_size;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		rq = &adapter->rx_queue[i];
		rq->rx_ring[0].size = ring0_size;
		rq->rx_ring[1].size = ring1_size;
		rq->comp_ring.size = comp_size;
	}
}

int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
		      u32 rx_ring_size, u32 rx_ring2_size)
{
	int err = 0, i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
		tq->tx_ring.size   = tx_ring_size;
		tq->data_ring.size = tx_ring_size;
		tq->comp_ring.size = tx_ring_size;
		tq->shared = &adapter->tqd_start[i].ctrl;
		tq->stopped = true;
		tq->adapter = adapter;
		tq->qid = i;
		err = vmxnet3_tq_create(tq, adapter);
		/*
		 * Too late to change num_tx_queues. We cannot make do with
		 * fewer queues than we asked for.
		 */
		if (err)
			goto queue_err;
	}

	adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
	adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
	vmxnet3_adjust_rx_ring_size(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
		/* qid and qid2 for rx queues will be assigned later when num
		 * of rx queues is finalized after allocating intrs */
		rq->shared = &adapter->rqd_start[i].ctrl;
		rq->adapter = adapter;
		err = vmxnet3_rq_create(rq, adapter);
		if (err) {
			if (i == 0) {
				printk(KERN_ERR "Could not allocate any rx "
				       "queues. Aborting.\n");
				goto queue_err;
			} else {
				printk(KERN_INFO "Number of rx queues changed "
				       "to : %d.\n", i);
				adapter->num_rx_queues = i;
				err = 0;
				break;
			}
		}
	}
	return err;
queue_err:
	vmxnet3_tq_destroy_all(adapter);
	return err;
}

static int
vmxnet3_open(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter;
	int err, i;

	adapter = netdev_priv(netdev);

	for (i = 0; i < adapter->num_tx_queues; i++)
		spin_lock_init(&adapter->tx_queue[i].tx_lock);

	err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE,
				    VMXNET3_DEF_RX_RING_SIZE,
				    VMXNET3_DEF_RX_RING_SIZE);
	if (err)
		goto queue_err;

	err = vmxnet3_activate_dev(adapter);
	if (err)
		goto activate_err;

	return 0;

activate_err:
	vmxnet3_rq_destroy_all(adapter);
	vmxnet3_tq_destroy_all(adapter);
queue_err:
	return err;
}


static int
vmxnet3_close(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	/*
	 * Reset_work may be in the middle of resetting the device, wait for its
	 * completion.
	 */
	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		msleep(1);

	vmxnet3_quiesce_dev(adapter);
	vmxnet3_rq_destroy_all(adapter);
	vmxnet3_tq_destroy_all(adapter);

	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);

	return 0;
}

void
vmxnet3_force_close(struct vmxnet3_adapter *adapter)
{
	int i;

	/*
	 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
	 * vmxnet3_close() will deadlock.
	 */
	BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));

	/* we need to enable NAPI, otherwise dev_close will deadlock */
	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_enable(&adapter->rx_queue[i].napi);
	dev_close(adapter->netdev);
}
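
/*
 * Change the MTU. If the device is running it must be quiesced and the rx
 * queues re-created, since the rx buffer sizes depend on the MTU; on any
 * failure the device is closed.
 */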
static int
vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	int err = 0;

	if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU)
		return -EINVAL;

	netdev->mtu = new_mtu;

	/*
	 * Reset_work may be in the middle of resetting the device, wait for its
	 * completion.
	 */
	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		msleep(1);

	if (netif_running(netdev)) {
		vmxnet3_quiesce_dev(adapter);
		vmxnet3_reset_dev(adapter);

		/* we need to re-create the rx queue based on the new mtu */
		vmxnet3_rq_destroy_all(adapter);
		vmxnet3_adjust_rx_ring_size(adapter);
		err = vmxnet3_rq_create_all(adapter);
		if (err) {
			printk(KERN_ERR "%s: failed to re-create rx queues,"
			       " error %d. Closing it.\n", netdev->name, err);
			goto out;
		}

		err = vmxnet3_activate_dev(adapter);
		if (err) {
			printk(KERN_ERR "%s: failed to re-activate, error %d. "
			       "Closing it\n", netdev->name, err);
			goto out;
		}
	}

out:
	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
	if (err)
		vmxnet3_force_close(adapter);

	return err;
}

static void
vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
{
	struct net_device *netdev = adapter->netdev;

	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
		NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_TX |
		NETIF_F_HW_VLAN_RX | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_LRO;
	if (dma64)
		netdev->hw_features |= NETIF_F_HIGHDMA;
	netdev->vlan_features = netdev->hw_features &
				~(NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_FILTER;

	netdev_info(adapter->netdev,
		    "features: sg csum vlan jf tso tsoIPv6 lro%s\n",
		    dma64 ? " highDMA" : "");
}


static void
vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
{
	u32 tmp;

	tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
	*(u32 *)mac = tmp;

	tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
	mac[4] = tmp & 0xff;
	mac[5] = (tmp >> 8) & 0xff;
}

#ifdef CONFIG_PCI_MSI

/*
 * Enable MSI-X vectors.
 * Returns:
 *	0 when the required number of vectors was enabled,
 *	VMXNET3_LINUX_MIN_MSIX_VECT when only the minimum required number of
 *	vectors could be enabled,
 *	otherwise the number of vectors that can be enabled (this number is
 *	smaller than VMXNET3_LINUX_MIN_MSIX_VECT).
 */
static int
vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter,
			     int vectors)
{
	int err = 0, vector_threshold;
	vector_threshold = VMXNET3_LINUX_MIN_MSIX_VECT;

	while (vectors >= vector_threshold) {
		err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
				      vectors);
		if (!err) {
			adapter->intr.num_intrs = vectors;
			return 0;
		} else if (err < 0) {
			netdev_err(adapter->netdev,
				   "Failed to enable MSI-X, error: %d\n", err);
			vectors = 0;
		} else if (err < vector_threshold) {
			break;
		} else {
			/* If it fails to enable the required number of MSI-X
			 * vectors, try enabling the minimum number required.
			 */
			netdev_err(adapter->netdev,
				   "Failed to enable %d MSI-X, trying %d instead\n",
				   vectors, vector_threshold);
			vectors = vector_threshold;
		}
	}

	netdev_info(adapter->netdev,
		    "Number of MSI-X interrupts which can be allocated are lower than min threshold required.\n");
	return err;
}


#endif /* CONFIG_PCI_MSI */
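
/*
 * Query the device for the interrupt type and mask mode, then try to bring
 * up MSI-X with one vector per queue plus one for events, falling back to
 * MSI and finally INTx, reducing the number of rx queues as needed.
 */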
static void
vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
{
	u32 cfg;
	unsigned long flags;

	/* intr settings */
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_GET_CONF_INTR);
	cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	adapter->intr.type = cfg & 0x3;
	adapter->intr.mask_mode = (cfg >> 2) & 0x3;

	if (adapter->intr.type == VMXNET3_IT_AUTO) {
		adapter->intr.type = VMXNET3_IT_MSIX;
	}

#ifdef CONFIG_PCI_MSI
	if (adapter->intr.type == VMXNET3_IT_MSIX) {
		int vector, err = 0;

		adapter->intr.num_intrs = (adapter->share_intr ==
					   VMXNET3_INTR_TXSHARE) ? 1 :
					   adapter->num_tx_queues;
		adapter->intr.num_intrs += (adapter->share_intr ==
					   VMXNET3_INTR_BUDDYSHARE) ? 0 :
					   adapter->num_rx_queues;
		adapter->intr.num_intrs += 1;		/* for link event */

		adapter->intr.num_intrs = (adapter->intr.num_intrs >
					   VMXNET3_LINUX_MIN_MSIX_VECT
					   ? adapter->intr.num_intrs :
					   VMXNET3_LINUX_MIN_MSIX_VECT);

		for (vector = 0; vector < adapter->intr.num_intrs; vector++)
			adapter->intr.msix_entries[vector].entry = vector;

		err = vmxnet3_acquire_msix_vectors(adapter,
						   adapter->intr.num_intrs);
		/* If we cannot allocate one MSIx vector per queue
		 * then limit the number of rx queues to 1
		 */
		if (err == VMXNET3_LINUX_MIN_MSIX_VECT) {
			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
			    || adapter->num_rx_queues != 1) {
				adapter->share_intr = VMXNET3_INTR_TXSHARE;
				printk(KERN_ERR "Number of rx queues : 1\n");
				adapter->num_rx_queues = 1;
				adapter->intr.num_intrs =
						VMXNET3_LINUX_MIN_MSIX_VECT;
			}
			return;
		}
		if (!err)
			return;

		/* If we cannot allocate MSIx vectors use only one rx queue */
		netdev_info(adapter->netdev,
			    "Failed to enable MSI-X, error %d. Limiting #rx queues to 1, try MSI.\n",
			    err);

		adapter->intr.type = VMXNET3_IT_MSI;
	}

	if (adapter->intr.type == VMXNET3_IT_MSI) {
		int err;
		err = pci_enable_msi(adapter->pdev);
		if (!err) {
			adapter->num_rx_queues = 1;
			adapter->intr.num_intrs = 1;
			return;
		}
	}
#endif /* CONFIG_PCI_MSI */

	adapter->num_rx_queues = 1;
	printk(KERN_INFO "Using INTx interrupt, #Rx queues: 1.\n");
	adapter->intr.type = VMXNET3_IT_INTX;

	/* INT-X related setting */
	adapter->intr.num_intrs = 1;
}

static void
vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
{
	if (adapter->intr.type == VMXNET3_IT_MSIX)
		pci_disable_msix(adapter->pdev);
	else if (adapter->intr.type == VMXNET3_IT_MSI)
		pci_disable_msi(adapter->pdev);
	else
		BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
}


static void
vmxnet3_tx_timeout(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	adapter->tx_timeout_count++;

	printk(KERN_ERR "%s: tx hang\n", adapter->netdev->name);
	schedule_work(&adapter->work);
	netif_wake_queue(adapter->netdev);
}

static void
vmxnet3_reset_work(struct work_struct *data)
{
	struct vmxnet3_adapter *adapter;

	adapter = container_of(data, struct vmxnet3_adapter, work);

	/* if another thread is resetting the device, no need to proceed */
	if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		return;

	/* if the device is closed, we must leave it alone */
	rtnl_lock();
	if (netif_running(adapter->netdev)) {
		printk(KERN_INFO "%s: resetting\n", adapter->netdev->name);
		vmxnet3_quiesce_dev(adapter);
		vmxnet3_reset_dev(adapter);
		vmxnet3_activate_dev(adapter);
	} else {
		printk(KERN_INFO "%s: already closed\n", adapter->netdev->name);
	}
	rtnl_unlock();

	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
}
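
/*
 * PCI probe: allocate the netdev and shared memory regions, check device
 * and UPT version compatibility, set up interrupts and napi, and register
 * the network device.
 */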
static int __devinit
vmxnet3_probe_device(struct pci_dev *pdev,
		     const struct pci_device_id *id)
{
	static const struct net_device_ops vmxnet3_netdev_ops = {
		.ndo_open = vmxnet3_open,
		.ndo_stop = vmxnet3_close,
		.ndo_start_xmit = vmxnet3_xmit_frame,
		.ndo_set_mac_address = vmxnet3_set_mac_addr,
		.ndo_change_mtu = vmxnet3_change_mtu,
		.ndo_set_features = vmxnet3_set_features,
		.ndo_get_stats64 = vmxnet3_get_stats64,
		.ndo_tx_timeout = vmxnet3_tx_timeout,
		.ndo_set_rx_mode = vmxnet3_set_mc,
		.ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
		.ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
		.ndo_poll_controller = vmxnet3_netpoll,
#endif
	};
	int err;
	bool dma64 = false; /* stupid gcc */
	u32 ver;
	struct net_device *netdev;
	struct vmxnet3_adapter *adapter;
	u8 mac[ETH_ALEN];
	int size;
	int num_tx_queues;
	int num_rx_queues;

	if (!pci_msi_enabled())
		enable_mq = 0;

#ifdef VMXNET3_RSS
	if (enable_mq)
		num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
				    (int)num_online_cpus());
	else
#endif
		num_rx_queues = 1;
	num_rx_queues = rounddown_pow_of_two(num_rx_queues);

	if (enable_mq)
		num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
				    (int)num_online_cpus());
	else
		num_tx_queues = 1;
	num_tx_queues = rounddown_pow_of_two(num_tx_queues);

	netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
				   max(num_tx_queues, num_rx_queues));
	printk(KERN_INFO "# of Tx queues : %d, # of Rx queues : %d\n",
	       num_tx_queues, num_rx_queues);

	if (!netdev)
		return -ENOMEM;

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;

	spin_lock_init(&adapter->cmd_lock);
	adapter->shared = pci_alloc_consistent(adapter->pdev,
			  sizeof(struct Vmxnet3_DriverShared),
			  &adapter->shared_pa);
	if (!adapter->shared) {
		printk(KERN_ERR "Failed to allocate memory for %s\n",
		       pci_name(pdev));
		err = -ENOMEM;
		goto err_alloc_shared;
	}

	adapter->num_rx_queues = num_rx_queues;
	adapter->num_tx_queues = num_tx_queues;

	size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
	size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
	adapter->tqd_start = pci_alloc_consistent(adapter->pdev, size,
			     &adapter->queue_desc_pa);

	if (!adapter->tqd_start) {
		printk(KERN_ERR "Failed to allocate memory for %s\n",
		       pci_name(pdev));
		err = -ENOMEM;
		goto err_alloc_queue_desc;
	}
	adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
							adapter->num_tx_queues);

	adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL);
	if (adapter->pm_conf == NULL) {
		err = -ENOMEM;
		goto err_alloc_pm;
	}

#ifdef VMXNET3_RSS

	adapter->rss_conf = kmalloc(sizeof(struct UPT1_RSSConf), GFP_KERNEL);
	if (adapter->rss_conf == NULL) {
		err = -ENOMEM;
		goto err_alloc_rss;
	}
#endif /* VMXNET3_RSS */

	err = vmxnet3_alloc_pci_resources(adapter, &dma64);
	if (err < 0)
		goto err_alloc_pci;

	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
	if (ver & 1) {
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1);
	} else {
		printk(KERN_ERR "Incompatible h/w version (0x%x) for adapter"
		       " %s\n", ver, pci_name(pdev));
		err = -EBUSY;
		goto err_ver;
	}

	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
	if (ver & 1) {
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
	} else {
		printk(KERN_ERR "Incompatible upt version (0x%x) for "
		       "adapter %s\n", ver, pci_name(pdev));
		err = -EBUSY;
		goto err_ver;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);
	vmxnet3_declare_features(adapter, dma64);

	adapter->dev_number = atomic_read(&devices_found);

	adapter->share_intr = irq_share_mode;
	if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE &&
	    adapter->num_tx_queues != adapter->num_rx_queues)
		adapter->share_intr = VMXNET3_INTR_DONTSHARE;

	vmxnet3_alloc_intr_resources(adapter);

#ifdef VMXNET3_RSS
	if (adapter->num_rx_queues > 1 &&
	    adapter->intr.type == VMXNET3_IT_MSIX) {
		adapter->rss = true;
		printk(KERN_INFO "RSS is enabled.\n");
	} else {
		adapter->rss = false;
	}
#endif

	vmxnet3_read_mac_addr(adapter, mac);
	memcpy(netdev->dev_addr, mac, netdev->addr_len);

	netdev->netdev_ops = &vmxnet3_netdev_ops;
	vmxnet3_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	INIT_WORK(&adapter->work, vmxnet3_reset_work);

	if (adapter->intr.type == VMXNET3_IT_MSIX) {
		int i;
		for (i = 0; i < adapter->num_rx_queues; i++) {
			netif_napi_add(adapter->netdev,
				       &adapter->rx_queue[i].napi,
				       vmxnet3_poll_rx_only, 64);
		}
	} else {
		netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
			       vmxnet3_poll, 64);
	}

	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
	netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);

	err = register_netdev(netdev);

	if (err) {
		printk(KERN_ERR "Failed to register adapter %s\n",
		       pci_name(pdev));
		goto err_register;
	}

	set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
	vmxnet3_check_link(adapter, false);
	atomic_inc(&devices_found);
	return 0;

err_register:
	vmxnet3_free_intr_resources(adapter);
err_ver:
	vmxnet3_free_pci_resources(adapter);
err_alloc_pci:
#ifdef VMXNET3_RSS
	kfree(adapter->rss_conf);
err_alloc_rss:
#endif
	kfree(adapter->pm_conf);
err_alloc_pm:
	pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
			    adapter->queue_desc_pa);
err_alloc_queue_desc:
	pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
			    adapter->shared, adapter->shared_pa);
err_alloc_shared:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	return err;
}

static void __devexit
vmxnet3_remove_device(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	int size = 0;
	int num_rx_queues;

#ifdef VMXNET3_RSS
	if (enable_mq)
		num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
				    (int)num_online_cpus());
	else
#endif
		num_rx_queues = 1;
	num_rx_queues = rounddown_pow_of_two(num_rx_queues);

	cancel_work_sync(&adapter->work);

	unregister_netdev(netdev);

	vmxnet3_free_intr_resources(adapter);
	vmxnet3_free_pci_resources(adapter);
#ifdef VMXNET3_RSS
	kfree(adapter->rss_conf);
#endif
	kfree(adapter->pm_conf);

	size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
	size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
	pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
			    adapter->queue_desc_pa);
	pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
			    adapter->shared, adapter->shared_pa);
	free_netdev(netdev);
}

#ifdef CONFIG_PM
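
/*
 * Suspend: tear down irqs and, based on the configured WOL flags, program
 * wake-up filters (unicast MAC match and/or ARP requests for the primary
 * IPv4 address) before powering the device down.
 */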
static int
vmxnet3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_PMConf *pmConf;
	struct ethhdr *ehdr;
	struct arphdr *ahdr;
	u8 *arpreq;
	struct in_device *in_dev;
	struct in_ifaddr *ifa;
	unsigned long flags;
	int i = 0;

	if (!netif_running(netdev))
		return 0;

	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_disable(&adapter->rx_queue[i].napi);

	vmxnet3_disable_all_intrs(adapter);
	vmxnet3_free_irqs(adapter);
	vmxnet3_free_intr_resources(adapter);

	netif_device_detach(netdev);
	netif_tx_stop_all_queues(netdev);

	/* Create wake-up filters. */
	pmConf = adapter->pm_conf;
	memset(pmConf, 0, sizeof(*pmConf));

	if (adapter->wol & WAKE_UCAST) {
		pmConf->filters[i].patternSize = ETH_ALEN;
		pmConf->filters[i].maskSize = 1;
		memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
		pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */

		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
		i++;
	}

	if (adapter->wol & WAKE_ARP) {
		in_dev = in_dev_get(netdev);
		if (!in_dev)
			goto skip_arp;

		ifa = (struct in_ifaddr *)in_dev->ifa_list;
		if (!ifa)
			goto skip_arp;

		pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/
			sizeof(struct arphdr) +		/* ARP header */
			2 * ETH_ALEN +			/* 2 Ethernet addresses*/
			2 * sizeof(u32);		/*2 IPv4 addresses */
		pmConf->filters[i].maskSize =
			(pmConf->filters[i].patternSize - 1) / 8 + 1;

		/* ETH_P_ARP in Ethernet header. */
		ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
		ehdr->h_proto = htons(ETH_P_ARP);

		/* ARPOP_REQUEST in ARP header. */
		ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
		ahdr->ar_op = htons(ARPOP_REQUEST);
		arpreq = (u8 *)(ahdr + 1);

		/* The Unicast IPv4 address in 'tip' field. */
		arpreq += 2 * ETH_ALEN + sizeof(u32);
		*(u32 *)arpreq = ifa->ifa_address;

		/* The mask for the relevant bits. */
		pmConf->filters[i].mask[0] = 0x00;
		pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
		pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
		pmConf->filters[i].mask[3] = 0x00;
		pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
		pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
		in_dev_put(in_dev);

		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
		i++;
	}

skip_arp:
	if (adapter->wol & WAKE_MAGIC)
		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;

	pmConf->numFilters = i;

	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
	adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
								  *pmConf));
	adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
								 pmConf));

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_PMCFG);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
			adapter->wol);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));

	return 0;
}

static int
vmxnet3_resume(struct device *device)
{
	int err, i = 0;
	unsigned long flags;
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_PMConf *pmConf;

	if (!netif_running(netdev))
		return 0;

	/* Destroy wake-up filters. */
	pmConf = adapter->pm_conf;
	memset(pmConf, 0, sizeof(*pmConf));

	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
	adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
								  *pmConf));
	adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
								 pmConf));

	netif_device_attach(netdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device_mem(pdev);
	if (err != 0)
		return err;

	pci_enable_wake(pdev, PCI_D0, 0);

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_PMCFG);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	vmxnet3_alloc_intr_resources(adapter);
	vmxnet3_request_irqs(adapter);

	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_enable(&adapter->rx_queue[i].napi);
	vmxnet3_enable_all_intrs(adapter);

	return 0;
}

static const struct dev_pm_ops vmxnet3_pm_ops = {
	.suspend = vmxnet3_suspend,
	.resume = vmxnet3_resume,
};
#endif

static struct pci_driver vmxnet3_driver = {
	.name		= vmxnet3_driver_name,
	.id_table	= vmxnet3_pciid_table,
	.probe		= vmxnet3_probe_device,
	.remove		= __devexit_p(vmxnet3_remove_device),
#ifdef CONFIG_PM
	.driver.pm	= &vmxnet3_pm_ops,
#endif
};


static int __init
vmxnet3_init_module(void)
{
	printk(KERN_INFO "%s - version %s\n", VMXNET3_DRIVER_DESC,
	       VMXNET3_DRIVER_VERSION_REPORT);
	return pci_register_driver(&vmxnet3_driver);
}

module_init(vmxnet3_init_module);


static void
vmxnet3_exit_module(void)
{
	pci_unregister_driver(&vmxnet3_driver);
}

module_exit(vmxnet3_exit_module);

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);