vmxnet3_drv.c

/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 *
 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
 *
 */

#include <net/ip6_checksum.h>

#include "vmxnet3_int.h"

char vmxnet3_driver_name[] = "vmxnet3";
#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"

/*
 * PCI Device ID Table
 * Last entry must be all 0s
 */
static DEFINE_PCI_DEVICE_TABLE(vmxnet3_pciid_table) = {
	{PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
	{0}
};

MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);

static atomic_t devices_found;

/*
 * Enable/Disable the given intr
 */
static void
vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
}

static void
vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
}

/*
 * Enable/Disable all intrs used by the device
 */
static void
vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_enable_intr(adapter, i);
	adapter->shared->devRead.intrConf.intrCtrl &=
				cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
}

static void
vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	adapter->shared->devRead.intrConf.intrCtrl |=
				cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_disable_intr(adapter, i);
}

static void
vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
{
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
}

static bool
vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	return netif_queue_stopped(adapter->netdev);
}

static void
vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_start_queue(adapter->netdev);
}

static void
vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_wake_queue(adapter->netdev);
}

static void
vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = true;
	tq->num_stop++;
	netif_stop_queue(adapter->netdev);
}

/*
 * Check the link state. This may start or stop the tx queue.
 */
static void
vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
{
	u32 ret;

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	adapter->link_speed = ret >> 16;
	if (ret & 1) { /* Link is up. */
		printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",
		       adapter->netdev->name, adapter->link_speed);
		if (!netif_carrier_ok(adapter->netdev))
			netif_carrier_on(adapter->netdev);

		if (affectTxQueue)
			vmxnet3_tq_start(&adapter->tx_queue, adapter);
	} else {
		printk(KERN_INFO "%s: NIC Link is Down\n",
		       adapter->netdev->name);
		if (netif_carrier_ok(adapter->netdev))
			netif_carrier_off(adapter->netdev);

		if (affectTxQueue)
			vmxnet3_tq_stop(&adapter->tx_queue, adapter);
	}
}

static void
vmxnet3_process_events(struct vmxnet3_adapter *adapter)
{
	u32 events = le32_to_cpu(adapter->shared->ecr);

	if (!events)
		return;

	vmxnet3_ack_events(adapter, events);

	/* Check if link state has changed */
	if (events & VMXNET3_ECR_LINK)
		vmxnet3_check_link(adapter, true);

	/* Check if there is an error on xmit/recv queues */
	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_GET_QUEUE_STATUS);

		if (adapter->tqd_start->status.stopped) {
			printk(KERN_ERR "%s: tq error 0x%x\n",
			       adapter->netdev->name,
			       le32_to_cpu(adapter->tqd_start->status.error));
		}
		if (adapter->rqd_start->status.stopped) {
			printk(KERN_ERR "%s: rq error 0x%x\n",
			       adapter->netdev->name,
			       adapter->rqd_start->status.error);
		}

		schedule_work(&adapter->work);
	}
}

#ifdef __BIG_ENDIAN_BITFIELD
/*
 * The device expects the bitfields in shared structures to be written in
 * little endian. When the CPU is big endian, the following routines are used
 * to correctly read from and write to the ABI.
 * The general technique used here is: double word bitfields are defined in
 * the opposite order for big endian architectures. Then, before the driver
 * reads them, the complete double word is translated using le32_to_cpu.
 * Similarly, after the driver writes into the bitfields, cpu_to_le32 is used
 * to translate the double words into the required format.
 * In order to avoid touching bits in the shared structure more than once,
 * temporary descriptors are used. These are passed as srcDesc to the
 * following functions.
 */
static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
				struct Vmxnet3_RxDesc *dstDesc)
{
	u32 *src = (u32 *)srcDesc + 2;
	u32 *dst = (u32 *)dstDesc + 2;
	dstDesc->addr = le64_to_cpu(srcDesc->addr);
	*dst = le32_to_cpu(*src);
	dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
}

static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
			       struct Vmxnet3_TxDesc *dstDesc)
{
	int i;
	u32 *src = (u32 *)(srcDesc + 1);
	u32 *dst = (u32 *)(dstDesc + 1);

	/* Working backwards so that the gen bit is set at the end. */
	for (i = 2; i > 0; i--) {
		src--;
		dst--;
		*dst = cpu_to_le32(*src);
	}
}

static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
				struct Vmxnet3_RxCompDesc *dstDesc)
{
	int i = 0;
	u32 *src = (u32 *)srcDesc;
	u32 *dst = (u32 *)dstDesc;

	for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
		*dst = le32_to_cpu(*src);
		src++;
		dst++;
	}
}

/* Used to read bitfield values from double words. */
static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
{
	u32 temp = le32_to_cpu(*bitfield);
	u32 mask = ((1 << size) - 1) << pos;
	temp &= mask;
	temp >>= pos;
	return temp;
}

#endif  /* __BIG_ENDIAN_BITFIELD */
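
/*
 * For illustration: on a big-endian CPU, reading the gen bit of a Tx
 * descriptor goes through get_bitfield32() on the dword that holds it, e.g.
 *
 *	gen = get_bitfield32((const __le32 *)txdesc +
 *			     VMXNET3_TXD_GEN_DWORD_SHIFT,
 *			     VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE);
 *
 * which is what the VMXNET3_TXDESC_GET_GEN() macro below expands to. On
 * little-endian CPUs the bitfield is read directly.
 */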
#ifdef __BIG_ENDIAN_BITFIELD

#  define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
			VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
#  define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
			VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
#  define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
			VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
			VMXNET3_TCD_GEN_SIZE)
#  define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
			VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
#  define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
			(dstrcd) = (tmp); \
			vmxnet3_RxCompToCPU((rcd), (tmp)); \
		} while (0)
#  define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
			(dstrxd) = (tmp); \
			vmxnet3_RxDescToCPU((rxd), (tmp)); \
		} while (0)

#else

#  define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
#  define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
#  define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
#  define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
#  define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
#  define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)

#endif /* __BIG_ENDIAN_BITFIELD  */

static void
vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
		     struct pci_dev *pdev)
{
	if (tbi->map_type == VMXNET3_MAP_SINGLE)
		pci_unmap_single(pdev, tbi->dma_addr, tbi->len,
				 PCI_DMA_TODEVICE);
	else if (tbi->map_type == VMXNET3_MAP_PAGE)
		pci_unmap_page(pdev, tbi->dma_addr, tbi->len,
			       PCI_DMA_TODEVICE);
	else
		BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);

	tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
}

static int
vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
		  struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
{
	struct sk_buff *skb;
	int entries = 0;

	/* no out of order completion */
	BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
	BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);

	skb = tq->buf_info[eop_idx].skb;
	BUG_ON(skb == NULL);
	tq->buf_info[eop_idx].skb = NULL;

	VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);

	while (tq->tx_ring.next2comp != eop_idx) {
		vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
				     pdev);

		/* update next2comp w/o tx_lock. Since we are marking more,
		 * instead of less, tx ring entries avail, the worst case is
		 * that the tx routine incorrectly re-queues a pkt due to
		 * insufficient tx ring entries.
		 */
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
		entries++;
	}

	dev_kfree_skb_any(skb);
	return entries;
}

static int
vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
		       struct vmxnet3_adapter *adapter)
{
	int completed = 0;
	union Vmxnet3_GenericDesc *gdesc;

	gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
		completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
					       &gdesc->tcd), tq, adapter->pdev,
					       adapter);

		vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
		gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	}

	if (completed) {
		spin_lock(&tq->tx_lock);
		if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
			     vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
			     VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
			     netif_carrier_ok(adapter->netdev))) {
			vmxnet3_tq_wake(tq, adapter);
		}
		spin_unlock(&tq->tx_lock);
	}
	return completed;
}
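
/*
 * Unmap and free any skbs still held by the tx queue (everything between
 * next2comp and next2fill), then reset the tx ring and comp ring to their
 * initial empty state. Queue stats are not reset.
 */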
static void
vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	int i;

	while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
		struct vmxnet3_tx_buf_info *tbi;
		union Vmxnet3_GenericDesc *gdesc;

		tbi = tq->buf_info + tq->tx_ring.next2comp;
		gdesc = tq->tx_ring.base + tq->tx_ring.next2comp;

		vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
		if (tbi->skb) {
			dev_kfree_skb_any(tbi->skb);
			tbi->skb = NULL;
		}
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
	}

	/* sanity check, verify all buffers are indeed unmapped and freed */
	for (i = 0; i < tq->tx_ring.size; i++) {
		BUG_ON(tq->buf_info[i].skb != NULL ||
		       tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
	}

	tq->tx_ring.gen = VMXNET3_INIT_GEN;
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;

	tq->comp_ring.gen = VMXNET3_INIT_GEN;
	tq->comp_ring.next2proc = 0;
}

void
vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	if (tq->tx_ring.base) {
		pci_free_consistent(adapter->pdev, tq->tx_ring.size *
				    sizeof(struct Vmxnet3_TxDesc),
				    tq->tx_ring.base, tq->tx_ring.basePA);
		tq->tx_ring.base = NULL;
	}
	if (tq->data_ring.base) {
		pci_free_consistent(adapter->pdev, tq->data_ring.size *
				    sizeof(struct Vmxnet3_TxDataDesc),
				    tq->data_ring.base, tq->data_ring.basePA);
		tq->data_ring.base = NULL;
	}
	if (tq->comp_ring.base) {
		pci_free_consistent(adapter->pdev, tq->comp_ring.size *
				    sizeof(struct Vmxnet3_TxCompDesc),
				    tq->comp_ring.base, tq->comp_ring.basePA);
		tq->comp_ring.base = NULL;
	}
	kfree(tq->buf_info);
	tq->buf_info = NULL;
}

static void
vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter)
{
	int i;

	/* reset the tx ring contents to 0 and reset the tx ring states */
	memset(tq->tx_ring.base, 0, tq->tx_ring.size *
	       sizeof(struct Vmxnet3_TxDesc));
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
	tq->tx_ring.gen = VMXNET3_INIT_GEN;

	memset(tq->data_ring.base, 0, tq->data_ring.size *
	       sizeof(struct Vmxnet3_TxDataDesc));

	/* reset the tx comp ring contents to 0 and reset comp ring states */
	memset(tq->comp_ring.base, 0, tq->comp_ring.size *
	       sizeof(struct Vmxnet3_TxCompDesc));
	tq->comp_ring.next2proc = 0;
	tq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset the bookkeeping data */
	memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
	for (i = 0; i < tq->tx_ring.size; i++)
		tq->buf_info[i].map_type = VMXNET3_MAP_NONE;

	/* stats are not reset */
}
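
/*
 * Allocate the DMA-coherent tx ring, data ring and comp ring, plus the
 * buf_info bookkeeping array, for one tx queue. On any failure everything
 * allocated so far is torn down via vmxnet3_tq_destroy() and -ENOMEM is
 * returned.
 */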
static int
vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
		  struct vmxnet3_adapter *adapter)
{
	BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
	       tq->comp_ring.base || tq->buf_info);

	tq->tx_ring.base = pci_alloc_consistent(adapter->pdev, tq->tx_ring.size
			   * sizeof(struct Vmxnet3_TxDesc),
			   &tq->tx_ring.basePA);
	if (!tq->tx_ring.base) {
		printk(KERN_ERR "%s: failed to allocate tx ring\n",
		       adapter->netdev->name);
		goto err;
	}

	tq->data_ring.base = pci_alloc_consistent(adapter->pdev,
			     tq->data_ring.size *
			     sizeof(struct Vmxnet3_TxDataDesc),
			     &tq->data_ring.basePA);
	if (!tq->data_ring.base) {
		printk(KERN_ERR "%s: failed to allocate data ring\n",
		       adapter->netdev->name);
		goto err;
	}

	tq->comp_ring.base = pci_alloc_consistent(adapter->pdev,
			     tq->comp_ring.size *
			     sizeof(struct Vmxnet3_TxCompDesc),
			     &tq->comp_ring.basePA);
	if (!tq->comp_ring.base) {
		printk(KERN_ERR "%s: failed to allocate tx comp ring\n",
		       adapter->netdev->name);
		goto err;
	}

	tq->buf_info = kcalloc(tq->tx_ring.size, sizeof(tq->buf_info[0]),
			       GFP_KERNEL);
	if (!tq->buf_info) {
		printk(KERN_ERR "%s: failed to allocate tx bufinfo\n",
		       adapter->netdev->name);
		goto err;
	}

	return 0;

err:
	vmxnet3_tq_destroy(tq, adapter);
	return -ENOMEM;
}

/*
 * starting from ring->next2fill, allocate rx buffers for the given ring
 * of the rx queue and update the rx desc. stop after @num_to_alloc buffers
 * are allocated or allocation fails
 */
static int
vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
			int num_to_alloc, struct vmxnet3_adapter *adapter)
{
	int num_allocated = 0;
	struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
	struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
	u32 val;

	while (num_allocated < num_to_alloc) {
		struct vmxnet3_rx_buf_info *rbi;
		union Vmxnet3_GenericDesc *gd;

		rbi = rbi_base + ring->next2fill;
		gd = ring->base + ring->next2fill;

		if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
			if (rbi->skb == NULL) {
				rbi->skb = dev_alloc_skb(rbi->len +
							 NET_IP_ALIGN);
				if (unlikely(rbi->skb == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
				rbi->skb->dev = adapter->netdev;

				skb_reserve(rbi->skb, NET_IP_ALIGN);
				rbi->dma_addr = pci_map_single(adapter->pdev,
						rbi->skb->data, rbi->len,
						PCI_DMA_FROMDEVICE);
			} else {
				/* rx buffer skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
		} else {
			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
			       rbi->len != PAGE_SIZE);
			if (rbi->page == NULL) {
				rbi->page = alloc_page(GFP_ATOMIC);
				if (unlikely(rbi->page == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
				rbi->dma_addr = pci_map_page(adapter->pdev,
						rbi->page, 0, PAGE_SIZE,
						PCI_DMA_FROMDEVICE);
			} else {
				/* rx buffers skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
		}

		BUG_ON(rbi->dma_addr == 0);
		gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
		gd->dword[2] = cpu_to_le32((ring->gen << VMXNET3_RXD_GEN_SHIFT)
					   | val | rbi->len);

		num_allocated++;
		vmxnet3_cmd_ring_adv_next2fill(ring);
	}
	rq->uncommitted[ring_idx] += num_allocated;

	dev_dbg(&adapter->netdev->dev,
		"alloc_rx_buf: %d allocated, next2fill %u, next2comp "
		"%u, uncommitted %u\n", num_allocated, ring->next2fill,
		ring->next2comp, rq->uncommitted[ring_idx]);

	/* so that the device can distinguish a full ring and an empty ring */
	BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);

	return num_allocated;
}
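
/*
 * Chain the page backing a body rx buffer onto @skb as a new page fragment,
 * using the length reported in the rx completion descriptor.
 */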
  495. static void
  496. vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
  497. struct vmxnet3_rx_buf_info *rbi)
  498. {
  499. struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
  500. skb_shinfo(skb)->nr_frags;
  501. BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
  502. frag->page = rbi->page;
  503. frag->page_offset = 0;
  504. frag->size = rcd->len;
  505. skb->data_len += frag->size;
  506. skb_shinfo(skb)->nr_frags++;
  507. }
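
/*
 * Fill tx descriptors for @skb: an optional descriptor pointing at the
 * header bytes already copied into the data ring, one or more descriptors
 * for the rest of the linear part, and one per page fragment. The SOP
 * descriptor keeps the inverted gen bit until vmxnet3_tq_xmit() flips it.
 */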
static void
vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
		struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
		struct vmxnet3_adapter *adapter)
{
	u32 dw2, len;
	unsigned long buf_offset;
	int i;
	union Vmxnet3_GenericDesc *gdesc;
	struct vmxnet3_tx_buf_info *tbi = NULL;

	BUG_ON(ctx->copy_size > skb_headlen(skb));

	/* use the previous gen bit for the SOP desc */
	dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;

	ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
	gdesc = ctx->sop_txd; /* both loops below can be skipped */

	/* no need to map the buffer if headers are copied */
	if (ctx->copy_size) {
		ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
					tq->tx_ring.next2fill *
					sizeof(struct Vmxnet3_TxDataDesc));
		ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
		ctx->sop_txd->dword[3] = 0;

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_NONE;

		dev_dbg(&adapter->netdev->dev,
			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
			tq->tx_ring.next2fill,
			le64_to_cpu(ctx->sop_txd->txd.addr),
			ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);

		/* use the right gen for non-SOP desc */
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
	}

	/* linear part can use multiple tx desc if it's big */
	len = skb_headlen(skb) - ctx->copy_size;
	buf_offset = ctx->copy_size;
	while (len) {
		u32 buf_size;

		if (len < VMXNET3_MAX_TX_BUF_SIZE) {
			buf_size = len;
			dw2 |= len;
		} else {
			buf_size = VMXNET3_MAX_TX_BUF_SIZE;
			/* spec says that for TxDesc.len, 0 == 2^14 */
		}

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_SINGLE;
		tbi->dma_addr = pci_map_single(adapter->pdev,
				skb->data + buf_offset, buf_size,
				PCI_DMA_TODEVICE);

		tbi->len = buf_size;

		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

		gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
		gdesc->dword[2] = cpu_to_le32(dw2);
		gdesc->dword[3] = 0;

		dev_dbg(&adapter->netdev->dev,
			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
			tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
			le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;

		len -= buf_size;
		buf_offset += buf_size;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_PAGE;
		tbi->dma_addr = pci_map_page(adapter->pdev, frag->page,
					     frag->page_offset, frag->size,
					     PCI_DMA_TODEVICE);

		tbi->len = frag->size;

		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

		gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
		gdesc->dword[2] = cpu_to_le32(dw2 | frag->size);
		gdesc->dword[3] = 0;

		dev_dbg(&adapter->netdev->dev,
			"txd[%u]: 0x%llu %u %u\n",
			tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
			le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
	}

	ctx->eop_txd = gdesc;
	/* set the last buf_info for the pkt */
	tbi->skb = skb;
	tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
}

/*
 * parse and copy relevant protocol headers:
 *	For a tso pkt, relevant headers are L2/3/4 including options
 *	For a pkt requesting csum offloading, they are L2/3 and may include L4
 *	if it's a TCP/UDP pkt
 *
 * Returns:
 *	-1:  error happens during parsing
 *	 0:  protocol headers parsed, but too big to be copied
 *	 1:  protocol headers parsed and copied
 *
 * Other effects:
 *	1. related *ctx fields are updated.
 *	2. ctx->copy_size is # of bytes copied
 *	3. the portion copied is guaranteed to be in the linear part
 *
 */
static int
vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
			   struct vmxnet3_tx_ctx *ctx,
			   struct vmxnet3_adapter *adapter)
{
	struct Vmxnet3_TxDataDesc *tdd;

	if (ctx->mss) {
		ctx->eth_ip_hdr_size = skb_transport_offset(skb);
		ctx->l4_hdr_size = ((struct tcphdr *)
				   skb_transport_header(skb))->doff * 4;
		ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
	} else {
		unsigned int pull_size;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			ctx->eth_ip_hdr_size = skb_transport_offset(skb);

			if (ctx->ipv4) {
				struct iphdr *iph = (struct iphdr *)
						    skb_network_header(skb);
				if (iph->protocol == IPPROTO_TCP) {
					pull_size = ctx->eth_ip_hdr_size +
						    sizeof(struct tcphdr);

					if (unlikely(!pskb_may_pull(skb,
								pull_size))) {
						goto err;
					}
					ctx->l4_hdr_size = ((struct tcphdr *)
					   skb_transport_header(skb))->doff * 4;
				} else if (iph->protocol == IPPROTO_UDP) {
					ctx->l4_hdr_size =
							sizeof(struct udphdr);
				} else {
					ctx->l4_hdr_size = 0;
				}
			} else {
				/* for simplicity, don't copy L4 headers */
				ctx->l4_hdr_size = 0;
			}
			ctx->copy_size = ctx->eth_ip_hdr_size +
					 ctx->l4_hdr_size;
		} else {
			ctx->eth_ip_hdr_size = 0;
			ctx->l4_hdr_size = 0;
			/* copy as much as allowed */
			ctx->copy_size = min((unsigned int)VMXNET3_HDR_COPY_SIZE
					     , skb_headlen(skb));
		}

		/* make sure headers are accessible directly */
		if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
			goto err;
	}

	if (unlikely(ctx->copy_size > VMXNET3_HDR_COPY_SIZE)) {
		tq->stats.oversized_hdr++;
		ctx->copy_size = 0;
		return 0;
	}

	tdd = tq->data_ring.base + tq->tx_ring.next2fill;

	memcpy(tdd->data, skb->data, ctx->copy_size);
	dev_dbg(&adapter->netdev->dev,
		"copy %u bytes to dataRing[%u]\n",
		ctx->copy_size, tq->tx_ring.next2fill);
	return 1;

err:
	return -1;
}
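
/*
 * For a TSO packet, clear the IP checksum and seed the TCP checksum with the
 * pseudo-header checksum (IPv4 or IPv6), as expected by the device before it
 * segments the packet.
 */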
static void
vmxnet3_prepare_tso(struct sk_buff *skb,
		    struct vmxnet3_tx_ctx *ctx)
{
	struct tcphdr *tcph = (struct tcphdr *)skb_transport_header(skb);

	if (ctx->ipv4) {
		struct iphdr *iph = (struct iphdr *)skb_network_header(skb);

		iph->check = 0;
		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
						 IPPROTO_TCP, 0);
	} else {
		struct ipv6hdr *iph = (struct ipv6hdr *)skb_network_header(skb);

		tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
					       IPPROTO_TCP, 0);
	}
}

/*
 * Transmits a pkt thru a given tq
 * Returns:
 *	NETDEV_TX_OK:	descriptors are setup successfully
 *	NETDEV_TX_OK:	error occurred, the pkt is dropped
 *	NETDEV_TX_BUSY:	tx ring is full, queue is stopped
 *
 * Side-effects:
 *	1. tx ring may be changed
 *	2. tq stats may be updated accordingly
 *	3. shared->txNumDeferred may be updated
 */
static int
vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter, struct net_device *netdev)
{
	int ret;
	u32 count;
	unsigned long flags;
	struct vmxnet3_tx_ctx ctx;
	union Vmxnet3_GenericDesc *gdesc;
#ifdef __BIG_ENDIAN_BITFIELD
	/* Use temporary descriptor to avoid touching bits multiple times */
	union Vmxnet3_GenericDesc tempTxDesc;
#endif

	/* conservatively estimate # of descriptors to use */
	count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) +
		skb_shinfo(skb)->nr_frags + 1;

	ctx.ipv4 = (skb->protocol == __constant_ntohs(ETH_P_IP));

	ctx.mss = skb_shinfo(skb)->gso_size;
	if (ctx.mss) {
		if (skb_header_cloned(skb)) {
			if (unlikely(pskb_expand_head(skb, 0, 0,
						      GFP_ATOMIC) != 0)) {
				tq->stats.drop_tso++;
				goto drop_pkt;
			}
			tq->stats.copy_skb_header++;
		}
		vmxnet3_prepare_tso(skb, &ctx);
	} else {
		if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {

			/* non-tso pkts must not use more than
			 * VMXNET3_MAX_TXD_PER_PKT entries
			 */
			if (skb_linearize(skb) != 0) {
				tq->stats.drop_too_many_frags++;
				goto drop_pkt;
			}
			tq->stats.linearized++;

			/* recalculate the # of descriptors to use */
			count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
		}
	}

	ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
	if (ret >= 0) {
		BUG_ON(ret <= 0 && ctx.copy_size != 0);
		/* hdrs parsed, check against other limits */
		if (ctx.mss) {
			if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
				     VMXNET3_MAX_TX_BUF_SIZE)) {
				goto hdr_too_big;
			}
		} else {
			if (skb->ip_summed == CHECKSUM_PARTIAL) {
				if (unlikely(ctx.eth_ip_hdr_size +
					     skb->csum_offset >
					     VMXNET3_MAX_CSUM_OFFSET)) {
					goto hdr_too_big;
				}
			}
		}
	} else {
		tq->stats.drop_hdr_inspect_err++;
		goto drop_pkt;
	}

	spin_lock_irqsave(&tq->tx_lock, flags);

	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
		tq->stats.tx_ring_full++;
		dev_dbg(&adapter->netdev->dev,
			"tx queue stopped on %s, next2comp %u"
			" next2fill %u\n", adapter->netdev->name,
			tq->tx_ring.next2comp, tq->tx_ring.next2fill);

		vmxnet3_tq_stop(tq, adapter);
		spin_unlock_irqrestore(&tq->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	/* fill tx descs related to addr & len */
	vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);

	/* setup the EOP desc */
	ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);

	/* setup the SOP desc */
#ifdef __BIG_ENDIAN_BITFIELD
	gdesc = &tempTxDesc;
	gdesc->dword[2] = ctx.sop_txd->dword[2];
	gdesc->dword[3] = ctx.sop_txd->dword[3];
#else
	gdesc = ctx.sop_txd;
#endif
	if (ctx.mss) {
		gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
		gdesc->txd.om = VMXNET3_OM_TSO;
		gdesc->txd.msscof = ctx.mss;
		le32_add_cpu(&tq->shared->txNumDeferred, (skb->len -
			     gdesc->txd.hlen + ctx.mss - 1) / ctx.mss);
	} else {
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			gdesc->txd.hlen = ctx.eth_ip_hdr_size;
			gdesc->txd.om = VMXNET3_OM_CSUM;
			gdesc->txd.msscof = ctx.eth_ip_hdr_size +
					    skb->csum_offset;
		} else {
			gdesc->txd.om = 0;
			gdesc->txd.msscof = 0;
		}
		le32_add_cpu(&tq->shared->txNumDeferred, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		gdesc->txd.ti = 1;
		gdesc->txd.tci = vlan_tx_tag_get(skb);
	}

	/* finally flips the GEN bit of the SOP desc. */
	gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
						  VMXNET3_TXD_GEN);
#ifdef __BIG_ENDIAN_BITFIELD
	/* Finished updating in bitfields of Tx Desc, so write them in original
	 * place.
	 */
	vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
			   (struct Vmxnet3_TxDesc *)ctx.sop_txd);
	gdesc = ctx.sop_txd;
#endif
	dev_dbg(&adapter->netdev->dev,
		"txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
		(u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd -
		tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
		le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));

	spin_unlock_irqrestore(&tq->tx_lock, flags);

	if (le32_to_cpu(tq->shared->txNumDeferred) >=
					le32_to_cpu(tq->shared->txThreshold)) {
		tq->shared->txNumDeferred = 0;
		VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_TXPROD,
				       tq->tx_ring.next2fill);
	}

	return NETDEV_TX_OK;

hdr_too_big:
	tq->stats.drop_oversized_hdr++;
drop_pkt:
	tq->stats.drop_total++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static netdev_tx_t
vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	return vmxnet3_tq_xmit(skb, &adapter->tx_queue, adapter, netdev);
}
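
/*
 * Propagate the device's rx checksum verdict to the skb: mark the checksum
 * as already verified when the descriptor reports both IP and TCP/UDP
 * checksums good, pass the reported partial checksum to the stack when one
 * is provided, and otherwise leave the skb with no checksum information.
 */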
static void
vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
		struct sk_buff *skb,
		union Vmxnet3_GenericDesc *gdesc)
{
	if (!gdesc->rcd.cnc && adapter->rxcsum) {
		/* typical case: TCP/UDP over IP and both csums are correct */
		if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) ==
							VMXNET3_RCD_CSUM_OK) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
			BUG_ON(!(gdesc->rcd.v4 || gdesc->rcd.v6));
			BUG_ON(gdesc->rcd.frg);
		} else {
			if (gdesc->rcd.csum) {
				skb->csum = htons(gdesc->rcd.csum);
				skb->ip_summed = CHECKSUM_PARTIAL;
			} else {
				skb_checksum_none_assert(skb);
			}
		}
	} else {
		skb_checksum_none_assert(skb);
	}
}

static void
vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
		 struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter)
{
	rq->stats.drop_err++;
	if (!rcd->fcs)
		rq->stats.drop_fcs++;

	rq->stats.drop_total++;

	/*
	 * We do not unmap and chain the rx buffer to the skb.
	 * We basically pretend this buffer is not used and will be recycled
	 * by vmxnet3_rq_alloc_rx_buf()
	 */

	/*
	 * ctx->skb may be NULL if this is the first and the only one
	 * desc for the pkt
	 */
	if (ctx->skb)
		dev_kfree_skb_irq(ctx->skb);

	ctx->skb = NULL;
}
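
/*
 * Process up to @quota rx completion descriptors: unmap the head (skb) and
 * body (page) buffers of each packet, chain them together, apply the rx
 * checksum verdict, and hand the completed skb to the stack. Consumed
 * descriptors are replenished via vmxnet3_rq_alloc_rx_buf(), and the rx
 * producer register is updated when the device asks for it. Returns the
 * number of descriptors processed.
 */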
static int
vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
		       struct vmxnet3_adapter *adapter, int quota)
{
	static u32 rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2};
	u32 num_rxd = 0;
	struct Vmxnet3_RxCompDesc *rcd;
	struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
#ifdef __BIG_ENDIAN_BITFIELD
	struct Vmxnet3_RxDesc rxCmdDesc;
	struct Vmxnet3_RxCompDesc rxComp;
#endif
	vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
			  &rxComp);
	while (rcd->gen == rq->comp_ring.gen) {
		struct vmxnet3_rx_buf_info *rbi;
		struct sk_buff *skb;
		int num_to_alloc;
		struct Vmxnet3_RxDesc *rxd;
		u32 idx, ring_idx;

		if (num_rxd >= quota) {
			/* we may stop even before we see the EOP desc of
			 * the current pkt
			 */
			break;
		}
		num_rxd++;

		idx = rcd->rxdIdx;
		ring_idx = rcd->rqID == rq->qid ? 0 : 1;
		vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
				  &rxCmdDesc);
		rbi = rq->buf_info[ring_idx] + idx;

		BUG_ON(rxd->addr != rbi->dma_addr ||
		       rxd->len != rbi->len);

		if (unlikely(rcd->eop && rcd->err)) {
			vmxnet3_rx_error(rq, rcd, ctx, adapter);
			goto rcd_done;
		}

		if (rcd->sop) { /* first buf of the pkt */
			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
			       rcd->rqID != rq->qid);

			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
			BUG_ON(ctx->skb != NULL || rbi->skb == NULL);

			if (unlikely(rcd->len == 0)) {
				/* Pretend the rx buffer is skipped. */
				BUG_ON(!(rcd->sop && rcd->eop));
				dev_dbg(&adapter->netdev->dev,
					"rxRing[%u][%u] 0 length\n",
					ring_idx, idx);
				goto rcd_done;
			}

			ctx->skb = rbi->skb;
			rbi->skb = NULL;

			pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len,
					 PCI_DMA_FROMDEVICE);

			skb_put(ctx->skb, rcd->len);
		} else {
			BUG_ON(ctx->skb == NULL);
			/* non SOP buffer must be type 1 in most cases */
			if (rbi->buf_type == VMXNET3_RX_BUF_PAGE) {
				BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);

				if (rcd->len) {
					pci_unmap_page(adapter->pdev,
						       rbi->dma_addr, rbi->len,
						       PCI_DMA_FROMDEVICE);

					vmxnet3_append_frag(ctx->skb, rcd, rbi);
					rbi->page = NULL;
				}
			} else {
				/*
				 * The only time a non-SOP buffer is type 0 is
				 * when it's EOP and error flag is raised, which
				 * has already been handled.
				 */
				BUG_ON(true);
			}
		}

		skb = ctx->skb;
		if (rcd->eop) {
			skb->len += skb->data_len;
			skb->truesize += skb->data_len;

			vmxnet3_rx_csum(adapter, skb,
					(union Vmxnet3_GenericDesc *)rcd);
			skb->protocol = eth_type_trans(skb, adapter->netdev);

			if (unlikely(adapter->vlan_grp && rcd->ts)) {
				vlan_hwaccel_receive_skb(skb,
						adapter->vlan_grp, rcd->tci);
			} else {
				netif_receive_skb(skb);
			}

			ctx->skb = NULL;
		}

rcd_done:
		/* device may skip some rx descs */
		rq->rx_ring[ring_idx].next2comp = idx;
		VMXNET3_INC_RING_IDX_ONLY(rq->rx_ring[ring_idx].next2comp,
					  rq->rx_ring[ring_idx].size);

		/* refill rx buffers frequently to avoid starving the h/w */
		num_to_alloc = vmxnet3_cmd_ring_desc_avail(rq->rx_ring +
							   ring_idx);
		if (unlikely(num_to_alloc > VMXNET3_RX_ALLOC_THRESHOLD(rq,
							ring_idx, adapter))) {
			vmxnet3_rq_alloc_rx_buf(rq, ring_idx, num_to_alloc,
						adapter);

			/* if needed, update the register */
			if (unlikely(rq->shared->updateRxProd)) {
				VMXNET3_WRITE_BAR0_REG(adapter,
					rxprod_reg[ring_idx] + rq->qid * 8,
					rq->rx_ring[ring_idx].next2fill);
				rq->uncommitted[ring_idx] = 0;
			}
		}

		vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
		vmxnet3_getRxComp(rcd,
		     &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
	}

	return num_rxd;
}
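
/*
 * Unmap and free every rx buffer still owned by the queue (skbs on the head
 * ring, pages on the body ring), then reset both rx rings and the comp ring
 * to their initial state.
 */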
static void
vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
		   struct vmxnet3_adapter *adapter)
{
	u32 i, ring_idx;
	struct Vmxnet3_RxDesc *rxd;

	for (ring_idx = 0; ring_idx < 2; ring_idx++) {
		for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
#ifdef __BIG_ENDIAN_BITFIELD
			struct Vmxnet3_RxDesc rxDesc;
#endif
			vmxnet3_getRxDesc(rxd,
				&rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);

			if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
					rq->buf_info[ring_idx][i].skb) {
				pci_unmap_single(adapter->pdev, rxd->addr,
						 rxd->len, PCI_DMA_FROMDEVICE);
				dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
				rq->buf_info[ring_idx][i].skb = NULL;
			} else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
					rq->buf_info[ring_idx][i].page) {
				pci_unmap_page(adapter->pdev, rxd->addr,
					       rxd->len, PCI_DMA_FROMDEVICE);
				put_page(rq->buf_info[ring_idx][i].page);
				rq->buf_info[ring_idx][i].page = NULL;
			}
		}

		rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
		rq->rx_ring[ring_idx].next2fill =
					rq->rx_ring[ring_idx].next2comp = 0;
		rq->uncommitted[ring_idx] = 0;
	}

	rq->comp_ring.gen = VMXNET3_INIT_GEN;
	rq->comp_ring.next2proc = 0;
}

void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
			struct vmxnet3_adapter *adapter)
{
	int i;
	int j;

	/* all rx buffers must have already been freed */
	for (i = 0; i < 2; i++) {
		if (rq->buf_info[i]) {
			for (j = 0; j < rq->rx_ring[i].size; j++)
				BUG_ON(rq->buf_info[i][j].page != NULL);
		}
	}

	kfree(rq->buf_info[0]);

	for (i = 0; i < 2; i++) {
		if (rq->rx_ring[i].base) {
			pci_free_consistent(adapter->pdev, rq->rx_ring[i].size
					    * sizeof(struct Vmxnet3_RxDesc),
					    rq->rx_ring[i].base,
					    rq->rx_ring[i].basePA);
			rq->rx_ring[i].base = NULL;
		}
		rq->buf_info[i] = NULL;
	}

	if (rq->comp_ring.base) {
		pci_free_consistent(adapter->pdev, rq->comp_ring.size *
				    sizeof(struct Vmxnet3_RxCompDesc),
				    rq->comp_ring.base, rq->comp_ring.basePA);
		rq->comp_ring.base = NULL;
	}
}
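
/*
 * Tag each slot of both rx rings as either a head (skb) or body (page)
 * buffer, reset the ring state, and pre-fill the rings with fresh buffers.
 * Fails with -ENOMEM only if not even one buffer could be posted to the
 * first ring.
 */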
static int
vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
		struct vmxnet3_adapter *adapter)
{
	int i;

	/* initialize buf_info */
	for (i = 0; i < rq->rx_ring[0].size; i++) {

		/* 1st buf for a pkt is skbuff */
		if (i % adapter->rx_buf_per_pkt == 0) {
			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
			rq->buf_info[0][i].len = adapter->skb_buf_size;
		} else { /* subsequent bufs for a pkt is frag */
			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
			rq->buf_info[0][i].len = PAGE_SIZE;
		}
	}
	for (i = 0; i < rq->rx_ring[1].size; i++) {
		rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
		rq->buf_info[1][i].len = PAGE_SIZE;
	}

	/* reset internal state and allocate buffers for both rings */
	for (i = 0; i < 2; i++) {
		rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
		rq->uncommitted[i] = 0;

		memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
		       sizeof(struct Vmxnet3_RxDesc));
		rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
	}
	if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
				    adapter) == 0) {
		/* at least has 1 rx buffer for the 1st ring */
		return -ENOMEM;
	}
	vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);

	/* reset the comp ring */
	rq->comp_ring.next2proc = 0;
	memset(rq->comp_ring.base, 0, rq->comp_ring.size *
	       sizeof(struct Vmxnet3_RxCompDesc));
	rq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset rxctx */
	rq->rx_ctx.skb = NULL;

	/* stats are not reset */
	return 0;
}
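
/*
 * Allocate the DMA-coherent rx rings and comp ring for one rx queue, plus a
 * single buf_info array that is split between the two rings. On failure the
 * partially created queue is destroyed and -ENOMEM is returned.
 */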
static int
vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
{
	int i;
	size_t sz;
	struct vmxnet3_rx_buf_info *bi;

	for (i = 0; i < 2; i++) {

		sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
		rq->rx_ring[i].base = pci_alloc_consistent(adapter->pdev, sz,
							&rq->rx_ring[i].basePA);
		if (!rq->rx_ring[i].base) {
			printk(KERN_ERR "%s: failed to allocate rx ring %d\n",
			       adapter->netdev->name, i);
			goto err;
		}
	}

	sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
	rq->comp_ring.base = pci_alloc_consistent(adapter->pdev, sz,
						  &rq->comp_ring.basePA);
	if (!rq->comp_ring.base) {
		printk(KERN_ERR "%s: failed to allocate rx comp ring\n",
		       adapter->netdev->name);
		goto err;
	}

	sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
						   rq->rx_ring[1].size);
	bi = kzalloc(sz, GFP_KERNEL);
	if (!bi) {
		printk(KERN_ERR "%s: failed to allocate rx bufinfo\n",
		       adapter->netdev->name);
		goto err;
	}
	rq->buf_info[0] = bi;
	rq->buf_info[1] = bi + rq->rx_ring[0].size;

	return 0;

err:
	vmxnet3_rq_destroy(rq, adapter);
	return -ENOMEM;
}
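
/*
 * NAPI poll path: handle any pending device events, reap tx completions, and
 * receive up to @budget packets. When less than the full budget is consumed,
 * polling is completed and the interrupt is re-enabled.
 */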
static int
vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
{
	if (unlikely(adapter->shared->ecr))
		vmxnet3_process_events(adapter);

	vmxnet3_tq_tx_complete(&adapter->tx_queue, adapter);
	return vmxnet3_rq_rx_complete(&adapter->rx_queue, adapter, budget);
}

static int
vmxnet3_poll(struct napi_struct *napi, int budget)
{
	struct vmxnet3_adapter *adapter = container_of(napi,
					  struct vmxnet3_adapter, napi);
	int rxd_done;

	rxd_done = vmxnet3_do_poll(adapter, budget);

	if (rxd_done < budget) {
		napi_complete(napi);
		vmxnet3_enable_intr(adapter, 0);
	}
	return rxd_done;
}

/* Interrupt handler for vmxnet3 */
static irqreturn_t
vmxnet3_intr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct vmxnet3_adapter *adapter = netdev_priv(dev);

	if (unlikely(adapter->intr.type == VMXNET3_IT_INTX)) {
		u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
		if (unlikely(icr == 0))
			/* not ours */
			return IRQ_NONE;
	}

	/* disable intr if needed */
	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(adapter, 0);

	napi_schedule(&adapter->napi);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER

/* netpoll callback. */
static void
vmxnet3_netpoll(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	int irq;

#ifdef CONFIG_PCI_MSI
	if (adapter->intr.type == VMXNET3_IT_MSIX)
		irq = adapter->intr.msix_entries[0].vector;
	else
#endif
		irq = adapter->pdev->irq;

	disable_irq(irq);
	vmxnet3_intr(irq, netdev);
	enable_irq(irq);
}
#endif
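
/*
 * Request the single interrupt used by the driver (MSI-X vector 0, MSI, or a
 * shared INTx line depending on the configured interrupt type), then set all
 * moderation levels to adaptive and point every interrupt source at vector 0.
 */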
  1222. static int
  1223. vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
  1224. {
  1225. int err;
  1226. #ifdef CONFIG_PCI_MSI
  1227. if (adapter->intr.type == VMXNET3_IT_MSIX) {
  1228. /* we only use 1 MSI-X vector */
  1229. err = request_irq(adapter->intr.msix_entries[0].vector,
  1230. vmxnet3_intr, 0, adapter->netdev->name,
  1231. adapter->netdev);
  1232. } else if (adapter->intr.type == VMXNET3_IT_MSI) {
  1233. err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
  1234. adapter->netdev->name, adapter->netdev);
  1235. } else
  1236. #endif
  1237. {
  1238. err = request_irq(adapter->pdev->irq, vmxnet3_intr,
  1239. IRQF_SHARED, adapter->netdev->name,
  1240. adapter->netdev);
  1241. }
  1242. if (err)
  1243. printk(KERN_ERR "Failed to request irq %s (intr type:%d), error"
  1244. ":%d\n", adapter->netdev->name, adapter->intr.type, err);
  1245. if (!err) {
  1246. int i;
  1247. /* init our intr settings */
  1248. for (i = 0; i < adapter->intr.num_intrs; i++)
  1249. adapter->intr.mod_levels[i] = UPT1_IML_ADAPTIVE;
  1250. /* next setup intr index for all intr sources */
  1251. adapter->tx_queue.comp_ring.intr_idx = 0;
  1252. adapter->rx_queue.comp_ring.intr_idx = 0;
  1253. adapter->intr.event_intr_idx = 0;
  1254. printk(KERN_INFO "%s: intr type %u, mode %u, %u vectors "
  1255. "allocated\n", adapter->netdev->name, adapter->intr.type,
  1256. adapter->intr.mask_mode, adapter->intr.num_intrs);
  1257. }
  1258. return err;
  1259. }
static void
vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
{
	BUG_ON(adapter->intr.type == VMXNET3_IT_AUTO ||
	       adapter->intr.num_intrs <= 0);

	switch (adapter->intr.type) {
#ifdef CONFIG_PCI_MSI
	case VMXNET3_IT_MSIX:
	{
		int i;

		for (i = 0; i < adapter->intr.num_intrs; i++)
			free_irq(adapter->intr.msix_entries[i].vector,
				 adapter->netdev);
		break;
	}
#endif
	case VMXNET3_IT_MSI:
		free_irq(adapter->pdev->irq, adapter->netdev);
		break;
	case VMXNET3_IT_INTX:
		free_irq(adapter->pdev->irq, adapter->netdev);
		break;
	default:
		BUG_ON(true);
	}
}

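/*
 * ndo_vlan_rx_register callback: a non-NULL group enables VLAN rx stripping
 * and filtering in the device; a NULL group disables both by clearing the
 * VLAN filter table and the UPT1_F_RXVLAN feature bit.
 */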
static void
vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_DriverShared *shared = adapter->shared;
	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;

	if (grp) {
		/* add vlan rx stripping. */
		if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) {
			int i;
			struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
			adapter->vlan_grp = grp;

			/* update FEATURES to device */
			set_flag_le64(&devRead->misc.uptFeatures,
				      UPT1_F_RXVLAN);
			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
					       VMXNET3_CMD_UPDATE_FEATURE);
			/*
			 * Clear entire vfTable; then enable untagged pkts.
			 * Note: setting one entry in vfTable to non-zero turns
			 * on VLAN rx filtering.
			 */
			for (i = 0; i < VMXNET3_VFT_SIZE; i++)
				vfTable[i] = 0;

			VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
					       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
		} else {
			printk(KERN_ERR "%s: vlan_rx_register when device has "
			       "no NETIF_F_HW_VLAN_RX\n", netdev->name);
		}
	} else {
		/* remove vlan rx stripping. */
		struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
		adapter->vlan_grp = NULL;

		if (le64_to_cpu(devRead->misc.uptFeatures) & UPT1_F_RXVLAN) {
			int i;

			for (i = 0; i < VMXNET3_VFT_SIZE; i++) {
				/* clear entire vfTable; this also disables
				 * VLAN rx filtering
				 */
				vfTable[i] = 0;
			}
			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
					       VMXNET3_CMD_UPDATE_VLAN_FILTERS);

			/* update FEATURES to device */
			reset_flag_le64(&devRead->misc.uptFeatures,
					UPT1_F_RXVLAN);
			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
					       VMXNET3_CMD_UPDATE_FEATURE);
		}
	}
}

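/*
 * Rebuild the VLAN filter table from the active vlan_group, re-adding an
 * entry for VLAN 0 so untagged packets still pass when any VLAN is active.
 */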
static void
vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
{
	if (adapter->vlan_grp) {
		u16 vid;
		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
		bool activeVlan = false;

		for (vid = 0; vid < VLAN_N_VID; vid++) {
			if (vlan_group_get_device(adapter->vlan_grp, vid)) {
				VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
				activeVlan = true;
			}
		}
		if (activeVlan) {
			/* continue to allow untagged pkts */
			VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
		}
	}
}

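/*
 * Per-VLAN filter callbacks: set or clear the bit for this vid in the VLAN
 * filter table and tell the device to reload its VLAN filters.
 */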
static void
vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;

	VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
}


static void
vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;

	VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
}

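/*
 * Copy the multicast list into a flat buffer that can be handed to the
 * device.  Returns NULL if the list is too large for the 16-bit mfTableLen
 * field or if the allocation fails.
 */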
static u8 *
vmxnet3_copy_mc(struct net_device *netdev)
{
	u8 *buf = NULL;
	u32 sz = netdev_mc_count(netdev) * ETH_ALEN;

	/* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
	if (sz <= 0xffff) {
		/* We may be called with BH disabled */
		buf = kmalloc(sz, GFP_ATOMIC);
		if (buf) {
			struct netdev_hw_addr *ha;
			int i = 0;

			netdev_for_each_mc_addr(ha, netdev)
				memcpy(buf + i++ * ETH_ALEN, ha->addr,
				       ETH_ALEN);
		}
	}
	return buf;
}

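/*
 * ndo_set_multicast_list callback: translate the netdev flags and the
 * multicast list into the device's rx mode and MAC filter table, then
 * notify the device of both.
 */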
static void
vmxnet3_set_mc(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_RxFilterConf *rxConf =
					&adapter->shared->devRead.rxFilterConf;
	u8 *new_table = NULL;
	u32 new_mode = VMXNET3_RXM_UCAST;

	if (netdev->flags & IFF_PROMISC)
		new_mode |= VMXNET3_RXM_PROMISC;

	if (netdev->flags & IFF_BROADCAST)
		new_mode |= VMXNET3_RXM_BCAST;

	if (netdev->flags & IFF_ALLMULTI)
		new_mode |= VMXNET3_RXM_ALL_MULTI;
	else
		if (!netdev_mc_empty(netdev)) {
			new_table = vmxnet3_copy_mc(netdev);
			if (new_table) {
				new_mode |= VMXNET3_RXM_MCAST;
				rxConf->mfTableLen = cpu_to_le16(
					netdev_mc_count(netdev) * ETH_ALEN);
				rxConf->mfTablePA = cpu_to_le64(virt_to_phys(
						    new_table));
			} else {
				printk(KERN_INFO "%s: failed to copy mcast list"
				       ", setting ALL_MULTI\n", netdev->name);
				new_mode |= VMXNET3_RXM_ALL_MULTI;
			}
		}

	if (!(new_mode & VMXNET3_RXM_MCAST)) {
		rxConf->mfTableLen = 0;
		rxConf->mfTablePA = 0;
	}

	if (new_mode != rxConf->rxMode) {
		rxConf->rxMode = cpu_to_le32(new_mode);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_RX_MODE);
	}

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_MAC_FILTERS);

	kfree(new_table);
}

/*
 * Set up driver_shared based on settings in adapter.
 */

static void
vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
{
	struct Vmxnet3_DriverShared *shared = adapter->shared;
	struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
	struct Vmxnet3_TxQueueConf *tqc;
	struct Vmxnet3_RxQueueConf *rqc;
	int i;

	memset(shared, 0, sizeof(*shared));

	/* driver settings */
	shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
	devRead->misc.driverInfo.version = cpu_to_le32(
						VMXNET3_DRIVER_VERSION_NUM);
	devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
				VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
	devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
	*((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
				*((u32 *)&devRead->misc.driverInfo.gos));
	devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
	devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);

	devRead->misc.ddPA = cpu_to_le64(virt_to_phys(adapter));
	devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));

	/* set up feature flags */
	if (adapter->rxcsum)
		set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_RXCSUM);

	if (adapter->lro) {
		set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_LRO);
		devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
	}
	if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX) &&
	    adapter->vlan_grp) {
		set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_RXVLAN);
	}

	devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
	devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
	devRead->misc.queueDescLen = cpu_to_le32(
				     sizeof(struct Vmxnet3_TxQueueDesc) +
				     sizeof(struct Vmxnet3_RxQueueDesc));

	/* tx queue settings */
	BUG_ON(adapter->tx_queue.tx_ring.base == NULL);

	devRead->misc.numTxQueues = 1;
	tqc = &adapter->tqd_start->conf;
	tqc->txRingBasePA = cpu_to_le64(adapter->tx_queue.tx_ring.basePA);
	tqc->dataRingBasePA = cpu_to_le64(adapter->tx_queue.data_ring.basePA);
	tqc->compRingBasePA = cpu_to_le64(adapter->tx_queue.comp_ring.basePA);
	tqc->ddPA = cpu_to_le64(virt_to_phys(
					adapter->tx_queue.buf_info));
	tqc->txRingSize = cpu_to_le32(adapter->tx_queue.tx_ring.size);
	tqc->dataRingSize = cpu_to_le32(adapter->tx_queue.data_ring.size);
	tqc->compRingSize = cpu_to_le32(adapter->tx_queue.comp_ring.size);
	tqc->ddLen = cpu_to_le32(sizeof(struct vmxnet3_tx_buf_info) *
				 tqc->txRingSize);
	tqc->intrIdx = adapter->tx_queue.comp_ring.intr_idx;

	/* rx queue settings */
	devRead->misc.numRxQueues = 1;
	rqc = &adapter->rqd_start->conf;
	rqc->rxRingBasePA[0] = cpu_to_le64(adapter->rx_queue.rx_ring[0].basePA);
	rqc->rxRingBasePA[1] = cpu_to_le64(adapter->rx_queue.rx_ring[1].basePA);
	rqc->compRingBasePA = cpu_to_le64(adapter->rx_queue.comp_ring.basePA);
	rqc->ddPA = cpu_to_le64(virt_to_phys(
					adapter->rx_queue.buf_info));
	rqc->rxRingSize[0] = cpu_to_le32(adapter->rx_queue.rx_ring[0].size);
	rqc->rxRingSize[1] = cpu_to_le32(adapter->rx_queue.rx_ring[1].size);
	rqc->compRingSize = cpu_to_le32(adapter->rx_queue.comp_ring.size);
	rqc->ddLen = cpu_to_le32(sizeof(struct vmxnet3_rx_buf_info) *
				 (rqc->rxRingSize[0] + rqc->rxRingSize[1]));
	rqc->intrIdx = adapter->rx_queue.comp_ring.intr_idx;

	/* intr settings */
	devRead->intrConf.autoMask = adapter->intr.mask_mode ==
				     VMXNET3_IMM_AUTO;
	devRead->intrConf.numIntrs = adapter->intr.num_intrs;
	for (i = 0; i < adapter->intr.num_intrs; i++)
		devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];

	devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
	devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);

	/* rx filter settings */
	devRead->rxFilterConf.rxMode = 0;
	vmxnet3_restore_vlan(adapter);

	/* the rest are already zeroed */
}

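/*
 * Bring the device up: initialize the queues, request the IRQ, publish the
 * shared memory area to the device and issue ACTIVATE_DEV.  On success the
 * rx producer registers are primed and interrupts are enabled.
 */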
int
vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
{
	int err;
	u32 ret;

	dev_dbg(&adapter->netdev->dev,
		"%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes"
		" %u %u %u\n", adapter->netdev->name, adapter->skb_buf_size,
		adapter->rx_buf_per_pkt, adapter->tx_queue.tx_ring.size,
		adapter->rx_queue.rx_ring[0].size,
		adapter->rx_queue.rx_ring[1].size);

	vmxnet3_tq_init(&adapter->tx_queue, adapter);
	err = vmxnet3_rq_init(&adapter->rx_queue, adapter);
	if (err) {
		printk(KERN_ERR "Failed to init rx queue for %s: error %d\n",
		       adapter->netdev->name, err);
		goto rq_err;
	}

	err = vmxnet3_request_irqs(adapter);
	if (err) {
		printk(KERN_ERR "Failed to setup irq for %s: error %d\n",
		       adapter->netdev->name, err);
		goto irq_err;
	}

	vmxnet3_setup_driver_shared(adapter);

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
			       adapter->shared_pa));
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
			       adapter->shared_pa));

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_ACTIVATE_DEV);
	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);

	if (ret != 0) {
		printk(KERN_ERR "Failed to activate dev %s: error %u\n",
		       adapter->netdev->name, ret);
		err = -EINVAL;
		goto activate_err;
	}
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD,
			       adapter->rx_queue.rx_ring[0].next2fill);
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD2,
			       adapter->rx_queue.rx_ring[1].next2fill);

	/* Apply the rx filter settings last. */
	vmxnet3_set_mc(adapter->netdev);

	/*
	 * Check link state when first activating device. It will start the
	 * tx queue if the link is up.
	 */
	vmxnet3_check_link(adapter, true);

	napi_enable(&adapter->napi);
	vmxnet3_enable_all_intrs(adapter);
	clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
	return 0;

activate_err:
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
	vmxnet3_free_irqs(adapter);
irq_err:
rq_err:
	/* free up buffers we allocated */
	vmxnet3_rq_cleanup(&adapter->rx_queue, adapter);
	return err;
}

void
vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
{
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
}

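/*
 * Quiesce the device: issue QUIESCE_DEV, then tear down interrupts, NAPI,
 * the tx/rx buffers and the IRQs.  Safe to call more than once; the
 * QUIESCED state bit makes repeat calls a no-op.
 */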
int
vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
{
	if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
		return 0;

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_QUIESCE_DEV);
	vmxnet3_disable_all_intrs(adapter);

	napi_disable(&adapter->napi);
	netif_tx_disable(adapter->netdev);
	adapter->link_speed = 0;
	netif_carrier_off(adapter->netdev);

	vmxnet3_tq_cleanup(&adapter->tx_queue, adapter);
	vmxnet3_rq_cleanup(&adapter->rx_queue, adapter);
	vmxnet3_free_irqs(adapter);
	return 0;
}

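/* Program the unicast MAC address into the device's MACL/MACH registers. */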
static void
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
{
	u32 tmp;

	tmp = *(u32 *)mac;
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);

	tmp = (mac[5] << 8) | mac[4];
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
}


static int
vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
{
	struct sockaddr *addr = p;
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	vmxnet3_write_mac_addr(adapter, addr->sa_data);

	return 0;
}


/* ==================== initialization and cleanup routines ============ */

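/*
 * Enable the PCI device, pick a 64-bit DMA mask when possible (otherwise
 * 32-bit), claim the BARs and ioremap BAR0 and BAR1 into hw_addr0/hw_addr1.
 */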
static int
vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
{
	int err;
	unsigned long mmio_start, mmio_len;
	struct pci_dev *pdev = adapter->pdev;

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR "Failed to enable adapter %s: error %d\n",
		       pci_name(pdev), err);
		return err;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			printk(KERN_ERR "pci_set_consistent_dma_mask failed "
			       "for adapter %s\n", pci_name(pdev));
			err = -EIO;
			goto err_set_mask;
		}
		*dma64 = true;
	} else {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
			printk(KERN_ERR "pci_set_dma_mask failed for adapter "
			       "%s\n", pci_name(pdev));
			err = -EIO;
			goto err_set_mask;
		}
		*dma64 = false;
	}

	err = pci_request_selected_regions(pdev, (1 << 2) - 1,
					   vmxnet3_driver_name);
	if (err) {
		printk(KERN_ERR "Failed to request region for adapter %s: "
		       "error %d\n", pci_name(pdev), err);
		goto err_set_mask;
	}

	pci_set_master(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
	if (!adapter->hw_addr0) {
		printk(KERN_ERR "Failed to map bar0 for adapter %s\n",
		       pci_name(pdev));
		err = -EIO;
		goto err_ioremap;
	}

	mmio_start = pci_resource_start(pdev, 1);
	mmio_len = pci_resource_len(pdev, 1);
	adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
	if (!adapter->hw_addr1) {
		printk(KERN_ERR "Failed to map bar1 for adapter %s\n",
		       pci_name(pdev));
		err = -EIO;
		goto err_bar1;
	}
	return 0;

err_bar1:
	iounmap(adapter->hw_addr0);
err_ioremap:
	pci_release_selected_regions(pdev, (1 << 2) - 1);
err_set_mask:
	pci_disable_device(pdev);
	return err;
}

static void
vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
{
	BUG_ON(!adapter->pdev);

	iounmap(adapter->hw_addr0);
	iounmap(adapter->hw_addr1);
	pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
	pci_disable_device(adapter->pdev);
}

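/*
 * Choose skb_buf_size and rx_buf_per_pkt for the current MTU, then round
 * the ring0 size up to a multiple of rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
 * and clamp it to VMXNET3_RX_RING_MAX_SIZE.
 */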
static void
vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
{
	size_t sz;

	if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
				    VMXNET3_MAX_ETH_HDR_SIZE) {
		adapter->skb_buf_size = adapter->netdev->mtu +
					VMXNET3_MAX_ETH_HDR_SIZE;
		if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
			adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;

		adapter->rx_buf_per_pkt = 1;
	} else {
		adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
		sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
					    VMXNET3_MAX_ETH_HDR_SIZE;
		adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
	}

	/*
	 * for simplicity, force the ring0 size to be a multiple of
	 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
	 */
	sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
	adapter->rx_queue.rx_ring[0].size = (adapter->rx_queue.rx_ring[0].size +
					     sz - 1) / sz * sz;
	adapter->rx_queue.rx_ring[0].size = min_t(u32,
					    adapter->rx_queue.rx_ring[0].size,
					    VMXNET3_RX_RING_MAX_SIZE / sz * sz);
}

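/*
 * Size and allocate the tx and rx queues.  The completion ring sizes are
 * derived from the requested ring sizes, and a failure to create the rx
 * queue unwinds the tx queue that was created first.
 */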
int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
		      u32 rx_ring_size, u32 rx_ring2_size)
{
	int err;

	adapter->tx_queue.tx_ring.size = tx_ring_size;
	adapter->tx_queue.data_ring.size = tx_ring_size;
	adapter->tx_queue.comp_ring.size = tx_ring_size;
	adapter->tx_queue.shared = &adapter->tqd_start->ctrl;
	adapter->tx_queue.stopped = true;
	err = vmxnet3_tq_create(&adapter->tx_queue, adapter);
	if (err)
		return err;

	adapter->rx_queue.rx_ring[0].size = rx_ring_size;
	adapter->rx_queue.rx_ring[1].size = rx_ring2_size;
	vmxnet3_adjust_rx_ring_size(adapter);
	adapter->rx_queue.comp_ring.size = adapter->rx_queue.rx_ring[0].size +
					   adapter->rx_queue.rx_ring[1].size;
	adapter->rx_queue.qid = 0;
	adapter->rx_queue.qid2 = 1;
	adapter->rx_queue.shared = &adapter->rqd_start->ctrl;
	err = vmxnet3_rq_create(&adapter->rx_queue, adapter);
	if (err)
		vmxnet3_tq_destroy(&adapter->tx_queue, adapter);

	return err;
}

static int
vmxnet3_open(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter;
	int err;

	adapter = netdev_priv(netdev);

	spin_lock_init(&adapter->tx_queue.tx_lock);

	err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE,
				    VMXNET3_DEF_RX_RING_SIZE,
				    VMXNET3_DEF_RX_RING_SIZE);
	if (err)
		goto queue_err;

	err = vmxnet3_activate_dev(adapter);
	if (err)
		goto activate_err;

	return 0;

activate_err:
	vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
	vmxnet3_tq_destroy(&adapter->tx_queue, adapter);
queue_err:
	return err;
}

static int
vmxnet3_close(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	/*
	 * Reset_work may be in the middle of resetting the device, wait for its
	 * completion.
	 */
	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		msleep(1);

	vmxnet3_quiesce_dev(adapter);
	vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
	vmxnet3_tq_destroy(&adapter->tx_queue, adapter);

	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);

	return 0;
}

void
vmxnet3_force_close(struct vmxnet3_adapter *adapter)
{
	/*
	 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
	 * vmxnet3_close() will deadlock.
	 */
	BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));

	/* we need to enable NAPI, otherwise dev_close will deadlock */
	napi_enable(&adapter->napi);
	dev_close(adapter->netdev);
}

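/*
 * ndo_change_mtu callback: validate the new MTU, then (if the interface is
 * running) quiesce and reset the device and rebuild the rx queue so the
 * buffer sizes match the new MTU.  On failure the device is force-closed.
 */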
static int
vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	int err = 0;

	if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU)
		return -EINVAL;

	if (new_mtu > 1500 && !adapter->jumbo_frame)
		return -EINVAL;

	netdev->mtu = new_mtu;

	/*
	 * Reset_work may be in the middle of resetting the device, wait for its
	 * completion.
	 */
	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		msleep(1);

	if (netif_running(netdev)) {
		vmxnet3_quiesce_dev(adapter);
		vmxnet3_reset_dev(adapter);

		/* we need to re-create the rx queue based on the new mtu */
		vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
		vmxnet3_adjust_rx_ring_size(adapter);
		adapter->rx_queue.comp_ring.size =
					adapter->rx_queue.rx_ring[0].size +
					adapter->rx_queue.rx_ring[1].size;
		err = vmxnet3_rq_create(&adapter->rx_queue, adapter);
		if (err) {
			printk(KERN_ERR "%s: failed to re-create rx queue,"
			       " error %d. Closing it.\n", netdev->name, err);
			goto out;
		}

		err = vmxnet3_activate_dev(adapter);
		if (err) {
			printk(KERN_ERR "%s: failed to re-activate, error %d. "
			       "Closing it\n", netdev->name, err);
			goto out;
		}
	}

out:
	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
	if (err)
		vmxnet3_force_close(adapter);

	return err;
}

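/*
 * Advertise the offloads supported by the device (scatter-gather, checksum,
 * VLAN acceleration, TSO, LRO and, when a 64-bit DMA mask was set, high DMA)
 * and record the matching flags in the adapter.
 */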
static void
vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
{
	struct net_device *netdev = adapter->netdev;

	netdev->features = NETIF_F_SG |
		NETIF_F_HW_CSUM |
		NETIF_F_HW_VLAN_TX |
		NETIF_F_HW_VLAN_RX |
		NETIF_F_HW_VLAN_FILTER |
		NETIF_F_TSO |
		NETIF_F_TSO6 |
		NETIF_F_LRO;

	printk(KERN_INFO "features: sg csum vlan jf tso tsoIPv6 lro");

	adapter->rxcsum = true;
	adapter->jumbo_frame = true;
	adapter->lro = true;

	if (dma64) {
		netdev->features |= NETIF_F_HIGHDMA;
		printk(" highDMA");
	}

	netdev->vlan_features = netdev->features;
	printk("\n");
}

static void
vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
{
	u32 tmp;

	tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
	*(u32 *)mac = tmp;

	tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
	mac[4] = tmp & 0xff;
	mac[5] = (tmp >> 8) & 0xff;
}

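/*
 * Ask the device which interrupt type and mask mode it prefers, then try to
 * enable it, falling back from MSI-X to MSI to legacy INTx.  Only a single
 * vector is ever used.
 */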
static void
vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
{
	u32 cfg;

	/* intr settings */
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_GET_CONF_INTR);
	cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	adapter->intr.type = cfg & 0x3;
	adapter->intr.mask_mode = (cfg >> 2) & 0x3;

	if (adapter->intr.type == VMXNET3_IT_AUTO) {
		adapter->intr.type = VMXNET3_IT_MSIX;
	}

#ifdef CONFIG_PCI_MSI
	if (adapter->intr.type == VMXNET3_IT_MSIX) {
		int err;

		adapter->intr.msix_entries[0].entry = 0;
		err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
				      VMXNET3_LINUX_MAX_MSIX_VECT);
		if (!err) {
			adapter->intr.num_intrs = 1;
			adapter->intr.type = VMXNET3_IT_MSIX;
			return;
		}
		adapter->intr.type = VMXNET3_IT_MSI;
	}

	if (adapter->intr.type == VMXNET3_IT_MSI) {
		int err;
		err = pci_enable_msi(adapter->pdev);
		if (!err) {
			adapter->intr.num_intrs = 1;
			return;
		}
	}
#endif /* CONFIG_PCI_MSI */

	adapter->intr.type = VMXNET3_IT_INTX;

	/* INT-X related setting */
	adapter->intr.num_intrs = 1;
}

static void
vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
{
	if (adapter->intr.type == VMXNET3_IT_MSIX)
		pci_disable_msix(adapter->pdev);
	else if (adapter->intr.type == VMXNET3_IT_MSI)
		pci_disable_msi(adapter->pdev);
	else
		BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
}

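/*
 * ndo_tx_timeout callback: count the hang and kick the reset work item,
 * which quiesces, resets and re-activates the device under rtnl_lock.
 */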
static void
vmxnet3_tx_timeout(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	adapter->tx_timeout_count++;

	printk(KERN_ERR "%s: tx hang\n", adapter->netdev->name);
	schedule_work(&adapter->work);
}

static void
vmxnet3_reset_work(struct work_struct *data)
{
	struct vmxnet3_adapter *adapter;

	adapter = container_of(data, struct vmxnet3_adapter, work);

	/* if another thread is resetting the device, no need to proceed */
	if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		return;

	/* if the device is closed, we must leave it alone */
	rtnl_lock();
	if (netif_running(adapter->netdev)) {
		printk(KERN_INFO "%s: resetting\n", adapter->netdev->name);
		vmxnet3_quiesce_dev(adapter);
		vmxnet3_reset_dev(adapter);
		vmxnet3_activate_dev(adapter);
	} else {
		printk(KERN_INFO "%s: already closed\n", adapter->netdev->name);
	}
	rtnl_unlock();

	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
}

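/*
 * PCI probe: allocate the netdev and the DMA-coherent shared/queue
 * descriptor areas, map the BARs, verify the device and UPT revisions,
 * read the MAC address and register the net device.
 */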
static int __devinit
vmxnet3_probe_device(struct pci_dev *pdev,
		     const struct pci_device_id *id)
{
	static const struct net_device_ops vmxnet3_netdev_ops = {
		.ndo_open = vmxnet3_open,
		.ndo_stop = vmxnet3_close,
		.ndo_start_xmit = vmxnet3_xmit_frame,
		.ndo_set_mac_address = vmxnet3_set_mac_addr,
		.ndo_change_mtu = vmxnet3_change_mtu,
		.ndo_get_stats = vmxnet3_get_stats,
		.ndo_tx_timeout = vmxnet3_tx_timeout,
		.ndo_set_multicast_list = vmxnet3_set_mc,
		.ndo_vlan_rx_register = vmxnet3_vlan_rx_register,
		.ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
		.ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
		.ndo_poll_controller = vmxnet3_netpoll,
#endif
	};
	int err;
	bool dma64 = false; /* stupid gcc */
	u32 ver;
	struct net_device *netdev;
	struct vmxnet3_adapter *adapter;
	u8 mac[ETH_ALEN];

	netdev = alloc_etherdev(sizeof(struct vmxnet3_adapter));
	if (!netdev) {
		printk(KERN_ERR "Failed to alloc ethernet device for adapter "
		       "%s\n", pci_name(pdev));
		return -ENOMEM;
	}

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;

	adapter->shared = pci_alloc_consistent(adapter->pdev,
			  sizeof(struct Vmxnet3_DriverShared),
			  &adapter->shared_pa);
	if (!adapter->shared) {
		printk(KERN_ERR "Failed to allocate memory for %s\n",
		       pci_name(pdev));
		err = -ENOMEM;
		goto err_alloc_shared;
	}

	adapter->tqd_start = pci_alloc_consistent(adapter->pdev,
			     sizeof(struct Vmxnet3_TxQueueDesc) +
			     sizeof(struct Vmxnet3_RxQueueDesc),
			     &adapter->queue_desc_pa);
	if (!adapter->tqd_start) {
		printk(KERN_ERR "Failed to allocate memory for %s\n",
		       pci_name(pdev));
		err = -ENOMEM;
		goto err_alloc_queue_desc;
	}
	adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start
							    + 1);

	adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL);
	if (adapter->pm_conf == NULL) {
		printk(KERN_ERR "Failed to allocate memory for %s\n",
		       pci_name(pdev));
		err = -ENOMEM;
		goto err_alloc_pm;
	}

	err = vmxnet3_alloc_pci_resources(adapter, &dma64);
	if (err < 0)
		goto err_alloc_pci;

	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
	if (ver & 1) {
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1);
	} else {
		printk(KERN_ERR "Incompatible h/w version (0x%x) for adapter"
		       " %s\n", ver, pci_name(pdev));
		err = -EBUSY;
		goto err_ver;
	}

	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
	if (ver & 1) {
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
	} else {
		printk(KERN_ERR "Incompatible upt version (0x%x) for "
		       "adapter %s\n", ver, pci_name(pdev));
		err = -EBUSY;
		goto err_ver;
	}

	vmxnet3_declare_features(adapter, dma64);

	adapter->dev_number = atomic_read(&devices_found);
	vmxnet3_alloc_intr_resources(adapter);

	vmxnet3_read_mac_addr(adapter, mac);
	memcpy(netdev->dev_addr, mac, netdev->addr_len);

	netdev->netdev_ops = &vmxnet3_netdev_ops;
	netdev->watchdog_timeo = 5 * HZ;
	vmxnet3_set_ethtool_ops(netdev);

	INIT_WORK(&adapter->work, vmxnet3_reset_work);

	netif_napi_add(netdev, &adapter->napi, vmxnet3_poll, 64);
	SET_NETDEV_DEV(netdev, &pdev->dev);
	err = register_netdev(netdev);

	if (err) {
		printk(KERN_ERR "Failed to register adapter %s\n",
		       pci_name(pdev));
		goto err_register;
	}

	set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
	vmxnet3_check_link(adapter, false);
	atomic_inc(&devices_found);
	return 0;

err_register:
	vmxnet3_free_intr_resources(adapter);
err_ver:
	vmxnet3_free_pci_resources(adapter);
err_alloc_pci:
	kfree(adapter->pm_conf);
err_alloc_pm:
	pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) +
			    sizeof(struct Vmxnet3_RxQueueDesc),
			    adapter->tqd_start, adapter->queue_desc_pa);
err_alloc_queue_desc:
	pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
			    adapter->shared, adapter->shared_pa);
err_alloc_shared:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	return err;
}

static void __devexit
vmxnet3_remove_device(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	flush_scheduled_work();

	unregister_netdev(netdev);

	vmxnet3_free_intr_resources(adapter);
	vmxnet3_free_pci_resources(adapter);
	kfree(adapter->pm_conf);
	pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) +
			    sizeof(struct Vmxnet3_RxQueueDesc),
			    adapter->tqd_start, adapter->queue_desc_pa);
	pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
			    adapter->shared, adapter->shared_pa);
	free_netdev(netdev);
}


#ifdef CONFIG_PM

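/*
 * Suspend handler: release the interrupt resources, program wake-up pattern
 * filters (unicast MAC and/or ARP request for our IPv4 address, plus magic
 * packet) according to adapter->wol, then put the device into the
 * appropriate low-power state.
 */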
static int
vmxnet3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_PMConf *pmConf;
	struct ethhdr *ehdr;
	struct arphdr *ahdr;
	u8 *arpreq;
	struct in_device *in_dev;
	struct in_ifaddr *ifa;
	int i = 0;

	if (!netif_running(netdev))
		return 0;

	vmxnet3_disable_all_intrs(adapter);
	vmxnet3_free_irqs(adapter);
	vmxnet3_free_intr_resources(adapter);

	netif_device_detach(netdev);
	netif_stop_queue(netdev);

	/* Create wake-up filters. */
	pmConf = adapter->pm_conf;
	memset(pmConf, 0, sizeof(*pmConf));

	if (adapter->wol & WAKE_UCAST) {
		pmConf->filters[i].patternSize = ETH_ALEN;
		pmConf->filters[i].maskSize = 1;
		memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
		pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */

		set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_FILTER);
		i++;
	}

	if (adapter->wol & WAKE_ARP) {
		in_dev = in_dev_get(netdev);
		if (!in_dev)
			goto skip_arp;

		ifa = (struct in_ifaddr *)in_dev->ifa_list;
		if (!ifa)
			goto skip_arp;

		pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/
			sizeof(struct arphdr) +		/* ARP header */
			2 * ETH_ALEN +			/* 2 Ethernet addresses*/
			2 * sizeof(u32);		/*2 IPv4 addresses */
		pmConf->filters[i].maskSize =
			(pmConf->filters[i].patternSize - 1) / 8 + 1;

		/* ETH_P_ARP in Ethernet header. */
		ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
		ehdr->h_proto = htons(ETH_P_ARP);

		/* ARPOP_REQUEST in ARP header. */
		ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
		ahdr->ar_op = htons(ARPOP_REQUEST);
		arpreq = (u8 *)(ahdr + 1);

		/* The Unicast IPv4 address in 'tip' field. */
		arpreq += 2 * ETH_ALEN + sizeof(u32);
		*(u32 *)arpreq = ifa->ifa_address;

		/* The mask for the relevant bits. */
		pmConf->filters[i].mask[0] = 0x00;
		pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
		pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
		pmConf->filters[i].mask[3] = 0x00;
		pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
		pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
		in_dev_put(in_dev);

		set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_FILTER);
		i++;
	}

skip_arp:
	if (adapter->wol & WAKE_MAGIC)
		set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_MAGIC);

	pmConf->numFilters = i;

	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
	adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
								  *pmConf));
	adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
								 pmConf));

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_PMCFG);

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
			adapter->wol);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));

	return 0;
}

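/*
 * Resume handler: clear the wake-up filters, restore PCI state, re-enable
 * the device and re-install the interrupt resources and handlers.
 */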
static int
vmxnet3_resume(struct device *device)
{
	int err;
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_PMConf *pmConf;

	if (!netif_running(netdev))
		return 0;

	/* Destroy wake-up filters. */
	pmConf = adapter->pm_conf;
	memset(pmConf, 0, sizeof(*pmConf));

	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
	adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
								  *pmConf));
	/* confPA is a 64-bit physical address; match the suspend path. */
	adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
								 pmConf));

	netif_device_attach(netdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device_mem(pdev);
	if (err != 0)
		return err;

	pci_enable_wake(pdev, PCI_D0, 0);

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_PMCFG);
	vmxnet3_alloc_intr_resources(adapter);
	vmxnet3_request_irqs(adapter);
	vmxnet3_enable_all_intrs(adapter);

	return 0;
}

static const struct dev_pm_ops vmxnet3_pm_ops = {
	.suspend = vmxnet3_suspend,
	.resume = vmxnet3_resume,
};
#endif

static struct pci_driver vmxnet3_driver = {
	.name		= vmxnet3_driver_name,
	.id_table	= vmxnet3_pciid_table,
	.probe		= vmxnet3_probe_device,
	.remove		= __devexit_p(vmxnet3_remove_device),
#ifdef CONFIG_PM
	.driver.pm	= &vmxnet3_pm_ops,
#endif
};

static int __init
vmxnet3_init_module(void)
{
	printk(KERN_INFO "%s - version %s\n", VMXNET3_DRIVER_DESC,
	       VMXNET3_DRIVER_VERSION_REPORT);
	return pci_register_driver(&vmxnet3_driver);
}

module_init(vmxnet3_init_module);


static void
vmxnet3_exit_module(void)
{
	pci_unregister_driver(&vmxnet3_driver);
}

module_exit(vmxnet3_exit_module);

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);