vmxnet3_drv.c

  1. /*
  2. * Linux driver for VMware's vmxnet3 ethernet NIC.
  3. *
  4. * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms of the GNU General Public License as published by the
  8. * Free Software Foundation; version 2 of the License and no later version.
  9. *
  10. * This program is distributed in the hope that it will be useful, but
  11. * WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
  13. * NON INFRINGEMENT. See the GNU General Public License for more
  14. * details.
  15. *
  16. * You should have received a copy of the GNU General Public License
  17. * along with this program; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19. *
  20. * The full GNU General Public License is included in this distribution in
  21. * the file called "COPYING".
  22. *
  23. * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
  24. *
  25. */
  26. #include <net/ip6_checksum.h>
  27. #include "vmxnet3_int.h"
  28. char vmxnet3_driver_name[] = "vmxnet3";
  29. #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
  30. /*
  31. * PCI Device ID Table
  32. * Last entry must be all 0s
  33. */
  34. static DEFINE_PCI_DEVICE_TABLE(vmxnet3_pciid_table) = {
  35. {PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
  36. {0}
  37. };
  38. MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);
  39. static atomic_t devices_found;
  40. /*
  41. * Enable/Disable the given intr
  42. */
  43. static void
  44. vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
  45. {
  46. VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
  47. }
  48. static void
  49. vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
  50. {
  51. VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
  52. }
  53. /*
  54. * Enable/Disable all intrs used by the device
  55. */
  56. static void
  57. vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
  58. {
  59. int i;
  60. for (i = 0; i < adapter->intr.num_intrs; i++)
  61. vmxnet3_enable_intr(adapter, i);
  62. }
  63. static void
  64. vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
  65. {
  66. int i;
  67. for (i = 0; i < adapter->intr.num_intrs; i++)
  68. vmxnet3_disable_intr(adapter, i);
  69. }
  70. static void
  71. vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
  72. {
  73. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
  74. }
  75. static bool
  76. vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
  77. {
  78. return netif_queue_stopped(adapter->netdev);
  79. }
  80. static void
  81. vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
  82. {
  83. tq->stopped = false;
  84. netif_start_queue(adapter->netdev);
  85. }
  86. static void
  87. vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
  88. {
  89. tq->stopped = false;
  90. netif_wake_queue(adapter->netdev);
  91. }
  92. static void
  93. vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
  94. {
  95. tq->stopped = true;
  96. tq->num_stop++;
  97. netif_stop_queue(adapter->netdev);
  98. }
  99. /*
  100. * Check the link state. This may start or stop the tx queue.
  101. */
  102. static void
  103. vmxnet3_check_link(struct vmxnet3_adapter *adapter)
  104. {
  105. u32 ret;
  106. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
  107. ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
  108. adapter->link_speed = ret >> 16;
  109. if (ret & 1) { /* Link is up. */
  110. printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",
  111. adapter->netdev->name, adapter->link_speed);
  112. if (!netif_carrier_ok(adapter->netdev))
  113. netif_carrier_on(adapter->netdev);
  114. vmxnet3_tq_start(&adapter->tx_queue, adapter);
  115. } else {
  116. printk(KERN_INFO "%s: NIC Link is Down\n",
  117. adapter->netdev->name);
  118. if (netif_carrier_ok(adapter->netdev))
  119. netif_carrier_off(adapter->netdev);
  120. vmxnet3_tq_stop(&adapter->tx_queue, adapter);
  121. }
  122. }
  123. static void
  124. vmxnet3_process_events(struct vmxnet3_adapter *adapter)
  125. {
  126. u32 events = le32_to_cpu(adapter->shared->ecr);
  127. if (!events)
  128. return;
  129. vmxnet3_ack_events(adapter, events);
  130. /* Check if link state has changed */
  131. if (events & VMXNET3_ECR_LINK)
  132. vmxnet3_check_link(adapter);
  133. /* Check if there is an error on xmit/recv queues */
  134. if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
  135. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
  136. VMXNET3_CMD_GET_QUEUE_STATUS);
  137. if (adapter->tqd_start->status.stopped) {
  138. printk(KERN_ERR "%s: tq error 0x%x\n",
  139. adapter->netdev->name,
  140. le32_to_cpu(adapter->tqd_start->status.error));
  141. }
  142. if (adapter->rqd_start->status.stopped) {
  143. printk(KERN_ERR "%s: rq error 0x%x\n",
  144. adapter->netdev->name,
  145. le32_to_cpu(adapter->rqd_start->status.error));
  146. }
  147. schedule_work(&adapter->work);
  148. }
  149. }
  150. #ifdef __BIG_ENDIAN_BITFIELD
  151. /*
  152. * The device expects the bitfields in shared structures to be written in
  153. * little endian. When the CPU is big endian, the following routines are
  154. * used to correctly read from and write to the device ABI.
  155. * The general technique used here is: double-word bitfields are defined in
  156. * the opposite order for big-endian architectures. Before the driver reads
  157. * them, the complete double word is translated using le32_to_cpu. Similarly,
  158. * after the driver writes into bitfields, cpu_to_le32 is used to translate
  159. * the double words into the required format.
  160. * To avoid touching bits in the shared structure more than once, temporary
  161. * descriptors are used. These are passed as srcDesc to the following functions.
  162. */
  163. static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
  164. struct Vmxnet3_RxDesc *dstDesc)
  165. {
  166. u32 *src = (u32 *)srcDesc + 2;
  167. u32 *dst = (u32 *)dstDesc + 2;
  168. dstDesc->addr = le64_to_cpu(srcDesc->addr);
  169. *dst = le32_to_cpu(*src);
  170. dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
  171. }
  172. static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
  173. struct Vmxnet3_TxDesc *dstDesc)
  174. {
  175. int i;
  176. u32 *src = (u32 *)(srcDesc + 1);
  177. u32 *dst = (u32 *)(dstDesc + 1);
  178. /* Working backwards so that the gen bit is set at the end. */
  179. for (i = 2; i > 0; i--) {
  180. src--;
  181. dst--;
  182. *dst = cpu_to_le32(*src);
  183. }
  184. }
  185. static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
  186. struct Vmxnet3_RxCompDesc *dstDesc)
  187. {
  188. int i = 0;
  189. u32 *src = (u32 *)srcDesc;
  190. u32 *dst = (u32 *)dstDesc;
  191. for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
  192. *dst = le32_to_cpu(*src);
  193. src++;
  194. dst++;
  195. }
  196. }
  197. /* Used to read bitfield values from double words. */
  198. static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
  199. {
  200. u32 temp = le32_to_cpu(*bitfield);
  201. u32 mask = ((1 << size) - 1) << pos;
  202. temp &= mask;
  203. temp >>= pos;
  204. return temp;
  205. }
  206. #endif /* __BIG_ENDIAN_BITFIELD */
  207. #ifdef __BIG_ENDIAN_BITFIELD
  208. # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
  209. txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
  210. VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
  211. # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
  212. txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
  213. VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
  214. # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
  215. VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
  216. VMXNET3_TCD_GEN_SIZE)
  217. # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
  218. VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
  219. # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
  220. (dstrcd) = (tmp); \
  221. vmxnet3_RxCompToCPU((rcd), (tmp)); \
  222. } while (0)
  223. # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
  224. (dstrxd) = (tmp); \
  225. vmxnet3_RxDescToCPU((rxd), (tmp)); \
  226. } while (0)
  227. #else
  228. # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
  229. # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
  230. # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
  231. # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
  232. # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
  233. # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
  234. #endif /* __BIG_ENDIAN_BITFIELD */
  235. static void
  236. vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
  237. struct pci_dev *pdev)
  238. {
  239. if (tbi->map_type == VMXNET3_MAP_SINGLE)
  240. pci_unmap_single(pdev, tbi->dma_addr, tbi->len,
  241. PCI_DMA_TODEVICE);
  242. else if (tbi->map_type == VMXNET3_MAP_PAGE)
  243. pci_unmap_page(pdev, tbi->dma_addr, tbi->len,
  244. PCI_DMA_TODEVICE);
  245. else
  246. BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);
  247. tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
  248. }
  249. static int
  250. vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
  251. struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
  252. {
  253. struct sk_buff *skb;
  254. int entries = 0;
  255. /* no out of order completion */
  256. BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
  257. BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);
  258. skb = tq->buf_info[eop_idx].skb;
  259. BUG_ON(skb == NULL);
  260. tq->buf_info[eop_idx].skb = NULL;
  261. VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);
  262. while (tq->tx_ring.next2comp != eop_idx) {
  263. vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
  264. pdev);
  265. /* update next2comp w/o tx_lock. Since we are marking more,
  266. * instead of less, tx ring entries avail, the worst case is
  267. * that the tx routine incorrectly re-queues a pkt due to
  268. * insufficient tx ring entries.
  269. */
  270. vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
  271. entries++;
  272. }
  273. dev_kfree_skb_any(skb);
  274. return entries;
  275. }
  276. static int
  277. vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
  278. struct vmxnet3_adapter *adapter)
  279. {
  280. int completed = 0;
  281. union Vmxnet3_GenericDesc *gdesc;
  282. gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
  283. while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
  284. completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
  285. &gdesc->tcd), tq, adapter->pdev,
  286. adapter);
  287. vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
  288. gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
  289. }
  290. if (completed) {
  291. spin_lock(&tq->tx_lock);
  292. if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
  293. vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
  294. VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
  295. netif_carrier_ok(adapter->netdev))) {
  296. vmxnet3_tq_wake(tq, adapter);
  297. }
  298. spin_unlock(&tq->tx_lock);
  299. }
  300. return completed;
  301. }
  302. static void
  303. vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
  304. struct vmxnet3_adapter *adapter)
  305. {
  306. int i;
  307. while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
  308. struct vmxnet3_tx_buf_info *tbi;
  309. union Vmxnet3_GenericDesc *gdesc;
  310. tbi = tq->buf_info + tq->tx_ring.next2comp;
  311. gdesc = tq->tx_ring.base + tq->tx_ring.next2comp;
  312. vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
  313. if (tbi->skb) {
  314. dev_kfree_skb_any(tbi->skb);
  315. tbi->skb = NULL;
  316. }
  317. vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
  318. }
  319. /* sanity check, verify all buffers are indeed unmapped and freed */
  320. for (i = 0; i < tq->tx_ring.size; i++) {
  321. BUG_ON(tq->buf_info[i].skb != NULL ||
  322. tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
  323. }
  324. tq->tx_ring.gen = VMXNET3_INIT_GEN;
  325. tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
  326. tq->comp_ring.gen = VMXNET3_INIT_GEN;
  327. tq->comp_ring.next2proc = 0;
  328. }
  329. void
  330. vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
  331. struct vmxnet3_adapter *adapter)
  332. {
  333. if (tq->tx_ring.base) {
  334. pci_free_consistent(adapter->pdev, tq->tx_ring.size *
  335. sizeof(struct Vmxnet3_TxDesc),
  336. tq->tx_ring.base, tq->tx_ring.basePA);
  337. tq->tx_ring.base = NULL;
  338. }
  339. if (tq->data_ring.base) {
  340. pci_free_consistent(adapter->pdev, tq->data_ring.size *
  341. sizeof(struct Vmxnet3_TxDataDesc),
  342. tq->data_ring.base, tq->data_ring.basePA);
  343. tq->data_ring.base = NULL;
  344. }
  345. if (tq->comp_ring.base) {
  346. pci_free_consistent(adapter->pdev, tq->comp_ring.size *
  347. sizeof(struct Vmxnet3_TxCompDesc),
  348. tq->comp_ring.base, tq->comp_ring.basePA);
  349. tq->comp_ring.base = NULL;
  350. }
  351. kfree(tq->buf_info);
  352. tq->buf_info = NULL;
  353. }
  354. static void
  355. vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
  356. struct vmxnet3_adapter *adapter)
  357. {
  358. int i;
  359. /* reset the tx ring contents to 0 and reset the tx ring states */
  360. memset(tq->tx_ring.base, 0, tq->tx_ring.size *
  361. sizeof(struct Vmxnet3_TxDesc));
  362. tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
  363. tq->tx_ring.gen = VMXNET3_INIT_GEN;
  364. memset(tq->data_ring.base, 0, tq->data_ring.size *
  365. sizeof(struct Vmxnet3_TxDataDesc));
  366. /* reset the tx comp ring contents to 0 and reset comp ring states */
  367. memset(tq->comp_ring.base, 0, tq->comp_ring.size *
  368. sizeof(struct Vmxnet3_TxCompDesc));
  369. tq->comp_ring.next2proc = 0;
  370. tq->comp_ring.gen = VMXNET3_INIT_GEN;
  371. /* reset the bookkeeping data */
  372. memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
  373. for (i = 0; i < tq->tx_ring.size; i++)
  374. tq->buf_info[i].map_type = VMXNET3_MAP_NONE;
  375. /* stats are not reset */
  376. }
  377. static int
  378. vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
  379. struct vmxnet3_adapter *adapter)
  380. {
  381. BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
  382. tq->comp_ring.base || tq->buf_info);
  383. tq->tx_ring.base = pci_alloc_consistent(adapter->pdev, tq->tx_ring.size
  384. * sizeof(struct Vmxnet3_TxDesc),
  385. &tq->tx_ring.basePA);
  386. if (!tq->tx_ring.base) {
  387. printk(KERN_ERR "%s: failed to allocate tx ring\n",
  388. adapter->netdev->name);
  389. goto err;
  390. }
  391. tq->data_ring.base = pci_alloc_consistent(adapter->pdev,
  392. tq->data_ring.size *
  393. sizeof(struct Vmxnet3_TxDataDesc),
  394. &tq->data_ring.basePA);
  395. if (!tq->data_ring.base) {
  396. printk(KERN_ERR "%s: failed to allocate data ring\n",
  397. adapter->netdev->name);
  398. goto err;
  399. }
  400. tq->comp_ring.base = pci_alloc_consistent(adapter->pdev,
  401. tq->comp_ring.size *
  402. sizeof(struct Vmxnet3_TxCompDesc),
  403. &tq->comp_ring.basePA);
  404. if (!tq->comp_ring.base) {
  405. printk(KERN_ERR "%s: failed to allocate tx comp ring\n",
  406. adapter->netdev->name);
  407. goto err;
  408. }
  409. tq->buf_info = kcalloc(tq->tx_ring.size, sizeof(tq->buf_info[0]),
  410. GFP_KERNEL);
  411. if (!tq->buf_info) {
  412. printk(KERN_ERR "%s: failed to allocate tx bufinfo\n",
  413. adapter->netdev->name);
  414. goto err;
  415. }
  416. return 0;
  417. err:
  418. vmxnet3_tq_destroy(tq, adapter);
  419. return -ENOMEM;
  420. }
  421. /*
  422. * starting from ring->next2fill, allocate rx buffers for the given ring
  423. * of the rx queue and update the rx desc. Stop after @num_to_alloc buffers
  424. * are allocated or allocation fails
  425. */
  426. static int
  427. vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
  428. int num_to_alloc, struct vmxnet3_adapter *adapter)
  429. {
  430. int num_allocated = 0;
  431. struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
  432. struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
  433. u32 val;
  434. while (num_allocated < num_to_alloc) {
  435. struct vmxnet3_rx_buf_info *rbi;
  436. union Vmxnet3_GenericDesc *gd;
  437. rbi = rbi_base + ring->next2fill;
  438. gd = ring->base + ring->next2fill;
  439. if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
  440. if (rbi->skb == NULL) {
  441. rbi->skb = dev_alloc_skb(rbi->len +
  442. NET_IP_ALIGN);
  443. if (unlikely(rbi->skb == NULL)) {
  444. rq->stats.rx_buf_alloc_failure++;
  445. break;
  446. }
  447. rbi->skb->dev = adapter->netdev;
  448. skb_reserve(rbi->skb, NET_IP_ALIGN);
  449. rbi->dma_addr = pci_map_single(adapter->pdev,
  450. rbi->skb->data, rbi->len,
  451. PCI_DMA_FROMDEVICE);
  452. } else {
  453. /* rx buffer skipped by the device */
  454. }
  455. val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
  456. } else {
  457. BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
  458. rbi->len != PAGE_SIZE);
  459. if (rbi->page == NULL) {
  460. rbi->page = alloc_page(GFP_ATOMIC);
  461. if (unlikely(rbi->page == NULL)) {
  462. rq->stats.rx_buf_alloc_failure++;
  463. break;
  464. }
  465. rbi->dma_addr = pci_map_page(adapter->pdev,
  466. rbi->page, 0, PAGE_SIZE,
  467. PCI_DMA_FROMDEVICE);
  468. } else {
  469. /* rx buffers skipped by the device */
  470. }
  471. val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
  472. }
  473. BUG_ON(rbi->dma_addr == 0);
  474. gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
  475. gd->dword[2] = cpu_to_le32((ring->gen << VMXNET3_RXD_GEN_SHIFT)
  476. | val | rbi->len);
  477. num_allocated++;
  478. vmxnet3_cmd_ring_adv_next2fill(ring);
  479. }
  480. rq->uncommitted[ring_idx] += num_allocated;
  481. dev_dbg(&adapter->netdev->dev,
  482. "alloc_rx_buf: %d allocated, next2fill %u, next2comp "
  483. "%u, uncommited %u\n", num_allocated, ring->next2fill,
  484. ring->next2comp, rq->uncommitted[ring_idx]);
  485. /* so that the device can distinguish a full ring and an empty ring */
  486. BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);
  487. return num_allocated;
  488. }
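/* append the page buffer in @rbi to @skb as a page fragment of rcd->len bytes */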
  489. static void
  490. vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
  491. struct vmxnet3_rx_buf_info *rbi)
  492. {
  493. struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
  494. skb_shinfo(skb)->nr_frags;
  495. BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
  496. frag->page = rbi->page;
  497. frag->page_offset = 0;
  498. frag->size = rcd->len;
  499. skb->data_len += frag->size;
  500. skb_shinfo(skb)->nr_frags++;
  501. }
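/*
 * Fill tx descriptors for @skb starting at tx_ring.next2fill: the copied
 * header region (if any), the remaining linear data, and each page frag,
 * DMA-mapping the buffers as needed.
 */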
  502. static void
  503. vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
  504. struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
  505. struct vmxnet3_adapter *adapter)
  506. {
  507. u32 dw2, len;
  508. unsigned long buf_offset;
  509. int i;
  510. union Vmxnet3_GenericDesc *gdesc;
  511. struct vmxnet3_tx_buf_info *tbi = NULL;
  512. BUG_ON(ctx->copy_size > skb_headlen(skb));
  513. /* use the previous gen bit for the SOP desc */
  514. dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
  515. ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
  516. gdesc = ctx->sop_txd; /* both loops below can be skipped */
  517. /* no need to map the buffer if headers are copied */
  518. if (ctx->copy_size) {
  519. ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
  520. tq->tx_ring.next2fill *
  521. sizeof(struct Vmxnet3_TxDataDesc));
  522. ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
  523. ctx->sop_txd->dword[3] = 0;
  524. tbi = tq->buf_info + tq->tx_ring.next2fill;
  525. tbi->map_type = VMXNET3_MAP_NONE;
  526. dev_dbg(&adapter->netdev->dev,
  527. "txd[%u]: 0x%Lx 0x%x 0x%x\n",
  528. tq->tx_ring.next2fill,
  529. le64_to_cpu(ctx->sop_txd->txd.addr),
  530. ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
  531. vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
  532. /* use the right gen for non-SOP desc */
  533. dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
  534. }
  535. /* linear part can use multiple tx desc if it's big */
  536. len = skb_headlen(skb) - ctx->copy_size;
  537. buf_offset = ctx->copy_size;
  538. while (len) {
  539. u32 buf_size;
  540. buf_size = len > VMXNET3_MAX_TX_BUF_SIZE ?
  541. VMXNET3_MAX_TX_BUF_SIZE : len;
  542. tbi = tq->buf_info + tq->tx_ring.next2fill;
  543. tbi->map_type = VMXNET3_MAP_SINGLE;
  544. tbi->dma_addr = pci_map_single(adapter->pdev,
  545. skb->data + buf_offset, buf_size,
  546. PCI_DMA_TODEVICE);
  547. tbi->len = buf_size; /* this automatically converts 2^14 to 0 */
  548. gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
  549. BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
  550. gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
  551. gdesc->dword[2] = cpu_to_le32(dw2 | buf_size);
  552. gdesc->dword[3] = 0;
  553. dev_dbg(&adapter->netdev->dev,
  554. "txd[%u]: 0x%Lx 0x%x 0x%x\n",
  555. tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
  556. le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
  557. vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
  558. dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
  559. len -= buf_size;
  560. buf_offset += buf_size;
  561. }
  562. for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  563. struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
  564. tbi = tq->buf_info + tq->tx_ring.next2fill;
  565. tbi->map_type = VMXNET3_MAP_PAGE;
  566. tbi->dma_addr = pci_map_page(adapter->pdev, frag->page,
  567. frag->page_offset, frag->size,
  568. PCI_DMA_TODEVICE);
  569. tbi->len = frag->size;
  570. gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
  571. BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
  572. gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
  573. gdesc->dword[2] = cpu_to_le32(dw2 | frag->size);
  574. gdesc->dword[3] = 0;
  575. dev_dbg(&adapter->netdev->dev,
  576. "txd[%u]: 0x%llu %u %u\n",
  577. tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
  578. le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
  579. vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
  580. dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
  581. }
  582. ctx->eop_txd = gdesc;
  583. /* set the last buf_info for the pkt */
  584. tbi->skb = skb;
  585. tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
  586. }
  587. /*
  588. * parse and copy relevant protocol headers:
  589. * For a tso pkt, relevant headers are L2/3/4 including options
  590. * For a pkt requesting csum offloading, they are L2/3 and may include L4
  591. * if it's a TCP/UDP pkt
  592. *
  593. * Returns:
  594. * -1: error happens during parsing
  595. * 0: protocol headers parsed, but too big to be copied
  596. * 1: protocol headers parsed and copied
  597. *
  598. * Other effects:
  599. * 1. related *ctx fields are updated.
  600. * 2. ctx->copy_size is # of bytes copied
  601. * 3. the portion copied is guaranteed to be in the linear part
  602. *
  603. */
  604. static int
  605. vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
  606. struct vmxnet3_tx_ctx *ctx,
  607. struct vmxnet3_adapter *adapter)
  608. {
  609. struct Vmxnet3_TxDataDesc *tdd;
  610. if (ctx->mss) {
  611. ctx->eth_ip_hdr_size = skb_transport_offset(skb);
  612. ctx->l4_hdr_size = ((struct tcphdr *)
  613. skb_transport_header(skb))->doff * 4;
  614. ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
  615. } else {
  616. unsigned int pull_size;
  617. if (skb->ip_summed == CHECKSUM_PARTIAL) {
  618. ctx->eth_ip_hdr_size = skb_transport_offset(skb);
  619. if (ctx->ipv4) {
  620. struct iphdr *iph = (struct iphdr *)
  621. skb_network_header(skb);
  622. if (iph->protocol == IPPROTO_TCP) {
  623. pull_size = ctx->eth_ip_hdr_size +
  624. sizeof(struct tcphdr);
  625. if (unlikely(!pskb_may_pull(skb,
  626. pull_size))) {
  627. goto err;
  628. }
  629. ctx->l4_hdr_size = ((struct tcphdr *)
  630. skb_transport_header(skb))->doff * 4;
  631. } else if (iph->protocol == IPPROTO_UDP) {
  632. ctx->l4_hdr_size =
  633. sizeof(struct udphdr);
  634. } else {
  635. ctx->l4_hdr_size = 0;
  636. }
  637. } else {
  638. /* for simplicity, don't copy L4 headers */
  639. ctx->l4_hdr_size = 0;
  640. }
  641. ctx->copy_size = ctx->eth_ip_hdr_size +
  642. ctx->l4_hdr_size;
  643. } else {
  644. ctx->eth_ip_hdr_size = 0;
  645. ctx->l4_hdr_size = 0;
  646. /* copy as much as allowed */
  647. ctx->copy_size = min((unsigned int)VMXNET3_HDR_COPY_SIZE
  648. , skb_headlen(skb));
  649. }
  650. /* make sure headers are accessible directly */
  651. if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
  652. goto err;
  653. }
  654. if (unlikely(ctx->copy_size > VMXNET3_HDR_COPY_SIZE)) {
  655. tq->stats.oversized_hdr++;
  656. ctx->copy_size = 0;
  657. return 0;
  658. }
  659. tdd = tq->data_ring.base + tq->tx_ring.next2fill;
  660. memcpy(tdd->data, skb->data, ctx->copy_size);
  661. dev_dbg(&adapter->netdev->dev,
  662. "copy %u bytes to dataRing[%u]\n",
  663. ctx->copy_size, tq->tx_ring.next2fill);
  664. return 1;
  665. err:
  666. return -1;
  667. }
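/*
 * Prepare @skb for TSO: clear the IP header checksum (IPv4 only) and seed
 * tcph->check with the pseudo-header checksum so the device can complete it.
 */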
  668. static void
  669. vmxnet3_prepare_tso(struct sk_buff *skb,
  670. struct vmxnet3_tx_ctx *ctx)
  671. {
  672. struct tcphdr *tcph = (struct tcphdr *)skb_transport_header(skb);
  673. if (ctx->ipv4) {
  674. struct iphdr *iph = (struct iphdr *)skb_network_header(skb);
  675. iph->check = 0;
  676. tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
  677. IPPROTO_TCP, 0);
  678. } else {
  679. struct ipv6hdr *iph = (struct ipv6hdr *)skb_network_header(skb);
  680. tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
  681. IPPROTO_TCP, 0);
  682. }
  683. }
  684. /*
  685. * Transmits a pkt thru a given tq
  686. * Returns:
  687. * NETDEV_TX_OK: descriptors are set up successfully
  688. * NETDEV_TX_OK: error occurred, the pkt is dropped
  689. * NETDEV_TX_BUSY: tx ring is full, queue is stopped
  690. *
  691. * Side-effects:
  692. * 1. tx ring may be changed
  693. * 2. tq stats may be updated accordingly
  694. * 3. shared->txNumDeferred may be updated
  695. */
  696. static int
  697. vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
  698. struct vmxnet3_adapter *adapter, struct net_device *netdev)
  699. {
  700. int ret;
  701. u32 count;
  702. unsigned long flags;
  703. struct vmxnet3_tx_ctx ctx;
  704. union Vmxnet3_GenericDesc *gdesc;
  705. #ifdef __BIG_ENDIAN_BITFIELD
  706. /* Use temporary descriptor to avoid touching bits multiple times */
  707. union Vmxnet3_GenericDesc tempTxDesc;
  708. #endif
  709. /* conservatively estimate # of descriptors to use */
  710. count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) +
  711. skb_shinfo(skb)->nr_frags + 1;
  712. ctx.ipv4 = (skb->protocol == __constant_ntohs(ETH_P_IP));
  713. ctx.mss = skb_shinfo(skb)->gso_size;
  714. if (ctx.mss) {
  715. if (skb_header_cloned(skb)) {
  716. if (unlikely(pskb_expand_head(skb, 0, 0,
  717. GFP_ATOMIC) != 0)) {
  718. tq->stats.drop_tso++;
  719. goto drop_pkt;
  720. }
  721. tq->stats.copy_skb_header++;
  722. }
  723. vmxnet3_prepare_tso(skb, &ctx);
  724. } else {
  725. if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {
  726. /* non-tso pkts must not use more than
  727. * VMXNET3_MAX_TXD_PER_PKT entries
  728. */
  729. if (skb_linearize(skb) != 0) {
  730. tq->stats.drop_too_many_frags++;
  731. goto drop_pkt;
  732. }
  733. tq->stats.linearized++;
  734. /* recalculate the # of descriptors to use */
  735. count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
  736. }
  737. }
  738. ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
  739. if (ret >= 0) {
  740. BUG_ON(ret <= 0 && ctx.copy_size != 0);
  741. /* hdrs parsed, check against other limits */
  742. if (ctx.mss) {
  743. if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
  744. VMXNET3_MAX_TX_BUF_SIZE)) {
  745. goto hdr_too_big;
  746. }
  747. } else {
  748. if (skb->ip_summed == CHECKSUM_PARTIAL) {
  749. if (unlikely(ctx.eth_ip_hdr_size +
  750. skb->csum_offset >
  751. VMXNET3_MAX_CSUM_OFFSET)) {
  752. goto hdr_too_big;
  753. }
  754. }
  755. }
  756. } else {
  757. tq->stats.drop_hdr_inspect_err++;
  758. goto drop_pkt;
  759. }
  760. spin_lock_irqsave(&tq->tx_lock, flags);
  761. if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
  762. tq->stats.tx_ring_full++;
  763. dev_dbg(&adapter->netdev->dev,
  764. "tx queue stopped on %s, next2comp %u"
  765. " next2fill %u\n", adapter->netdev->name,
  766. tq->tx_ring.next2comp, tq->tx_ring.next2fill);
  767. vmxnet3_tq_stop(tq, adapter);
  768. spin_unlock_irqrestore(&tq->tx_lock, flags);
  769. return NETDEV_TX_BUSY;
  770. }
  771. /* fill tx descs related to addr & len */
  772. vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);
  773. /* setup the EOP desc */
  774. ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
  775. /* setup the SOP desc */
  776. #ifdef __BIG_ENDIAN_BITFIELD
  777. gdesc = &tempTxDesc;
  778. gdesc->dword[2] = ctx.sop_txd->dword[2];
  779. gdesc->dword[3] = ctx.sop_txd->dword[3];
  780. #else
  781. gdesc = ctx.sop_txd;
  782. #endif
  783. if (ctx.mss) {
  784. gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
  785. gdesc->txd.om = VMXNET3_OM_TSO;
  786. gdesc->txd.msscof = ctx.mss;
  787. le32_add_cpu(&tq->shared->txNumDeferred, (skb->len -
  788. gdesc->txd.hlen + ctx.mss - 1) / ctx.mss);
  789. } else {
  790. if (skb->ip_summed == CHECKSUM_PARTIAL) {
  791. gdesc->txd.hlen = ctx.eth_ip_hdr_size;
  792. gdesc->txd.om = VMXNET3_OM_CSUM;
  793. gdesc->txd.msscof = ctx.eth_ip_hdr_size +
  794. skb->csum_offset;
  795. } else {
  796. gdesc->txd.om = 0;
  797. gdesc->txd.msscof = 0;
  798. }
  799. le32_add_cpu(&tq->shared->txNumDeferred, 1);
  800. }
  801. if (vlan_tx_tag_present(skb)) {
  802. gdesc->txd.ti = 1;
  803. gdesc->txd.tci = vlan_tx_tag_get(skb);
  804. }
  805. /* finally flips the GEN bit of the SOP desc. */
  806. gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
  807. VMXNET3_TXD_GEN);
  808. #ifdef __BIG_ENDIAN_BITFIELD
  809. /* Finished updating in bitfields of Tx Desc, so write them in original
  810. * place.
  811. */
  812. vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
  813. (struct Vmxnet3_TxDesc *)ctx.sop_txd);
  814. gdesc = ctx.sop_txd;
  815. #endif
  816. dev_dbg(&adapter->netdev->dev,
  817. "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
  818. (u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd -
  819. tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
  820. le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));
  821. spin_unlock_irqrestore(&tq->tx_lock, flags);
  822. if (le32_to_cpu(tq->shared->txNumDeferred) >=
  823. le32_to_cpu(tq->shared->txThreshold)) {
  824. tq->shared->txNumDeferred = 0;
  825. VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_TXPROD,
  826. tq->tx_ring.next2fill);
  827. }
  828. return NETDEV_TX_OK;
  829. hdr_too_big:
  830. tq->stats.drop_oversized_hdr++;
  831. drop_pkt:
  832. tq->stats.drop_total++;
  833. dev_kfree_skb(skb);
  834. return NETDEV_TX_OK;
  835. }
  836. static netdev_tx_t
  837. vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
  838. {
  839. struct vmxnet3_adapter *adapter = netdev_priv(netdev);
  840. return vmxnet3_tq_xmit(skb, &adapter->tx_queue, adapter, netdev);
  841. }
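/* set skb->ip_summed based on the checksum bits of the rx completion desc */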
  842. static void
  843. vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
  844. struct sk_buff *skb,
  845. union Vmxnet3_GenericDesc *gdesc)
  846. {
  847. if (!gdesc->rcd.cnc && adapter->rxcsum) {
  848. /* typical case: TCP/UDP over IP and both csums are correct */
  849. if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) ==
  850. VMXNET3_RCD_CSUM_OK) {
  851. skb->ip_summed = CHECKSUM_UNNECESSARY;
  852. BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
  853. BUG_ON(!(gdesc->rcd.v4 || gdesc->rcd.v6));
  854. BUG_ON(gdesc->rcd.frg);
  855. } else {
  856. if (gdesc->rcd.csum) {
  857. skb->csum = htons(gdesc->rcd.csum);
  858. skb->ip_summed = CHECKSUM_PARTIAL;
  859. } else {
  860. skb->ip_summed = CHECKSUM_NONE;
  861. }
  862. }
  863. } else {
  864. skb->ip_summed = CHECKSUM_NONE;
  865. }
  866. }
  867. static void
  868. vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
  869. struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter)
  870. {
  871. rq->stats.drop_err++;
  872. if (!rcd->fcs)
  873. rq->stats.drop_fcs++;
  874. rq->stats.drop_total++;
  875. /*
  876. * We do not unmap and chain the rx buffer to the skb.
  877. * We basically pretend this buffer is not used and will be recycled
  878. * by vmxnet3_rq_alloc_rx_buf()
  879. */
  880. /*
  881. * ctx->skb may be NULL if this is the first and the only one
  882. * desc for the pkt
  883. */
  884. if (ctx->skb)
  885. dev_kfree_skb_irq(ctx->skb);
  886. ctx->skb = NULL;
  887. }
  888. static int
  889. vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
  890. struct vmxnet3_adapter *adapter, int quota)
  891. {
  892. static u32 rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2};
  893. u32 num_rxd = 0;
  894. struct Vmxnet3_RxCompDesc *rcd;
  895. struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
  896. #ifdef __BIG_ENDIAN_BITFIELD
  897. struct Vmxnet3_RxDesc rxCmdDesc;
  898. struct Vmxnet3_RxCompDesc rxComp;
  899. #endif
  900. vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
  901. &rxComp);
  902. while (rcd->gen == rq->comp_ring.gen) {
  903. struct vmxnet3_rx_buf_info *rbi;
  904. struct sk_buff *skb;
  905. int num_to_alloc;
  906. struct Vmxnet3_RxDesc *rxd;
  907. u32 idx, ring_idx;
  908. if (num_rxd >= quota) {
  909. /* we may stop even before we see the EOP desc of
  910. * the current pkt
  911. */
  912. break;
  913. }
  914. num_rxd++;
  915. idx = rcd->rxdIdx;
  916. ring_idx = rcd->rqID == rq->qid ? 0 : 1;
  917. vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
  918. &rxCmdDesc);
  919. rbi = rq->buf_info[ring_idx] + idx;
  920. BUG_ON(rxd->addr != rbi->dma_addr ||
  921. rxd->len != rbi->len);
  922. if (unlikely(rcd->eop && rcd->err)) {
  923. vmxnet3_rx_error(rq, rcd, ctx, adapter);
  924. goto rcd_done;
  925. }
  926. if (rcd->sop) { /* first buf of the pkt */
  927. BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
  928. rcd->rqID != rq->qid);
  929. BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
  930. BUG_ON(ctx->skb != NULL || rbi->skb == NULL);
  931. if (unlikely(rcd->len == 0)) {
  932. /* Pretend the rx buffer is skipped. */
  933. BUG_ON(!(rcd->sop && rcd->eop));
  934. dev_dbg(&adapter->netdev->dev,
  935. "rxRing[%u][%u] 0 length\n",
  936. ring_idx, idx);
  937. goto rcd_done;
  938. }
  939. ctx->skb = rbi->skb;
  940. rbi->skb = NULL;
  941. pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len,
  942. PCI_DMA_FROMDEVICE);
  943. skb_put(ctx->skb, rcd->len);
  944. } else {
  945. BUG_ON(ctx->skb == NULL);
  946. /* non SOP buffer must be type 1 in most cases */
  947. if (rbi->buf_type == VMXNET3_RX_BUF_PAGE) {
  948. BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);
  949. if (rcd->len) {
  950. pci_unmap_page(adapter->pdev,
  951. rbi->dma_addr, rbi->len,
  952. PCI_DMA_FROMDEVICE);
  953. vmxnet3_append_frag(ctx->skb, rcd, rbi);
  954. rbi->page = NULL;
  955. }
  956. } else {
  957. /*
  958. * The only time a non-SOP buffer is type 0 is
  959. * when it's EOP and error flag is raised, which
  960. * has already been handled.
  961. */
  962. BUG_ON(true);
  963. }
  964. }
  965. skb = ctx->skb;
  966. if (rcd->eop) {
  967. skb->len += skb->data_len;
  968. skb->truesize += skb->data_len;
  969. vmxnet3_rx_csum(adapter, skb,
  970. (union Vmxnet3_GenericDesc *)rcd);
  971. skb->protocol = eth_type_trans(skb, adapter->netdev);
  972. if (unlikely(adapter->vlan_grp && rcd->ts)) {
  973. vlan_hwaccel_receive_skb(skb,
  974. adapter->vlan_grp, rcd->tci);
  975. } else {
  976. netif_receive_skb(skb);
  977. }
  978. ctx->skb = NULL;
  979. }
  980. rcd_done:
  981. /* device may skip some rx descs */
  982. rq->rx_ring[ring_idx].next2comp = idx;
  983. VMXNET3_INC_RING_IDX_ONLY(rq->rx_ring[ring_idx].next2comp,
  984. rq->rx_ring[ring_idx].size);
  985. /* refill rx buffers frequently to avoid starving the h/w */
  986. num_to_alloc = vmxnet3_cmd_ring_desc_avail(rq->rx_ring +
  987. ring_idx);
  988. if (unlikely(num_to_alloc > VMXNET3_RX_ALLOC_THRESHOLD(rq,
  989. ring_idx, adapter))) {
  990. vmxnet3_rq_alloc_rx_buf(rq, ring_idx, num_to_alloc,
  991. adapter);
  992. /* if needed, update the register */
  993. if (unlikely(rq->shared->updateRxProd)) {
  994. VMXNET3_WRITE_BAR0_REG(adapter,
  995. rxprod_reg[ring_idx] + rq->qid * 8,
  996. rq->rx_ring[ring_idx].next2fill);
  997. rq->uncommitted[ring_idx] = 0;
  998. }
  999. }
  1000. vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
  1001. vmxnet3_getRxComp(rcd,
  1002. &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
  1003. }
  1004. return num_rxd;
  1005. }
  1006. static void
  1007. vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
  1008. struct vmxnet3_adapter *adapter)
  1009. {
  1010. u32 i, ring_idx;
  1011. struct Vmxnet3_RxDesc *rxd;
  1012. for (ring_idx = 0; ring_idx < 2; ring_idx++) {
  1013. for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
  1014. #ifdef __BIG_ENDIAN_BITFIELD
  1015. struct Vmxnet3_RxDesc rxDesc;
  1016. #endif
  1017. vmxnet3_getRxDesc(rxd,
  1018. &rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);
  1019. if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
  1020. rq->buf_info[ring_idx][i].skb) {
  1021. pci_unmap_single(adapter->pdev, rxd->addr,
  1022. rxd->len, PCI_DMA_FROMDEVICE);
  1023. dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
  1024. rq->buf_info[ring_idx][i].skb = NULL;
  1025. } else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
  1026. rq->buf_info[ring_idx][i].page) {
  1027. pci_unmap_page(adapter->pdev, rxd->addr,
  1028. rxd->len, PCI_DMA_FROMDEVICE);
  1029. put_page(rq->buf_info[ring_idx][i].page);
  1030. rq->buf_info[ring_idx][i].page = NULL;
  1031. }
  1032. }
  1033. rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
  1034. rq->rx_ring[ring_idx].next2fill =
  1035. rq->rx_ring[ring_idx].next2comp = 0;
  1036. rq->uncommitted[ring_idx] = 0;
  1037. }
  1038. rq->comp_ring.gen = VMXNET3_INIT_GEN;
  1039. rq->comp_ring.next2proc = 0;
  1040. }
  1041. void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
  1042. struct vmxnet3_adapter *adapter)
  1043. {
  1044. int i;
  1045. int j;
  1046. /* all rx buffers must have already been freed */
  1047. for (i = 0; i < 2; i++) {
  1048. if (rq->buf_info[i]) {
  1049. for (j = 0; j < rq->rx_ring[i].size; j++)
  1050. BUG_ON(rq->buf_info[i][j].page != NULL);
  1051. }
  1052. }
  1053. kfree(rq->buf_info[0]);
  1054. for (i = 0; i < 2; i++) {
  1055. if (rq->rx_ring[i].base) {
  1056. pci_free_consistent(adapter->pdev, rq->rx_ring[i].size
  1057. * sizeof(struct Vmxnet3_RxDesc),
  1058. rq->rx_ring[i].base,
  1059. rq->rx_ring[i].basePA);
  1060. rq->rx_ring[i].base = NULL;
  1061. }
  1062. rq->buf_info[i] = NULL;
  1063. }
  1064. if (rq->comp_ring.base) {
  1065. pci_free_consistent(adapter->pdev, rq->comp_ring.size *
  1066. sizeof(struct Vmxnet3_RxCompDesc),
  1067. rq->comp_ring.base, rq->comp_ring.basePA);
  1068. rq->comp_ring.base = NULL;
  1069. }
  1070. }
  1071. static int
  1072. vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
  1073. struct vmxnet3_adapter *adapter)
  1074. {
  1075. int i;
  1076. /* initialize buf_info */
  1077. for (i = 0; i < rq->rx_ring[0].size; i++) {
  1078. /* 1st buf for a pkt is skbuff */
  1079. if (i % adapter->rx_buf_per_pkt == 0) {
  1080. rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
  1081. rq->buf_info[0][i].len = adapter->skb_buf_size;
  1082. } else { /* subsequent bufs for a pkt are frags */
  1083. rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
  1084. rq->buf_info[0][i].len = PAGE_SIZE;
  1085. }
  1086. }
  1087. for (i = 0; i < rq->rx_ring[1].size; i++) {
  1088. rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
  1089. rq->buf_info[1][i].len = PAGE_SIZE;
  1090. }
  1091. /* reset internal state and allocate buffers for both rings */
  1092. for (i = 0; i < 2; i++) {
  1093. rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
  1094. rq->uncommitted[i] = 0;
  1095. memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
  1096. sizeof(struct Vmxnet3_RxDesc));
  1097. rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
  1098. }
  1099. if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
  1100. adapter) == 0) {
  1101. /* need at least 1 rx buffer for the 1st ring */
  1102. return -ENOMEM;
  1103. }
  1104. vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);
  1105. /* reset the comp ring */
  1106. rq->comp_ring.next2proc = 0;
  1107. memset(rq->comp_ring.base, 0, rq->comp_ring.size *
  1108. sizeof(struct Vmxnet3_RxCompDesc));
  1109. rq->comp_ring.gen = VMXNET3_INIT_GEN;
  1110. /* reset rxctx */
  1111. rq->rx_ctx.skb = NULL;
  1112. /* stats are not reset */
  1113. return 0;
  1114. }
  1115. static int
  1116. vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
  1117. {
  1118. int i;
  1119. size_t sz;
  1120. struct vmxnet3_rx_buf_info *bi;
  1121. for (i = 0; i < 2; i++) {
  1122. sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
  1123. rq->rx_ring[i].base = pci_alloc_consistent(adapter->pdev, sz,
  1124. &rq->rx_ring[i].basePA);
  1125. if (!rq->rx_ring[i].base) {
  1126. printk(KERN_ERR "%s: failed to allocate rx ring %d\n",
  1127. adapter->netdev->name, i);
  1128. goto err;
  1129. }
  1130. }
  1131. sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
  1132. rq->comp_ring.base = pci_alloc_consistent(adapter->pdev, sz,
  1133. &rq->comp_ring.basePA);
  1134. if (!rq->comp_ring.base) {
  1135. printk(KERN_ERR "%s: failed to allocate rx comp ring\n",
  1136. adapter->netdev->name);
  1137. goto err;
  1138. }
  1139. sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
  1140. rq->rx_ring[1].size);
  1141. bi = kmalloc(sz, GFP_KERNEL);
  1142. if (!bi) {
  1143. printk(KERN_ERR "%s: failed to allocate rx bufinfo\n",
  1144. adapter->netdev->name);
  1145. goto err;
  1146. }
  1147. memset(bi, 0, sz);
  1148. rq->buf_info[0] = bi;
  1149. rq->buf_info[1] = bi + rq->rx_ring[0].size;
  1150. return 0;
  1151. err:
  1152. vmxnet3_rq_destroy(rq, adapter);
  1153. return -ENOMEM;
  1154. }
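/* process device events, reap tx completions, then receive up to @budget pkts */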
  1155. static int
  1156. vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
  1157. {
  1158. if (unlikely(adapter->shared->ecr))
  1159. vmxnet3_process_events(adapter);
  1160. vmxnet3_tq_tx_complete(&adapter->tx_queue, adapter);
  1161. return vmxnet3_rq_rx_complete(&adapter->rx_queue, adapter, budget);
  1162. }
  1163. static int
  1164. vmxnet3_poll(struct napi_struct *napi, int budget)
  1165. {
  1166. struct vmxnet3_adapter *adapter = container_of(napi,
  1167. struct vmxnet3_adapter, napi);
  1168. int rxd_done;
  1169. rxd_done = vmxnet3_do_poll(adapter, budget);
  1170. if (rxd_done < budget) {
  1171. napi_complete(napi);
  1172. vmxnet3_enable_intr(adapter, 0);
  1173. }
  1174. return rxd_done;
  1175. }
  1176. /* Interrupt handler for vmxnet3 */
  1177. static irqreturn_t
  1178. vmxnet3_intr(int irq, void *dev_id)
  1179. {
  1180. struct net_device *dev = dev_id;
  1181. struct vmxnet3_adapter *adapter = netdev_priv(dev);
  1182. if (unlikely(adapter->intr.type == VMXNET3_IT_INTX)) {
  1183. u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
  1184. if (unlikely(icr == 0))
  1185. /* not ours */
  1186. return IRQ_NONE;
  1187. }
  1188. /* disable intr if needed */
  1189. if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
  1190. vmxnet3_disable_intr(adapter, 0);
  1191. napi_schedule(&adapter->napi);
  1192. return IRQ_HANDLED;
  1193. }
  1194. #ifdef CONFIG_NET_POLL_CONTROLLER
  1195. /* netpoll callback. */
  1196. static void
  1197. vmxnet3_netpoll(struct net_device *netdev)
  1198. {
  1199. struct vmxnet3_adapter *adapter = netdev_priv(netdev);
  1200. int irq;
  1201. #ifdef CONFIG_PCI_MSI
  1202. if (adapter->intr.type == VMXNET3_IT_MSIX)
  1203. irq = adapter->intr.msix_entries[0].vector;
  1204. else
  1205. #endif
  1206. irq = adapter->pdev->irq;
  1207. disable_irq(irq);
  1208. vmxnet3_intr(irq, netdev);
  1209. enable_irq(irq);
  1210. }
  1211. #endif
  1212. static int
  1213. vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
  1214. {
  1215. int err;
  1216. #ifdef CONFIG_PCI_MSI
  1217. if (adapter->intr.type == VMXNET3_IT_MSIX) {
  1218. /* we only use 1 MSI-X vector */
  1219. err = request_irq(adapter->intr.msix_entries[0].vector,
  1220. vmxnet3_intr, 0, adapter->netdev->name,
  1221. adapter->netdev);
  1222. } else if (adapter->intr.type == VMXNET3_IT_MSI) {
  1223. err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
  1224. adapter->netdev->name, adapter->netdev);
  1225. } else
  1226. #endif
  1227. {
  1228. err = request_irq(adapter->pdev->irq, vmxnet3_intr,
  1229. IRQF_SHARED, adapter->netdev->name,
  1230. adapter->netdev);
  1231. }
  1232. if (err)
  1233. printk(KERN_ERR "Failed to request irq %s (intr type:%d), error"
  1234. ":%d\n", adapter->netdev->name, adapter->intr.type, err);
  1235. if (!err) {
  1236. int i;
  1237. /* init our intr settings */
  1238. for (i = 0; i < adapter->intr.num_intrs; i++)
  1239. adapter->intr.mod_levels[i] = UPT1_IML_ADAPTIVE;
  1240. /* next setup intr index for all intr sources */
  1241. adapter->tx_queue.comp_ring.intr_idx = 0;
  1242. adapter->rx_queue.comp_ring.intr_idx = 0;
  1243. adapter->intr.event_intr_idx = 0;
  1244. printk(KERN_INFO "%s: intr type %u, mode %u, %u vectors "
  1245. "allocated\n", adapter->netdev->name, adapter->intr.type,
  1246. adapter->intr.mask_mode, adapter->intr.num_intrs);
  1247. }
  1248. return err;
  1249. }
  1250. static void
  1251. vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
  1252. {
  1253. BUG_ON(adapter->intr.type == VMXNET3_IT_AUTO ||
  1254. adapter->intr.num_intrs <= 0);
  1255. switch (adapter->intr.type) {
  1256. #ifdef CONFIG_PCI_MSI
  1257. case VMXNET3_IT_MSIX:
  1258. {
  1259. int i;
  1260. for (i = 0; i < adapter->intr.num_intrs; i++)
  1261. free_irq(adapter->intr.msix_entries[i].vector,
  1262. adapter->netdev);
  1263. break;
  1264. }
  1265. #endif
  1266. case VMXNET3_IT_MSI:
  1267. free_irq(adapter->pdev->irq, adapter->netdev);
  1268. break;
  1269. case VMXNET3_IT_INTX:
  1270. free_irq(adapter->pdev->irq, adapter->netdev);
  1271. break;
  1272. default:
  1273. BUG_ON(true);
  1274. }
  1275. }
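/* helpers to set or clear flag bits in little-endian fields */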

inline void set_flag_le16(__le16 *data, u16 flag)
{
	*data = cpu_to_le16(le16_to_cpu(*data) | flag);
}

inline void set_flag_le64(__le64 *data, u64 flag)
{
	*data = cpu_to_le64(le64_to_cpu(*data) | flag);
}

inline void reset_flag_le64(__le64 *data, u64 flag)
{
	*data = cpu_to_le64(le64_to_cpu(*data) & ~flag);
}
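
/*
 * Illustrative note (not part of the original code): the helpers above do an
 * endian-safe read-modify-write on little-endian fields shared with the
 * device.  A typical call to advertise a UPT feature bit looks like
 *
 *	set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_RXCSUM);
 *
 * which is equivalent to the open-coded form
 *
 *	devRead->misc.uptFeatures =
 *		cpu_to_le64(le64_to_cpu(devRead->misc.uptFeatures) |
 *			    UPT1_F_RXCSUM);
 *
 * but keeps the byte swapping in one place.
 */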

static void
vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_DriverShared *shared = adapter->shared;
	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;

	if (grp) {
		/* add vlan rx stripping. */
		if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) {
			int i;
			struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
			adapter->vlan_grp = grp;

			/* update FEATURES to device */
			set_flag_le64(&devRead->misc.uptFeatures,
				      UPT1_F_RXVLAN);
			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
					       VMXNET3_CMD_UPDATE_FEATURE);
			/*
			 * Clear entire vfTable; then enable untagged pkts.
			 * Note: setting one entry in vfTable to non-zero turns
			 * on VLAN rx filtering.
			 */
			for (i = 0; i < VMXNET3_VFT_SIZE; i++)
				vfTable[i] = 0;

			VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
					       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
		} else {
			printk(KERN_ERR "%s: vlan_rx_register when device has "
			       "no NETIF_F_HW_VLAN_RX\n", netdev->name);
		}
	} else {
		/* remove vlan rx stripping. */
		struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
		adapter->vlan_grp = NULL;

		if (le64_to_cpu(devRead->misc.uptFeatures) & UPT1_F_RXVLAN) {
			int i;

			for (i = 0; i < VMXNET3_VFT_SIZE; i++) {
				/* clear entire vfTable; this also disables
				 * VLAN rx filtering
				 */
				vfTable[i] = 0;
			}
			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
					       VMXNET3_CMD_UPDATE_VLAN_FILTERS);

			/* update FEATURES to device */
			reset_flag_le64(&devRead->misc.uptFeatures,
					UPT1_F_RXVLAN);
			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
					       VMXNET3_CMD_UPDATE_FEATURE);
		}
	}
}
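
/*
 * Sketch, assuming the VMXNET3_SET/CLEAR_VFTABLE_ENTRY macros (defined in
 * the driver headers, not shown here) treat vfTable[] as a bitmap with one
 * bit per VLAN ID:
 *
 *	vfTable[vid >> 5] |=  (1 << (vid & 31));	// set VID
 *	vfTable[vid >> 5] &= ~(1 << (vid & 31));	// clear VID
 *
 * VID 0 is set above so that untagged packets continue to pass once any
 * filter entry is non-zero (which is what turns filtering on).
 */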

static void
vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
{
	if (adapter->vlan_grp) {
		u16 vid;
		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
		bool activeVlan = false;

		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (vlan_group_get_device(adapter->vlan_grp, vid)) {
				VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
				activeVlan = true;
			}
		}
		if (activeVlan) {
			/* continue to allow untagged pkts */
			VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
		}
	}
}

static void
vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;

	VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
}

static void
vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;

	VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
}

static u8 *
vmxnet3_copy_mc(struct net_device *netdev)
{
	u8 *buf = NULL;
	u32 sz = netdev_mc_count(netdev) * ETH_ALEN;

	/* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
	if (sz <= 0xffff) {
		/* We may be called with BH disabled */
		buf = kmalloc(sz, GFP_ATOMIC);
		if (buf) {
			struct netdev_hw_addr *ha;
			int i = 0;

			netdev_for_each_mc_addr(ha, netdev)
				memcpy(buf + i++ * ETH_ALEN, ha->addr,
				       ETH_ALEN);
		}
	}
	return buf;
}
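
/*
 * Layout note (illustrative): the multicast table handed to the device is a
 * flat array of 6-byte MAC addresses.  Three multicast groups, for example,
 * produce an 18-byte buffer:
 *
 *	| addr0 (6B) | addr1 (6B) | addr2 (6B) |
 *
 * vmxnet3_set_mc() below points rxConf->mfTablePA at its physical address
 * and sets rxConf->mfTableLen to 3 * ETH_ALEN = 18.
 */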

static void
vmxnet3_set_mc(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_RxFilterConf *rxConf =
					&adapter->shared->devRead.rxFilterConf;
	u8 *new_table = NULL;
	u32 new_mode = VMXNET3_RXM_UCAST;

	if (netdev->flags & IFF_PROMISC)
		new_mode |= VMXNET3_RXM_PROMISC;

	if (netdev->flags & IFF_BROADCAST)
		new_mode |= VMXNET3_RXM_BCAST;

	if (netdev->flags & IFF_ALLMULTI)
		new_mode |= VMXNET3_RXM_ALL_MULTI;
	else
		if (!netdev_mc_empty(netdev)) {
			new_table = vmxnet3_copy_mc(netdev);
			if (new_table) {
				new_mode |= VMXNET3_RXM_MCAST;
				rxConf->mfTableLen = cpu_to_le16(
					netdev_mc_count(netdev) * ETH_ALEN);
				rxConf->mfTablePA = cpu_to_le64(virt_to_phys(
						    new_table));
			} else {
				printk(KERN_INFO "%s: failed to copy mcast list"
				       ", setting ALL_MULTI\n", netdev->name);
				new_mode |= VMXNET3_RXM_ALL_MULTI;
			}
		}

	if (!(new_mode & VMXNET3_RXM_MCAST)) {
		rxConf->mfTableLen = 0;
		rxConf->mfTablePA = 0;
	}

	if (new_mode != rxConf->rxMode) {
		rxConf->rxMode = cpu_to_le32(new_mode);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_RX_MODE);
	}

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_MAC_FILTERS);

	kfree(new_table);
}

/*
 * Set up driver_shared based on settings in adapter.
 */
static void
vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
{
	struct Vmxnet3_DriverShared *shared = adapter->shared;
	struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
	struct Vmxnet3_TxQueueConf *tqc;
	struct Vmxnet3_RxQueueConf *rqc;
	int i;

	memset(shared, 0, sizeof(*shared));

	/* driver settings */
	shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
	devRead->misc.driverInfo.version = cpu_to_le32(
						VMXNET3_DRIVER_VERSION_NUM);
	devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
				VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
	devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
	*((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
				*((u32 *)&devRead->misc.driverInfo.gos));
	devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
	devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);

	devRead->misc.ddPA = cpu_to_le64(virt_to_phys(adapter));
	devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));

	/* set up feature flags */
	if (adapter->rxcsum)
		set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_RXCSUM);

	if (adapter->lro) {
		set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_LRO);
		devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
	}
	if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX) &&
	    adapter->vlan_grp) {
		set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_RXVLAN);
	}

	devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
	devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
	devRead->misc.queueDescLen = cpu_to_le32(
				     sizeof(struct Vmxnet3_TxQueueDesc) +
				     sizeof(struct Vmxnet3_RxQueueDesc));

	/* tx queue settings */
	BUG_ON(adapter->tx_queue.tx_ring.base == NULL);

	devRead->misc.numTxQueues = 1;
	tqc = &adapter->tqd_start->conf;
	tqc->txRingBasePA   = cpu_to_le64(adapter->tx_queue.tx_ring.basePA);
	tqc->dataRingBasePA = cpu_to_le64(adapter->tx_queue.data_ring.basePA);
	tqc->compRingBasePA = cpu_to_le64(adapter->tx_queue.comp_ring.basePA);
	tqc->ddPA           = cpu_to_le64(virt_to_phys(
						adapter->tx_queue.buf_info));
	tqc->txRingSize     = cpu_to_le32(adapter->tx_queue.tx_ring.size);
	tqc->dataRingSize   = cpu_to_le32(adapter->tx_queue.data_ring.size);
	tqc->compRingSize   = cpu_to_le32(adapter->tx_queue.comp_ring.size);
	tqc->ddLen          = cpu_to_le32(sizeof(struct vmxnet3_tx_buf_info) *
					  tqc->txRingSize);
	tqc->intrIdx        = adapter->tx_queue.comp_ring.intr_idx;

	/* rx queue settings */
	devRead->misc.numRxQueues = 1;
	rqc = &adapter->rqd_start->conf;
	rqc->rxRingBasePA[0] = cpu_to_le64(adapter->rx_queue.rx_ring[0].basePA);
	rqc->rxRingBasePA[1] = cpu_to_le64(adapter->rx_queue.rx_ring[1].basePA);
	rqc->compRingBasePA  = cpu_to_le64(adapter->rx_queue.comp_ring.basePA);
	rqc->ddPA            = cpu_to_le64(virt_to_phys(
						adapter->rx_queue.buf_info));
	rqc->rxRingSize[0]   = cpu_to_le32(adapter->rx_queue.rx_ring[0].size);
	rqc->rxRingSize[1]   = cpu_to_le32(adapter->rx_queue.rx_ring[1].size);
	rqc->compRingSize    = cpu_to_le32(adapter->rx_queue.comp_ring.size);
	rqc->ddLen           = cpu_to_le32(sizeof(struct vmxnet3_rx_buf_info) *
					   (rqc->rxRingSize[0] +
					    rqc->rxRingSize[1]));
	rqc->intrIdx         = adapter->rx_queue.comp_ring.intr_idx;

	/* intr settings */
	devRead->intrConf.autoMask = adapter->intr.mask_mode ==
				     VMXNET3_IMM_AUTO;
	devRead->intrConf.numIntrs = adapter->intr.num_intrs;
	for (i = 0; i < adapter->intr.num_intrs; i++)
		devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];

	devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;

	/* rx filter settings */
	devRead->rxFilterConf.rxMode = 0;
	vmxnet3_restore_vlan(adapter);

	/* the rest are already zeroed */
}

int
vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
{
	int err;
	u32 ret;

	dev_dbg(&adapter->netdev->dev,
		"%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes"
		" %u %u %u\n", adapter->netdev->name, adapter->skb_buf_size,
		adapter->rx_buf_per_pkt, adapter->tx_queue.tx_ring.size,
		adapter->rx_queue.rx_ring[0].size,
		adapter->rx_queue.rx_ring[1].size);

	vmxnet3_tq_init(&adapter->tx_queue, adapter);
	err = vmxnet3_rq_init(&adapter->rx_queue, adapter);
	if (err) {
		printk(KERN_ERR "Failed to init rx queue for %s: error %d\n",
		       adapter->netdev->name, err);
		goto rq_err;
	}

	err = vmxnet3_request_irqs(adapter);
	if (err) {
		printk(KERN_ERR "Failed to setup irq for %s: error %d\n",
		       adapter->netdev->name, err);
		goto irq_err;
	}

	vmxnet3_setup_driver_shared(adapter);

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
			       adapter->shared_pa));
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
			       adapter->shared_pa));

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_ACTIVATE_DEV);
	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);

	if (ret != 0) {
		printk(KERN_ERR "Failed to activate dev %s: error %u\n",
		       adapter->netdev->name, ret);
		err = -EINVAL;
		goto activate_err;
	}
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD,
			       adapter->rx_queue.rx_ring[0].next2fill);
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD2,
			       adapter->rx_queue.rx_ring[1].next2fill);

	/* Apply the rx filter settings last. */
	vmxnet3_set_mc(adapter->netdev);

	/*
	 * Check link state when first activating device. It will start the
	 * tx queue if the link is up.
	 */
	vmxnet3_check_link(adapter);

	napi_enable(&adapter->napi);
	vmxnet3_enable_all_intrs(adapter);
	clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
	return 0;

activate_err:
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
	vmxnet3_free_irqs(adapter);
irq_err:
rq_err:
	/* free up buffers we allocated */
	vmxnet3_rq_cleanup(&adapter->rx_queue, adapter);
	return err;
}

void
vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
{
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
}

int
vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
{
	if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
		return 0;

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_QUIESCE_DEV);
	vmxnet3_disable_all_intrs(adapter);

	napi_disable(&adapter->napi);
	netif_tx_disable(adapter->netdev);
	adapter->link_speed = 0;
	netif_carrier_off(adapter->netdev);

	vmxnet3_tq_cleanup(&adapter->tx_queue, adapter);
	vmxnet3_rq_cleanup(&adapter->rx_queue, adapter);
	vmxnet3_free_irqs(adapter);
	return 0;
}

static void
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
{
	u32 tmp;

	tmp = *(u32 *)mac;
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);

	tmp = (mac[5] << 8) | mac[4];
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
}
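
/*
 * Layout example (illustrative, assuming a little-endian guest, which is the
 * usual case for vmxnet3): for MAC 00:0c:29:aa:bb:cc the two register writes
 * above become
 *
 *	MACL = 0xaa290c00	(mac[0..3], loaded as one 32-bit word)
 *	MACH = 0x0000ccbb	(mac[4] in the low byte, mac[5] above it)
 *
 * vmxnet3_read_mac_addr() further down reverses exactly this split.
 */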

static int
vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
{
	struct sockaddr *addr = p;
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	vmxnet3_write_mac_addr(adapter, addr->sa_data);

	return 0;
}

/* ==================== initialization and cleanup routines ============ */

static int
vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
{
	int err;
	unsigned long mmio_start, mmio_len;
	struct pci_dev *pdev = adapter->pdev;

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR "Failed to enable adapter %s: error %d\n",
		       pci_name(pdev), err);
		return err;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			printk(KERN_ERR "pci_set_consistent_dma_mask failed "
			       "for adapter %s\n", pci_name(pdev));
			err = -EIO;
			goto err_set_mask;
		}
		*dma64 = true;
	} else {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
			printk(KERN_ERR "pci_set_dma_mask failed for adapter "
			       "%s\n", pci_name(pdev));
			err = -EIO;
			goto err_set_mask;
		}
		*dma64 = false;
	}

	err = pci_request_selected_regions(pdev, (1 << 2) - 1,
					   vmxnet3_driver_name);
	if (err) {
		printk(KERN_ERR "Failed to request region for adapter %s: "
		       "error %d\n", pci_name(pdev), err);
		goto err_set_mask;
	}

	pci_set_master(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
	if (!adapter->hw_addr0) {
		printk(KERN_ERR "Failed to map bar0 for adapter %s\n",
		       pci_name(pdev));
		err = -EIO;
		goto err_ioremap;
	}

	mmio_start = pci_resource_start(pdev, 1);
	mmio_len = pci_resource_len(pdev, 1);
	adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
	if (!adapter->hw_addr1) {
		printk(KERN_ERR "Failed to map bar1 for adapter %s\n",
		       pci_name(pdev));
		err = -EIO;
		goto err_bar1;
	}
	return 0;

err_bar1:
	iounmap(adapter->hw_addr0);
err_ioremap:
	pci_release_selected_regions(pdev, (1 << 2) - 1);
err_set_mask:
	pci_disable_device(pdev);
	return err;
}

static void
vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
{
	BUG_ON(!adapter->pdev);

	iounmap(adapter->hw_addr0);
	iounmap(adapter->hw_addr1);
	pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
	pci_disable_device(adapter->pdev);
}

static void
vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
{
	size_t sz;

	if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
				    VMXNET3_MAX_ETH_HDR_SIZE) {
		adapter->skb_buf_size = adapter->netdev->mtu +
					VMXNET3_MAX_ETH_HDR_SIZE;
		if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
			adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;

		adapter->rx_buf_per_pkt = 1;
	} else {
		adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
		sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
					    VMXNET3_MAX_ETH_HDR_SIZE;
		adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
	}

	/*
	 * for simplicity, force the ring0 size to be a multiple of
	 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
	 */
	sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
	adapter->rx_queue.rx_ring[0].size = (adapter->rx_queue.rx_ring[0].size +
					     sz - 1) / sz * sz;
	adapter->rx_queue.rx_ring[0].size = min_t(u32,
					    adapter->rx_queue.rx_ring[0].size,
					    VMXNET3_RX_RING_MAX_SIZE / sz * sz);
}
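
/*
 * Worked example (a sketch; the exact buffer-size constants live in the
 * driver headers): for a jumbo MTU the head buffer is capped at
 * VMXNET3_MAX_SKB_BUF_SIZE bytes and whatever remains of
 * (mtu + VMXNET3_MAX_ETH_HDR_SIZE) spills into page-sized buffers, i.e.
 *
 *	rx_buf_per_pkt = 1 + DIV_ROUND_UP(sz, PAGE_SIZE)
 *
 * (the (sz + PAGE_SIZE - 1) / PAGE_SIZE above is that round-up written out).
 * Ring 0 is then rounded to a multiple of
 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN so the ring always holds a whole
 * number of per-packet buffer groups.
 */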

int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
		      u32 rx_ring_size, u32 rx_ring2_size)
{
	int err;

	adapter->tx_queue.tx_ring.size   = tx_ring_size;
	adapter->tx_queue.data_ring.size = tx_ring_size;
	adapter->tx_queue.comp_ring.size = tx_ring_size;
	adapter->tx_queue.shared = &adapter->tqd_start->ctrl;
	adapter->tx_queue.stopped = true;
	err = vmxnet3_tq_create(&adapter->tx_queue, adapter);
	if (err)
		return err;

	adapter->rx_queue.rx_ring[0].size = rx_ring_size;
	adapter->rx_queue.rx_ring[1].size = rx_ring2_size;
	vmxnet3_adjust_rx_ring_size(adapter);
	adapter->rx_queue.comp_ring.size  = adapter->rx_queue.rx_ring[0].size +
					    adapter->rx_queue.rx_ring[1].size;
	adapter->rx_queue.qid  = 0;
	adapter->rx_queue.qid2 = 1;
	adapter->rx_queue.shared = &adapter->rqd_start->ctrl;
	err = vmxnet3_rq_create(&adapter->rx_queue, adapter);
	if (err)
		vmxnet3_tq_destroy(&adapter->tx_queue, adapter);

	return err;
}

static int
vmxnet3_open(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter;
	int err;

	adapter = netdev_priv(netdev);

	spin_lock_init(&adapter->tx_queue.tx_lock);

	err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE,
				    VMXNET3_DEF_RX_RING_SIZE,
				    VMXNET3_DEF_RX_RING_SIZE);
	if (err)
		goto queue_err;

	err = vmxnet3_activate_dev(adapter);
	if (err)
		goto activate_err;

	return 0;

activate_err:
	vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
	vmxnet3_tq_destroy(&adapter->tx_queue, adapter);
queue_err:
	return err;
}

static int
vmxnet3_close(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	/*
	 * Reset_work may be in the middle of resetting the device, wait for
	 * its completion.
	 */
	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		msleep(1);

	vmxnet3_quiesce_dev(adapter);

	vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
	vmxnet3_tq_destroy(&adapter->tx_queue, adapter);

	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);

	return 0;
}

void
vmxnet3_force_close(struct vmxnet3_adapter *adapter)
{
	/*
	 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
	 * vmxnet3_close() will deadlock.
	 */
	BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));

	/* we need to enable NAPI, otherwise dev_close will deadlock */
	napi_enable(&adapter->napi);
	dev_close(adapter->netdev);
}

static int
vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	int err = 0;

	if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU)
		return -EINVAL;

	if (new_mtu > 1500 && !adapter->jumbo_frame)
		return -EINVAL;

	netdev->mtu = new_mtu;

	/*
	 * Reset_work may be in the middle of resetting the device, wait for
	 * its completion.
	 */
	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		msleep(1);

	if (netif_running(netdev)) {
		vmxnet3_quiesce_dev(adapter);
		vmxnet3_reset_dev(adapter);

		/* we need to re-create the rx queue based on the new mtu */
		vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
		vmxnet3_adjust_rx_ring_size(adapter);
		adapter->rx_queue.comp_ring.size =
					adapter->rx_queue.rx_ring[0].size +
					adapter->rx_queue.rx_ring[1].size;
		err = vmxnet3_rq_create(&adapter->rx_queue, adapter);
		if (err) {
			printk(KERN_ERR "%s: failed to re-create rx queue,"
			       " error %d. Closing it.\n", netdev->name, err);
			goto out;
		}

		err = vmxnet3_activate_dev(adapter);
		if (err) {
			printk(KERN_ERR "%s: failed to re-activate, error %d. "
			       "Closing it\n", netdev->name, err);
			goto out;
		}
	}

out:
	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
	if (err)
		vmxnet3_force_close(adapter);

	return err;
}

static void
vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
{
	struct net_device *netdev = adapter->netdev;

	netdev->features = NETIF_F_SG |
		NETIF_F_HW_CSUM |
		NETIF_F_HW_VLAN_TX |
		NETIF_F_HW_VLAN_RX |
		NETIF_F_HW_VLAN_FILTER |
		NETIF_F_TSO |
		NETIF_F_TSO6 |
		NETIF_F_LRO;

	printk(KERN_INFO "features: sg csum vlan jf tso tsoIPv6 lro");

	adapter->rxcsum = true;
	adapter->jumbo_frame = true;
	adapter->lro = true;

	if (dma64) {
		netdev->features |= NETIF_F_HIGHDMA;
		printk(" highDMA");
	}

	netdev->vlan_features = netdev->features;
	printk("\n");
}

static void
vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
{
	u32 tmp;

	tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
	*(u32 *)mac = tmp;

	tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
	mac[4] = tmp & 0xff;
	mac[5] = (tmp >> 8) & 0xff;
}

static void
vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
{
	u32 cfg;

	/* intr settings */
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_GET_CONF_INTR);
	cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	adapter->intr.type = cfg & 0x3;
	adapter->intr.mask_mode = (cfg >> 2) & 0x3;

	if (adapter->intr.type == VMXNET3_IT_AUTO) {
		int err;

#ifdef CONFIG_PCI_MSI
		adapter->intr.msix_entries[0].entry = 0;
		err = pci_enable_msix(adapter->pdev,
				      adapter->intr.msix_entries,
				      VMXNET3_LINUX_MAX_MSIX_VECT);
		if (!err) {
			adapter->intr.num_intrs = 1;
			adapter->intr.type = VMXNET3_IT_MSIX;
			return;
		}
#endif

		err = pci_enable_msi(adapter->pdev);
		if (!err) {
			adapter->intr.num_intrs = 1;
			adapter->intr.type = VMXNET3_IT_MSI;
			return;
		}
	}

	adapter->intr.type = VMXNET3_IT_INTX;

	/* INT-X related setting */
	adapter->intr.num_intrs = 1;
}
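
/*
 * Summary of the selection above: when the device reports VMXNET3_IT_AUTO,
 * the driver tries MSI-X first, then MSI, and finally falls back to legacy
 * INTx; in every case exactly one interrupt vector is used by this version
 * of the driver.
 */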

static void
vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
{
	if (adapter->intr.type == VMXNET3_IT_MSIX)
		pci_disable_msix(adapter->pdev);
	else if (adapter->intr.type == VMXNET3_IT_MSI)
		pci_disable_msi(adapter->pdev);
	else
		BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
}

static void
vmxnet3_tx_timeout(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	adapter->tx_timeout_count++;

	printk(KERN_ERR "%s: tx hang\n", adapter->netdev->name);
	schedule_work(&adapter->work);
}

static void
vmxnet3_reset_work(struct work_struct *data)
{
	struct vmxnet3_adapter *adapter;

	adapter = container_of(data, struct vmxnet3_adapter, work);

	/* if another thread is resetting the device, no need to proceed */
	if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		return;

	/* if the device is closed, we must leave it alone */
	if (netif_running(adapter->netdev)) {
		printk(KERN_INFO "%s: resetting\n", adapter->netdev->name);
		vmxnet3_quiesce_dev(adapter);
		vmxnet3_reset_dev(adapter);
		vmxnet3_activate_dev(adapter);
	} else {
		printk(KERN_INFO "%s: already closed\n", adapter->netdev->name);
	}

	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
}

static int __devinit
vmxnet3_probe_device(struct pci_dev *pdev,
		     const struct pci_device_id *id)
{
	static const struct net_device_ops vmxnet3_netdev_ops = {
		.ndo_open = vmxnet3_open,
		.ndo_stop = vmxnet3_close,
		.ndo_start_xmit = vmxnet3_xmit_frame,
		.ndo_set_mac_address = vmxnet3_set_mac_addr,
		.ndo_change_mtu = vmxnet3_change_mtu,
		.ndo_get_stats = vmxnet3_get_stats,
		.ndo_tx_timeout = vmxnet3_tx_timeout,
		.ndo_set_multicast_list = vmxnet3_set_mc,
		.ndo_vlan_rx_register = vmxnet3_vlan_rx_register,
		.ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
		.ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
		.ndo_poll_controller = vmxnet3_netpoll,
#endif
	};
	int err;
	bool dma64 = false; /* stupid gcc */
	u32 ver;
	struct net_device *netdev;
	struct vmxnet3_adapter *adapter;
	u8 mac[ETH_ALEN];

	netdev = alloc_etherdev(sizeof(struct vmxnet3_adapter));
	if (!netdev) {
		printk(KERN_ERR "Failed to alloc ethernet device for adapter "
		       "%s\n", pci_name(pdev));
		return -ENOMEM;
	}

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;

	adapter->shared = pci_alloc_consistent(adapter->pdev,
			  sizeof(struct Vmxnet3_DriverShared),
			  &adapter->shared_pa);
	if (!adapter->shared) {
		printk(KERN_ERR "Failed to allocate memory for %s\n",
		       pci_name(pdev));
		err = -ENOMEM;
		goto err_alloc_shared;
	}

	adapter->tqd_start = pci_alloc_consistent(adapter->pdev,
			     sizeof(struct Vmxnet3_TxQueueDesc) +
			     sizeof(struct Vmxnet3_RxQueueDesc),
			     &adapter->queue_desc_pa);
	if (!adapter->tqd_start) {
		printk(KERN_ERR "Failed to allocate memory for %s\n",
		       pci_name(pdev));
		err = -ENOMEM;
		goto err_alloc_queue_desc;
	}
	adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start
							    + 1);

	adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL);
	if (adapter->pm_conf == NULL) {
		printk(KERN_ERR "Failed to allocate memory for %s\n",
		       pci_name(pdev));
		err = -ENOMEM;
		goto err_alloc_pm;
	}

	err = vmxnet3_alloc_pci_resources(adapter, &dma64);
	if (err < 0)
		goto err_alloc_pci;

	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
	if (ver & 1) {
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1);
	} else {
		printk(KERN_ERR "Incompatible h/w version (0x%x) for adapter"
		       " %s\n", ver, pci_name(pdev));
		err = -EBUSY;
		goto err_ver;
	}

	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
	if (ver & 1) {
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
	} else {
		printk(KERN_ERR "Incompatible upt version (0x%x) for "
		       "adapter %s\n", ver, pci_name(pdev));
		err = -EBUSY;
		goto err_ver;
	}

	vmxnet3_declare_features(adapter, dma64);

	adapter->dev_number = atomic_read(&devices_found);
	vmxnet3_alloc_intr_resources(adapter);

	vmxnet3_read_mac_addr(adapter, mac);
	memcpy(netdev->dev_addr, mac, netdev->addr_len);

	netdev->netdev_ops = &vmxnet3_netdev_ops;
	netdev->watchdog_timeo = 5 * HZ;
	vmxnet3_set_ethtool_ops(netdev);

	INIT_WORK(&adapter->work, vmxnet3_reset_work);

	netif_napi_add(netdev, &adapter->napi, vmxnet3_poll, 64);
	SET_NETDEV_DEV(netdev, &pdev->dev);
	err = register_netdev(netdev);

	if (err) {
		printk(KERN_ERR "Failed to register adapter %s\n",
		       pci_name(pdev));
		goto err_register;
	}

	set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
	atomic_inc(&devices_found);
	return 0;

err_register:
	vmxnet3_free_intr_resources(adapter);
err_ver:
	vmxnet3_free_pci_resources(adapter);
err_alloc_pci:
	kfree(adapter->pm_conf);
err_alloc_pm:
	pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) +
			    sizeof(struct Vmxnet3_RxQueueDesc),
			    adapter->tqd_start, adapter->queue_desc_pa);
err_alloc_queue_desc:
	pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
			    adapter->shared, adapter->shared_pa);
err_alloc_shared:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	return err;
}

static void __devexit
vmxnet3_remove_device(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	flush_scheduled_work();

	unregister_netdev(netdev);

	vmxnet3_free_intr_resources(adapter);
	vmxnet3_free_pci_resources(adapter);
	kfree(adapter->pm_conf);
	pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) +
			    sizeof(struct Vmxnet3_RxQueueDesc),
			    adapter->tqd_start, adapter->queue_desc_pa);
	pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
			    adapter->shared, adapter->shared_pa);
	free_netdev(netdev);
}

#ifdef CONFIG_PM

static int
vmxnet3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_PMConf *pmConf;
	struct ethhdr *ehdr;
	struct arphdr *ahdr;
	u8 *arpreq;
	struct in_device *in_dev;
	struct in_ifaddr *ifa;
	int i = 0;

	if (!netif_running(netdev))
		return 0;

	vmxnet3_disable_all_intrs(adapter);
	vmxnet3_free_irqs(adapter);
	vmxnet3_free_intr_resources(adapter);

	netif_device_detach(netdev);
	netif_stop_queue(netdev);

	/* Create wake-up filters. */
	pmConf = adapter->pm_conf;
	memset(pmConf, 0, sizeof(*pmConf));

	if (adapter->wol & WAKE_UCAST) {
		pmConf->filters[i].patternSize = ETH_ALEN;
		pmConf->filters[i].maskSize = 1;
		memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
		pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */

		set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_FILTER);
		i++;
	}

	if (adapter->wol & WAKE_ARP) {
		in_dev = in_dev_get(netdev);
		if (!in_dev)
			goto skip_arp;

		ifa = (struct in_ifaddr *)in_dev->ifa_list;
		if (!ifa)
			goto skip_arp;

		pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header */
			sizeof(struct arphdr) +	/* ARP header */
			2 * ETH_ALEN +		/* 2 Ethernet addresses */
			2 * sizeof(u32);	/* 2 IPv4 addresses */
		pmConf->filters[i].maskSize =
			(pmConf->filters[i].patternSize - 1) / 8 + 1;

		/* ETH_P_ARP in Ethernet header. */
		ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
		ehdr->h_proto = htons(ETH_P_ARP);

		/* ARPOP_REQUEST in ARP header. */
		ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
		ahdr->ar_op = htons(ARPOP_REQUEST);
		arpreq = (u8 *)(ahdr + 1);

		/* The Unicast IPv4 address in 'tip' field. */
		arpreq += 2 * ETH_ALEN + sizeof(u32);
		*(u32 *)arpreq = ifa->ifa_address;

		/* The mask for the relevant bits. */
		pmConf->filters[i].mask[0] = 0x00;
		pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
		pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
		pmConf->filters[i].mask[3] = 0x00;
		pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
		pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
		in_dev_put(in_dev);

		set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_FILTER);
		i++;
	}

skip_arp:
	if (adapter->wol & WAKE_MAGIC)
		set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_MAGIC);

	pmConf->numFilters = i;

	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
	adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
								  *pmConf));
	adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
								 pmConf));

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_PMCFG);

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
			adapter->wol);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));

	return 0;
}
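
/*
 * Wake-up pattern mask layout, as it appears to be used above: each mask
 * byte covers eight consecutive pattern bytes, with bit k of mask[n]
 * selecting pattern byte 8 * n + k.  That matches the values chosen:
 *
 *	mask[0] = 0x3F	-> bytes  0..5   (unicast filter: the 6-byte MAC)
 *	mask[1] = 0x30	-> bytes 12..13  (EtherType == ETH_P_ARP)
 *	mask[2] = 0x30	-> bytes 20..21  (ar_op == ARPOP_REQUEST)
 *	mask[4] = 0xC0	-> bytes 38..39  (target IP, first half)
 *	mask[5] = 0x03	-> bytes 40..41  (target IP, second half)
 *
 * maskSize = (patternSize - 1) / 8 + 1 is simply the number of mask bytes
 * needed to cover the whole pattern.
 */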

static int
vmxnet3_resume(struct device *device)
{
	int err;
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_PMConf *pmConf;

	if (!netif_running(netdev))
		return 0;

	/* Destroy wake-up filters. */
	pmConf = adapter->pm_conf;
	memset(pmConf, 0, sizeof(*pmConf));

	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
	adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
								  *pmConf));
	/* confPA is a 64-bit field; use cpu_to_le64 as in the suspend path. */
	adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
								 pmConf));

	netif_device_attach(netdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device_mem(pdev);
	if (err != 0)
		return err;

	pci_enable_wake(pdev, PCI_D0, 0);

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_PMCFG);
	vmxnet3_alloc_intr_resources(adapter);
	vmxnet3_request_irqs(adapter);
	vmxnet3_enable_all_intrs(adapter);

	return 0;
}

static const struct dev_pm_ops vmxnet3_pm_ops = {
	.suspend = vmxnet3_suspend,
	.resume = vmxnet3_resume,
};
#endif

static struct pci_driver vmxnet3_driver = {
	.name		= vmxnet3_driver_name,
	.id_table	= vmxnet3_pciid_table,
	.probe		= vmxnet3_probe_device,
	.remove		= __devexit_p(vmxnet3_remove_device),
#ifdef CONFIG_PM
	.driver.pm	= &vmxnet3_pm_ops,
#endif
};

static int __init
vmxnet3_init_module(void)
{
	printk(KERN_INFO "%s - version %s\n", VMXNET3_DRIVER_DESC,
	       VMXNET3_DRIVER_VERSION_REPORT);
	return pci_register_driver(&vmxnet3_driver);
}

module_init(vmxnet3_init_module);

static void
vmxnet3_exit_module(void)
{
	pci_unregister_driver(&vmxnet3_driver);
}

module_exit(vmxnet3_exit_module);

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);