vmxnet3_drv.c

  1. /*
  2. * Linux driver for VMware's vmxnet3 ethernet NIC.
  3. *
  4. * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms of the GNU General Public License as published by the
  8. * Free Software Foundation; version 2 of the License and no later version.
  9. *
  10. * This program is distributed in the hope that it will be useful, but
  11. * WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
  13. * NON INFRINGEMENT. See the GNU General Public License for more
  14. * details.
  15. *
  16. * You should have received a copy of the GNU General Public License
  17. * along with this program; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19. *
  20. * The full GNU General Public License is included in this distribution in
  21. * the file called "COPYING".
  22. *
  23. * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
  24. *
  25. */
  26. #include <linux/module.h>
  27. #include <net/ip6_checksum.h>
  28. #include "vmxnet3_int.h"
  29. char vmxnet3_driver_name[] = "vmxnet3";
  30. #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
  31. /*
  32. * PCI Device ID Table
  33. * Last entry must be all 0s
  34. */
  35. static DEFINE_PCI_DEVICE_TABLE(vmxnet3_pciid_table) = {
  36. {PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
  37. {0}
  38. };
  39. MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);
  40. static atomic_t devices_found;
  41. #define VMXNET3_MAX_DEVICES 10
  42. static int enable_mq = 1;
  43. static int irq_share_mode;
  44. static void
  45. vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);
  46. /*
  47. * Enable/Disable the given intr
  48. */
  49. static void
  50. vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
  51. {
  52. VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
  53. }
  54. static void
  55. vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
  56. {
  57. VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
  58. }
  59. /*
  60. * Enable/Disable all intrs used by the device
  61. */
  62. static void
  63. vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
  64. {
  65. int i;
  66. for (i = 0; i < adapter->intr.num_intrs; i++)
  67. vmxnet3_enable_intr(adapter, i);
  68. adapter->shared->devRead.intrConf.intrCtrl &=
  69. cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
  70. }
  71. static void
  72. vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
  73. {
  74. int i;
  75. adapter->shared->devRead.intrConf.intrCtrl |=
  76. cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
  77. for (i = 0; i < adapter->intr.num_intrs; i++)
  78. vmxnet3_disable_intr(adapter, i);
  79. }
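/*
 * Acknowledge the given events by writing them back to the ECR register.
 */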
  80. static void
  81. vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
  82. {
  83. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
  84. }
  85. static bool
  86. vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
  87. {
  88. return tq->stopped;
  89. }
  90. static void
  91. vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
  92. {
  93. tq->stopped = false;
  94. netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
  95. }
  96. static void
  97. vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
  98. {
  99. tq->stopped = false;
  100. netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
  101. }
  102. static void
  103. vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
  104. {
  105. tq->stopped = true;
  106. tq->num_stop++;
  107. netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
  108. }
  109. /*
  110. * Check the link state. This may start or stop the tx queue.
  111. */
  112. static void
  113. vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
  114. {
  115. u32 ret;
  116. int i;
  117. unsigned long flags;
  118. spin_lock_irqsave(&adapter->cmd_lock, flags);
  119. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
  120. ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
  121. spin_unlock_irqrestore(&adapter->cmd_lock, flags);
  122. adapter->link_speed = ret >> 16;
  123. if (ret & 1) { /* Link is up. */
  124. printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",
  125. adapter->netdev->name, adapter->link_speed);
  126. if (!netif_carrier_ok(adapter->netdev))
  127. netif_carrier_on(adapter->netdev);
  128. if (affectTxQueue) {
  129. for (i = 0; i < adapter->num_tx_queues; i++)
  130. vmxnet3_tq_start(&adapter->tx_queue[i],
  131. adapter);
  132. }
  133. } else {
  134. printk(KERN_INFO "%s: NIC Link is Down\n",
  135. adapter->netdev->name);
  136. if (netif_carrier_ok(adapter->netdev))
  137. netif_carrier_off(adapter->netdev);
  138. if (affectTxQueue) {
  139. for (i = 0; i < adapter->num_tx_queues; i++)
  140. vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
  141. }
  142. }
  143. }
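/*
 * Handle pending device events: acknowledge them, re-check the link state on
 * VMXNET3_ECR_LINK, and on tx/rx queue errors log the queue status and
 * schedule the reset work.
 */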
  144. static void
  145. vmxnet3_process_events(struct vmxnet3_adapter *adapter)
  146. {
  147. int i;
  148. unsigned long flags;
  149. u32 events = le32_to_cpu(adapter->shared->ecr);
  150. if (!events)
  151. return;
  152. vmxnet3_ack_events(adapter, events);
  153. /* Check if link state has changed */
  154. if (events & VMXNET3_ECR_LINK)
  155. vmxnet3_check_link(adapter, true);
  156. /* Check if there is an error on xmit/recv queues */
  157. if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
  158. spin_lock_irqsave(&adapter->cmd_lock, flags);
  159. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
  160. VMXNET3_CMD_GET_QUEUE_STATUS);
  161. spin_unlock_irqrestore(&adapter->cmd_lock, flags);
  162. for (i = 0; i < adapter->num_tx_queues; i++)
  163. if (adapter->tqd_start[i].status.stopped)
  164. dev_err(&adapter->netdev->dev,
  165. "%s: tq[%d] error 0x%x\n",
  166. adapter->netdev->name, i, le32_to_cpu(
  167. adapter->tqd_start[i].status.error));
  168. for (i = 0; i < adapter->num_rx_queues; i++)
  169. if (adapter->rqd_start[i].status.stopped)
  170. dev_err(&adapter->netdev->dev,
  171. "%s: rq[%d] error 0x%x\n",
  172. adapter->netdev->name, i,
  173. adapter->rqd_start[i].status.error);
  174. schedule_work(&adapter->work);
  175. }
  176. }
  177. #ifdef __BIG_ENDIAN_BITFIELD
  178. /*
  179. * The device expects the bitfields in shared structures to be written in
  180. * little endian. When the CPU is big endian, the following routines are used
  181. * to correctly read from and write to the ABI.
  182. * The general technique used here is: double-word bitfields are defined in the
  183. * opposite order for big endian architectures. Then, before reading them in the
  184. * driver, the complete double word is translated using le32_to_cpu. Similarly,
  185. * after the driver writes into the bitfields, cpu_to_le32 is used to translate
  186. * the double words into the required format.
  187. * In order to avoid touching bits in the shared structure more than once, temporary
  188. * descriptors are used. These are passed as srcDesc to the following functions.
  189. */
  190. static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
  191. struct Vmxnet3_RxDesc *dstDesc)
  192. {
  193. u32 *src = (u32 *)srcDesc + 2;
  194. u32 *dst = (u32 *)dstDesc + 2;
  195. dstDesc->addr = le64_to_cpu(srcDesc->addr);
  196. *dst = le32_to_cpu(*src);
  197. dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
  198. }
  199. static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
  200. struct Vmxnet3_TxDesc *dstDesc)
  201. {
  202. int i;
  203. u32 *src = (u32 *)(srcDesc + 1);
  204. u32 *dst = (u32 *)(dstDesc + 1);
  205. /* Working backwards so that the gen bit is set at the end. */
  206. for (i = 2; i > 0; i--) {
  207. src--;
  208. dst--;
  209. *dst = cpu_to_le32(*src);
  210. }
  211. }
  212. static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
  213. struct Vmxnet3_RxCompDesc *dstDesc)
  214. {
  215. int i = 0;
  216. u32 *src = (u32 *)srcDesc;
  217. u32 *dst = (u32 *)dstDesc;
  218. for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
  219. *dst = le32_to_cpu(*src);
  220. src++;
  221. dst++;
  222. }
  223. }
  224. /* Used to read bitfield values from double words. */
  225. static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
  226. {
  227. u32 temp = le32_to_cpu(*bitfield);
  228. u32 mask = ((1 << size) - 1) << pos;
  229. temp &= mask;
  230. temp >>= pos;
  231. return temp;
  232. }
  233. #endif /* __BIG_ENDIAN_BITFIELD */
  234. #ifdef __BIG_ENDIAN_BITFIELD
  235. # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
  236. txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
  237. VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
  238. # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
  239. txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
  240. VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
  241. # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
  242. VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
  243. VMXNET3_TCD_GEN_SIZE)
  244. # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
  245. VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
  246. # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
  247. (dstrcd) = (tmp); \
  248. vmxnet3_RxCompToCPU((rcd), (tmp)); \
  249. } while (0)
  250. # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
  251. (dstrxd) = (tmp); \
  252. vmxnet3_RxDescToCPU((rxd), (tmp)); \
  253. } while (0)
  254. #else
  255. # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
  256. # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
  257. # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
  258. # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
  259. # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
  260. # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
  261. #endif /* __BIG_ENDIAN_BITFIELD */
  262. static void
  263. vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
  264. struct pci_dev *pdev)
  265. {
  266. if (tbi->map_type == VMXNET3_MAP_SINGLE)
  267. pci_unmap_single(pdev, tbi->dma_addr, tbi->len,
  268. PCI_DMA_TODEVICE);
  269. else if (tbi->map_type == VMXNET3_MAP_PAGE)
  270. pci_unmap_page(pdev, tbi->dma_addr, tbi->len,
  271. PCI_DMA_TODEVICE);
  272. else
  273. BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);
  274. tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
  275. }
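/*
 * Unmap and free the tx buffers of the packet whose EOP descriptor sits at
 * eop_idx, advancing next2comp past it. Returns the number of tx ring
 * entries reclaimed.
 */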
  276. static int
  277. vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
  278. struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
  279. {
  280. struct sk_buff *skb;
  281. int entries = 0;
  282. /* no out of order completion */
  283. BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
  284. BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);
  285. skb = tq->buf_info[eop_idx].skb;
  286. BUG_ON(skb == NULL);
  287. tq->buf_info[eop_idx].skb = NULL;
  288. VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);
  289. while (tq->tx_ring.next2comp != eop_idx) {
  290. vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
  291. pdev);
  292. /* update next2comp w/o tx_lock. Since we are marking more,
  293. * instead of less, tx ring entries avail, the worst case is
  294. * that the tx routine incorrectly re-queues a pkt due to
  295. * insufficient tx ring entries.
  296. */
  297. vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
  298. entries++;
  299. }
  300. dev_kfree_skb_any(skb);
  301. return entries;
  302. }
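/*
 * Reclaim tx descriptors the device has completed and wake the queue if
 * enough ring entries have become available again.
 */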
  303. static int
  304. vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
  305. struct vmxnet3_adapter *adapter)
  306. {
  307. int completed = 0;
  308. union Vmxnet3_GenericDesc *gdesc;
  309. gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
  310. while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
  311. completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
  312. &gdesc->tcd), tq, adapter->pdev,
  313. adapter);
  314. vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
  315. gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
  316. }
  317. if (completed) {
  318. spin_lock(&tq->tx_lock);
  319. if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
  320. vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
  321. VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
  322. netif_carrier_ok(adapter->netdev))) {
  323. vmxnet3_tq_wake(tq, adapter);
  324. }
  325. spin_unlock(&tq->tx_lock);
  326. }
  327. return completed;
  328. }
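/*
 * Unmap and free all outstanding tx buffers and reset the tx and completion
 * rings to their initial state.
 */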
  329. static void
  330. vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
  331. struct vmxnet3_adapter *adapter)
  332. {
  333. int i;
  334. while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
  335. struct vmxnet3_tx_buf_info *tbi;
  336. tbi = tq->buf_info + tq->tx_ring.next2comp;
  337. vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
  338. if (tbi->skb) {
  339. dev_kfree_skb_any(tbi->skb);
  340. tbi->skb = NULL;
  341. }
  342. vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
  343. }
  344. /* sanity check, verify all buffers are indeed unmapped and freed */
  345. for (i = 0; i < tq->tx_ring.size; i++) {
  346. BUG_ON(tq->buf_info[i].skb != NULL ||
  347. tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
  348. }
  349. tq->tx_ring.gen = VMXNET3_INIT_GEN;
  350. tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
  351. tq->comp_ring.gen = VMXNET3_INIT_GEN;
  352. tq->comp_ring.next2proc = 0;
  353. }
  354. static void
  355. vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
  356. struct vmxnet3_adapter *adapter)
  357. {
  358. if (tq->tx_ring.base) {
  359. pci_free_consistent(adapter->pdev, tq->tx_ring.size *
  360. sizeof(struct Vmxnet3_TxDesc),
  361. tq->tx_ring.base, tq->tx_ring.basePA);
  362. tq->tx_ring.base = NULL;
  363. }
  364. if (tq->data_ring.base) {
  365. pci_free_consistent(adapter->pdev, tq->data_ring.size *
  366. sizeof(struct Vmxnet3_TxDataDesc),
  367. tq->data_ring.base, tq->data_ring.basePA);
  368. tq->data_ring.base = NULL;
  369. }
  370. if (tq->comp_ring.base) {
  371. pci_free_consistent(adapter->pdev, tq->comp_ring.size *
  372. sizeof(struct Vmxnet3_TxCompDesc),
  373. tq->comp_ring.base, tq->comp_ring.basePA);
  374. tq->comp_ring.base = NULL;
  375. }
  376. kfree(tq->buf_info);
  377. tq->buf_info = NULL;
  378. }
  379. /* Destroy all tx queues */
  380. void
  381. vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
  382. {
  383. int i;
  384. for (i = 0; i < adapter->num_tx_queues; i++)
  385. vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
  386. }
  387. static void
  388. vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
  389. struct vmxnet3_adapter *adapter)
  390. {
  391. int i;
  392. /* reset the tx ring contents to 0 and reset the tx ring states */
  393. memset(tq->tx_ring.base, 0, tq->tx_ring.size *
  394. sizeof(struct Vmxnet3_TxDesc));
  395. tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
  396. tq->tx_ring.gen = VMXNET3_INIT_GEN;
  397. memset(tq->data_ring.base, 0, tq->data_ring.size *
  398. sizeof(struct Vmxnet3_TxDataDesc));
  399. /* reset the tx comp ring contents to 0 and reset comp ring states */
  400. memset(tq->comp_ring.base, 0, tq->comp_ring.size *
  401. sizeof(struct Vmxnet3_TxCompDesc));
  402. tq->comp_ring.next2proc = 0;
  403. tq->comp_ring.gen = VMXNET3_INIT_GEN;
  404. /* reset the bookkeeping data */
  405. memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
  406. for (i = 0; i < tq->tx_ring.size; i++)
  407. tq->buf_info[i].map_type = VMXNET3_MAP_NONE;
  408. /* stats are not reset */
  409. }
  410. static int
  411. vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
  412. struct vmxnet3_adapter *adapter)
  413. {
  414. BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
  415. tq->comp_ring.base || tq->buf_info);
  416. tq->tx_ring.base = pci_alloc_consistent(adapter->pdev, tq->tx_ring.size
  417. * sizeof(struct Vmxnet3_TxDesc),
  418. &tq->tx_ring.basePA);
  419. if (!tq->tx_ring.base) {
  420. printk(KERN_ERR "%s: failed to allocate tx ring\n",
  421. adapter->netdev->name);
  422. goto err;
  423. }
  424. tq->data_ring.base = pci_alloc_consistent(adapter->pdev,
  425. tq->data_ring.size *
  426. sizeof(struct Vmxnet3_TxDataDesc),
  427. &tq->data_ring.basePA);
  428. if (!tq->data_ring.base) {
  429. printk(KERN_ERR "%s: failed to allocate data ring\n",
  430. adapter->netdev->name);
  431. goto err;
  432. }
  433. tq->comp_ring.base = pci_alloc_consistent(adapter->pdev,
  434. tq->comp_ring.size *
  435. sizeof(struct Vmxnet3_TxCompDesc),
  436. &tq->comp_ring.basePA);
  437. if (!tq->comp_ring.base) {
  438. printk(KERN_ERR "%s: failed to allocate tx comp ring\n",
  439. adapter->netdev->name);
  440. goto err;
  441. }
  442. tq->buf_info = kcalloc(tq->tx_ring.size, sizeof(tq->buf_info[0]),
  443. GFP_KERNEL);
  444. if (!tq->buf_info) {
  445. printk(KERN_ERR "%s: failed to allocate tx bufinfo\n",
  446. adapter->netdev->name);
  447. goto err;
  448. }
  449. return 0;
  450. err:
  451. vmxnet3_tq_destroy(tq, adapter);
  452. return -ENOMEM;
  453. }
  454. static void
  455. vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
  456. {
  457. int i;
  458. for (i = 0; i < adapter->num_tx_queues; i++)
  459. vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
  460. }
  461. /*
  462. * starting from ring->next2fill, allocate rx buffers for the given ring
  463. * of the rx queue and update the rx desc. stop after @num_to_alloc buffers
  464. * are allocated or allocation fails
  465. */
  466. static int
  467. vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
  468. int num_to_alloc, struct vmxnet3_adapter *adapter)
  469. {
  470. int num_allocated = 0;
  471. struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
  472. struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
  473. u32 val;
  474. while (num_allocated <= num_to_alloc) {
  475. struct vmxnet3_rx_buf_info *rbi;
  476. union Vmxnet3_GenericDesc *gd;
  477. rbi = rbi_base + ring->next2fill;
  478. gd = ring->base + ring->next2fill;
  479. if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
  480. if (rbi->skb == NULL) {
  481. rbi->skb = dev_alloc_skb(rbi->len +
  482. NET_IP_ALIGN);
  483. if (unlikely(rbi->skb == NULL)) {
  484. rq->stats.rx_buf_alloc_failure++;
  485. break;
  486. }
  487. rbi->skb->dev = adapter->netdev;
  488. skb_reserve(rbi->skb, NET_IP_ALIGN);
  489. rbi->dma_addr = pci_map_single(adapter->pdev,
  490. rbi->skb->data, rbi->len,
  491. PCI_DMA_FROMDEVICE);
  492. } else {
  493. /* rx buffer skipped by the device */
  494. }
  495. val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
  496. } else {
  497. BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
  498. rbi->len != PAGE_SIZE);
  499. if (rbi->page == NULL) {
  500. rbi->page = alloc_page(GFP_ATOMIC);
  501. if (unlikely(rbi->page == NULL)) {
  502. rq->stats.rx_buf_alloc_failure++;
  503. break;
  504. }
  505. rbi->dma_addr = pci_map_page(adapter->pdev,
  506. rbi->page, 0, PAGE_SIZE,
  507. PCI_DMA_FROMDEVICE);
  508. } else {
  509. /* rx buffers skipped by the device */
  510. }
  511. val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
  512. }
  513. BUG_ON(rbi->dma_addr == 0);
  514. gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
  515. gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
  516. | val | rbi->len);
  517. /* Fill the last buffer but don't mark it ready, or else the
  518. * device will think that the queue is full */
  519. if (num_allocated == num_to_alloc)
  520. break;
  521. gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
  522. num_allocated++;
  523. vmxnet3_cmd_ring_adv_next2fill(ring);
  524. }
  525. rq->uncommitted[ring_idx] += num_allocated;
  526. dev_dbg(&adapter->netdev->dev,
  527. "alloc_rx_buf: %d allocated, next2fill %u, next2comp "
  528. "%u, uncommited %u\n", num_allocated, ring->next2fill,
  529. ring->next2comp, rq->uncommitted[ring_idx]);
  530. /* so that the device can distinguish a full ring and an empty ring */
  531. BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);
  532. return num_allocated;
  533. }
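/* Attach the rx page buffer described by rbi to skb as a new page fragment. */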
  534. static void
  535. vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
  536. struct vmxnet3_rx_buf_info *rbi)
  537. {
  538. struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
  539. skb_shinfo(skb)->nr_frags;
  540. BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
  541. __skb_frag_set_page(frag, rbi->page);
  542. frag->page_offset = 0;
  543. skb_frag_size_set(frag, rcd->len);
  544. skb->data_len += rcd->len;
  545. skb->truesize += PAGE_SIZE;
  546. skb_shinfo(skb)->nr_frags++;
  547. }
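/*
 * Map the skb (copied headers, linear data and page fragments) and fill tx
 * descriptors for each piece. ctx->sop_txd and ctx->eop_txd are set to the
 * first and last descriptors used; flipping the SOP gen bit is left to the
 * caller.
 */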
  548. static void
  549. vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
  550. struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
  551. struct vmxnet3_adapter *adapter)
  552. {
  553. u32 dw2, len;
  554. unsigned long buf_offset;
  555. int i;
  556. union Vmxnet3_GenericDesc *gdesc;
  557. struct vmxnet3_tx_buf_info *tbi = NULL;
  558. BUG_ON(ctx->copy_size > skb_headlen(skb));
  559. /* use the previous gen bit for the SOP desc */
  560. dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
  561. ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
  562. gdesc = ctx->sop_txd; /* both loops below can be skipped */
  563. /* no need to map the buffer if headers are copied */
  564. if (ctx->copy_size) {
  565. ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
  566. tq->tx_ring.next2fill *
  567. sizeof(struct Vmxnet3_TxDataDesc));
  568. ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
  569. ctx->sop_txd->dword[3] = 0;
  570. tbi = tq->buf_info + tq->tx_ring.next2fill;
  571. tbi->map_type = VMXNET3_MAP_NONE;
  572. dev_dbg(&adapter->netdev->dev,
  573. "txd[%u]: 0x%Lx 0x%x 0x%x\n",
  574. tq->tx_ring.next2fill,
  575. le64_to_cpu(ctx->sop_txd->txd.addr),
  576. ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
  577. vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
  578. /* use the right gen for non-SOP desc */
  579. dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
  580. }
  581. /* linear part can use multiple tx desc if it's big */
  582. len = skb_headlen(skb) - ctx->copy_size;
  583. buf_offset = ctx->copy_size;
  584. while (len) {
  585. u32 buf_size;
  586. if (len < VMXNET3_MAX_TX_BUF_SIZE) {
  587. buf_size = len;
  588. dw2 |= len;
  589. } else {
  590. buf_size = VMXNET3_MAX_TX_BUF_SIZE;
  591. /* spec says that for TxDesc.len, 0 == 2^14 */
  592. }
  593. tbi = tq->buf_info + tq->tx_ring.next2fill;
  594. tbi->map_type = VMXNET3_MAP_SINGLE;
  595. tbi->dma_addr = pci_map_single(adapter->pdev,
  596. skb->data + buf_offset, buf_size,
  597. PCI_DMA_TODEVICE);
  598. tbi->len = buf_size;
  599. gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
  600. BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
  601. gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
  602. gdesc->dword[2] = cpu_to_le32(dw2);
  603. gdesc->dword[3] = 0;
  604. dev_dbg(&adapter->netdev->dev,
  605. "txd[%u]: 0x%Lx 0x%x 0x%x\n",
  606. tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
  607. le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
  608. vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
  609. dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
  610. len -= buf_size;
  611. buf_offset += buf_size;
  612. }
  613. for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  614. const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
  615. tbi = tq->buf_info + tq->tx_ring.next2fill;
  616. tbi->map_type = VMXNET3_MAP_PAGE;
  617. tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
  618. 0, skb_frag_size(frag),
  619. DMA_TO_DEVICE);
  620. tbi->len = skb_frag_size(frag);
  621. gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
  622. BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
  623. gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
  624. gdesc->dword[2] = cpu_to_le32(dw2 | skb_frag_size(frag));
  625. gdesc->dword[3] = 0;
  626. dev_dbg(&adapter->netdev->dev,
  627. "txd[%u]: 0x%llu %u %u\n",
  628. tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
  629. le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
  630. vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
  631. dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
  632. }
  633. ctx->eop_txd = gdesc;
  634. /* set the last buf_info for the pkt */
  635. tbi->skb = skb;
  636. tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
  637. }
  638. /* Init all tx queues */
  639. static void
  640. vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
  641. {
  642. int i;
  643. for (i = 0; i < adapter->num_tx_queues; i++)
  644. vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
  645. }
  646. /*
  647. * parse and copy relevant protocol headers:
  648. * For a tso pkt, relevant headers are L2/3/4 including options
  649. * For a pkt requesting csum offloading, they are L2/3 and may include L4
  650. * if it's a TCP/UDP pkt
  651. *
  652. * Returns:
  653. * -1: error happens during parsing
  654. * 0: protocol headers parsed, but too big to be copied
  655. * 1: protocol headers parsed and copied
  656. *
  657. * Other effects:
  658. * 1. related *ctx fields are updated.
  659. * 2. ctx->copy_size is # of bytes copied
  660. * 3. the portion copied is guaranteed to be in the linear part
  661. *
  662. */
  663. static int
  664. vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
  665. struct vmxnet3_tx_ctx *ctx,
  666. struct vmxnet3_adapter *adapter)
  667. {
  668. struct Vmxnet3_TxDataDesc *tdd;
  669. if (ctx->mss) { /* TSO */
  670. ctx->eth_ip_hdr_size = skb_transport_offset(skb);
  671. ctx->l4_hdr_size = ((struct tcphdr *)
  672. skb_transport_header(skb))->doff * 4;
  673. ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
  674. } else {
  675. if (skb->ip_summed == CHECKSUM_PARTIAL) {
  676. ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);
  677. if (ctx->ipv4) {
  678. struct iphdr *iph = (struct iphdr *)
  679. skb_network_header(skb);
  680. if (iph->protocol == IPPROTO_TCP)
  681. ctx->l4_hdr_size = ((struct tcphdr *)
  682. skb_transport_header(skb))->doff * 4;
  683. else if (iph->protocol == IPPROTO_UDP)
  684. /*
  685. * Use tcp header size so that bytes to
  686. * be copied are more than required by
  687. * the device.
  688. */
  689. ctx->l4_hdr_size =
  690. sizeof(struct tcphdr);
  691. else
  692. ctx->l4_hdr_size = 0;
  693. } else {
  694. /* for simplicity, don't copy L4 headers */
  695. ctx->l4_hdr_size = 0;
  696. }
  697. ctx->copy_size = ctx->eth_ip_hdr_size +
  698. ctx->l4_hdr_size;
  699. } else {
  700. ctx->eth_ip_hdr_size = 0;
  701. ctx->l4_hdr_size = 0;
  702. /* copy as much as allowed */
  703. ctx->copy_size = min((unsigned int)VMXNET3_HDR_COPY_SIZE
  704. , skb_headlen(skb));
  705. }
  706. /* make sure headers are accessible directly */
  707. if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
  708. goto err;
  709. }
  710. if (unlikely(ctx->copy_size > VMXNET3_HDR_COPY_SIZE)) {
  711. tq->stats.oversized_hdr++;
  712. ctx->copy_size = 0;
  713. return 0;
  714. }
  715. tdd = tq->data_ring.base + tq->tx_ring.next2fill;
  716. memcpy(tdd->data, skb->data, ctx->copy_size);
  717. dev_dbg(&adapter->netdev->dev,
  718. "copy %u bytes to dataRing[%u]\n",
  719. ctx->copy_size, tq->tx_ring.next2fill);
  720. return 1;
  721. err:
  722. return -1;
  723. }
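/*
 * Prime the TCP checksum with the pseudo-header value (IPv4 or IPv6) so the
 * device can complete it while performing TSO.
 */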
  724. static void
  725. vmxnet3_prepare_tso(struct sk_buff *skb,
  726. struct vmxnet3_tx_ctx *ctx)
  727. {
  728. struct tcphdr *tcph = (struct tcphdr *)skb_transport_header(skb);
  729. if (ctx->ipv4) {
  730. struct iphdr *iph = (struct iphdr *)skb_network_header(skb);
  731. iph->check = 0;
  732. tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
  733. IPPROTO_TCP, 0);
  734. } else {
  735. struct ipv6hdr *iph = (struct ipv6hdr *)skb_network_header(skb);
  736. tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
  737. IPPROTO_TCP, 0);
  738. }
  739. }
  740. /*
  741. * Transmits a pkt thru a given tq
  742. * Returns:
  743. * NETDEV_TX_OK: descriptors are set up successfully
  744. * NETDEV_TX_OK: error occurred, the pkt is dropped
  745. * NETDEV_TX_BUSY: tx ring is full, queue is stopped
  746. *
  747. * Side-effects:
  748. * 1. tx ring may be changed
  749. * 2. tq stats may be updated accordingly
  750. * 3. shared->txNumDeferred may be updated
  751. */
  752. static int
  753. vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
  754. struct vmxnet3_adapter *adapter, struct net_device *netdev)
  755. {
  756. int ret;
  757. u32 count;
  758. unsigned long flags;
  759. struct vmxnet3_tx_ctx ctx;
  760. union Vmxnet3_GenericDesc *gdesc;
  761. #ifdef __BIG_ENDIAN_BITFIELD
  762. /* Use temporary descriptor to avoid touching bits multiple times */
  763. union Vmxnet3_GenericDesc tempTxDesc;
  764. #endif
  765. /* conservatively estimate # of descriptors to use */
  766. count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) +
  767. skb_shinfo(skb)->nr_frags + 1;
  768. ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));
  769. ctx.mss = skb_shinfo(skb)->gso_size;
  770. if (ctx.mss) {
  771. if (skb_header_cloned(skb)) {
  772. if (unlikely(pskb_expand_head(skb, 0, 0,
  773. GFP_ATOMIC) != 0)) {
  774. tq->stats.drop_tso++;
  775. goto drop_pkt;
  776. }
  777. tq->stats.copy_skb_header++;
  778. }
  779. vmxnet3_prepare_tso(skb, &ctx);
  780. } else {
  781. if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {
  782. /* non-tso pkts must not use more than
  783. * VMXNET3_MAX_TXD_PER_PKT entries
  784. */
  785. if (skb_linearize(skb) != 0) {
  786. tq->stats.drop_too_many_frags++;
  787. goto drop_pkt;
  788. }
  789. tq->stats.linearized++;
  790. /* recalculate the # of descriptors to use */
  791. count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
  792. }
  793. }
  794. spin_lock_irqsave(&tq->tx_lock, flags);
  795. if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
  796. tq->stats.tx_ring_full++;
  797. dev_dbg(&adapter->netdev->dev,
  798. "tx queue stopped on %s, next2comp %u"
  799. " next2fill %u\n", adapter->netdev->name,
  800. tq->tx_ring.next2comp, tq->tx_ring.next2fill);
  801. vmxnet3_tq_stop(tq, adapter);
  802. spin_unlock_irqrestore(&tq->tx_lock, flags);
  803. return NETDEV_TX_BUSY;
  804. }
  805. ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
  806. if (ret >= 0) {
  807. BUG_ON(ret <= 0 && ctx.copy_size != 0);
  808. /* hdrs parsed, check against other limits */
  809. if (ctx.mss) {
  810. if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
  811. VMXNET3_MAX_TX_BUF_SIZE)) {
  812. goto hdr_too_big;
  813. }
  814. } else {
  815. if (skb->ip_summed == CHECKSUM_PARTIAL) {
  816. if (unlikely(ctx.eth_ip_hdr_size +
  817. skb->csum_offset >
  818. VMXNET3_MAX_CSUM_OFFSET)) {
  819. goto hdr_too_big;
  820. }
  821. }
  822. }
  823. } else {
  824. tq->stats.drop_hdr_inspect_err++;
  825. goto unlock_drop_pkt;
  826. }
  827. /* fill tx descs related to addr & len */
  828. vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);
  829. /* setup the EOP desc */
  830. ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
  831. /* setup the SOP desc */
  832. #ifdef __BIG_ENDIAN_BITFIELD
  833. gdesc = &tempTxDesc;
  834. gdesc->dword[2] = ctx.sop_txd->dword[2];
  835. gdesc->dword[3] = ctx.sop_txd->dword[3];
  836. #else
  837. gdesc = ctx.sop_txd;
  838. #endif
  839. if (ctx.mss) {
  840. gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
  841. gdesc->txd.om = VMXNET3_OM_TSO;
  842. gdesc->txd.msscof = ctx.mss;
  843. le32_add_cpu(&tq->shared->txNumDeferred, (skb->len -
  844. gdesc->txd.hlen + ctx.mss - 1) / ctx.mss);
  845. } else {
  846. if (skb->ip_summed == CHECKSUM_PARTIAL) {
  847. gdesc->txd.hlen = ctx.eth_ip_hdr_size;
  848. gdesc->txd.om = VMXNET3_OM_CSUM;
  849. gdesc->txd.msscof = ctx.eth_ip_hdr_size +
  850. skb->csum_offset;
  851. } else {
  852. gdesc->txd.om = 0;
  853. gdesc->txd.msscof = 0;
  854. }
  855. le32_add_cpu(&tq->shared->txNumDeferred, 1);
  856. }
  857. if (vlan_tx_tag_present(skb)) {
  858. gdesc->txd.ti = 1;
  859. gdesc->txd.tci = vlan_tx_tag_get(skb);
  860. }
  861. /* finally flips the GEN bit of the SOP desc. */
  862. gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
  863. VMXNET3_TXD_GEN);
  864. #ifdef __BIG_ENDIAN_BITFIELD
  865. /* Finished updating in bitfields of Tx Desc, so write them in original
  866. * place.
  867. */
  868. vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
  869. (struct Vmxnet3_TxDesc *)ctx.sop_txd);
  870. gdesc = ctx.sop_txd;
  871. #endif
  872. dev_dbg(&adapter->netdev->dev,
  873. "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
  874. (u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd -
  875. tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
  876. le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));
  877. spin_unlock_irqrestore(&tq->tx_lock, flags);
  878. if (le32_to_cpu(tq->shared->txNumDeferred) >=
  879. le32_to_cpu(tq->shared->txThreshold)) {
  880. tq->shared->txNumDeferred = 0;
  881. VMXNET3_WRITE_BAR0_REG(adapter,
  882. VMXNET3_REG_TXPROD + tq->qid * 8,
  883. tq->tx_ring.next2fill);
  884. }
  885. return NETDEV_TX_OK;
  886. hdr_too_big:
  887. tq->stats.drop_oversized_hdr++;
  888. unlock_drop_pkt:
  889. spin_unlock_irqrestore(&tq->tx_lock, flags);
  890. drop_pkt:
  891. tq->stats.drop_total++;
  892. dev_kfree_skb(skb);
  893. return NETDEV_TX_OK;
  894. }
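/*
 * Transmit entry point: dispatch the skb to the tx queue selected by its
 * queue_mapping.
 */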
  895. static netdev_tx_t
  896. vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
  897. {
  898. struct vmxnet3_adapter *adapter = netdev_priv(netdev);
  899. BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
  900. return vmxnet3_tq_xmit(skb,
  901. &adapter->tx_queue[skb->queue_mapping],
  902. adapter, netdev);
  903. }
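/*
 * Translate the device's rx checksum verdict from the completion descriptor
 * into skb->ip_summed / skb->csum.
 */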
  904. static void
  905. vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
  906. struct sk_buff *skb,
  907. union Vmxnet3_GenericDesc *gdesc)
  908. {
  909. if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
  910. /* typical case: TCP/UDP over IP and both csums are correct */
  911. if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) ==
  912. VMXNET3_RCD_CSUM_OK) {
  913. skb->ip_summed = CHECKSUM_UNNECESSARY;
  914. BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
  915. BUG_ON(!(gdesc->rcd.v4 || gdesc->rcd.v6));
  916. BUG_ON(gdesc->rcd.frg);
  917. } else {
  918. if (gdesc->rcd.csum) {
  919. skb->csum = htons(gdesc->rcd.csum);
  920. skb->ip_summed = CHECKSUM_PARTIAL;
  921. } else {
  922. skb_checksum_none_assert(skb);
  923. }
  924. }
  925. } else {
  926. skb_checksum_none_assert(skb);
  927. }
  928. }
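/*
 * Account an rx error reported by the device and drop any partially
 * assembled skb; the rx buffer itself is left in place to be recycled.
 */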
  929. static void
  930. vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
  931. struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter)
  932. {
  933. rq->stats.drop_err++;
  934. if (!rcd->fcs)
  935. rq->stats.drop_fcs++;
  936. rq->stats.drop_total++;
  937. /*
  938. * We do not unmap and chain the rx buffer to the skb.
  939. * We basically pretend this buffer is not used and will be recycled
  940. * by vmxnet3_rq_alloc_rx_buf()
  941. */
  942. /*
  943. * ctx->skb may be NULL if this is the first and the only one
  944. * desc for the pkt
  945. */
  946. if (ctx->skb)
  947. dev_kfree_skb_irq(ctx->skb);
  948. ctx->skb = NULL;
  949. }
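/*
 * Process up to @quota rx completion descriptors: hand completed packets to
 * the stack, refill the rx rings, and update the rx producer registers when
 * the device requests it.
 */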
  950. static int
  951. vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
  952. struct vmxnet3_adapter *adapter, int quota)
  953. {
  954. static const u32 rxprod_reg[2] = {
  955. VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
  956. };
  957. u32 num_rxd = 0;
  958. bool skip_page_frags = false;
  959. struct Vmxnet3_RxCompDesc *rcd;
  960. struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
  961. #ifdef __BIG_ENDIAN_BITFIELD
  962. struct Vmxnet3_RxDesc rxCmdDesc;
  963. struct Vmxnet3_RxCompDesc rxComp;
  964. #endif
  965. vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
  966. &rxComp);
  967. while (rcd->gen == rq->comp_ring.gen) {
  968. struct vmxnet3_rx_buf_info *rbi;
  969. struct sk_buff *skb, *new_skb = NULL;
  970. struct page *new_page = NULL;
  971. int num_to_alloc;
  972. struct Vmxnet3_RxDesc *rxd;
  973. u32 idx, ring_idx;
  974. struct vmxnet3_cmd_ring *ring = NULL;
  975. if (num_rxd >= quota) {
  976. /* we may stop even before we see the EOP desc of
  977. * the current pkt
  978. */
  979. break;
  980. }
  981. num_rxd++;
  982. BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
  983. idx = rcd->rxdIdx;
  984. ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
  985. ring = rq->rx_ring + ring_idx;
  986. vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
  987. &rxCmdDesc);
  988. rbi = rq->buf_info[ring_idx] + idx;
  989. BUG_ON(rxd->addr != rbi->dma_addr ||
  990. rxd->len != rbi->len);
  991. if (unlikely(rcd->eop && rcd->err)) {
  992. vmxnet3_rx_error(rq, rcd, ctx, adapter);
  993. goto rcd_done;
  994. }
  995. if (rcd->sop) { /* first buf of the pkt */
  996. BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
  997. rcd->rqID != rq->qid);
  998. BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
  999. BUG_ON(ctx->skb != NULL || rbi->skb == NULL);
  1000. if (unlikely(rcd->len == 0)) {
  1001. /* Pretend the rx buffer is skipped. */
  1002. BUG_ON(!(rcd->sop && rcd->eop));
  1003. dev_dbg(&adapter->netdev->dev,
  1004. "rxRing[%u][%u] 0 length\n",
  1005. ring_idx, idx);
  1006. goto rcd_done;
  1007. }
  1008. skip_page_frags = false;
  1009. ctx->skb = rbi->skb;
  1010. new_skb = dev_alloc_skb(rbi->len + NET_IP_ALIGN);
  1011. if (new_skb == NULL) {
  1012. /* Skb allocation failed, do not handover this
  1013. * skb to stack. Reuse it. Drop the existing pkt
  1014. */
  1015. rq->stats.rx_buf_alloc_failure++;
  1016. ctx->skb = NULL;
  1017. rq->stats.drop_total++;
  1018. skip_page_frags = true;
  1019. goto rcd_done;
  1020. }
  1021. pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len,
  1022. PCI_DMA_FROMDEVICE);
  1023. skb_put(ctx->skb, rcd->len);
  1024. /* Immediate refill */
  1025. new_skb->dev = adapter->netdev;
  1026. skb_reserve(new_skb, NET_IP_ALIGN);
  1027. rbi->skb = new_skb;
  1028. rbi->dma_addr = pci_map_single(adapter->pdev,
  1029. rbi->skb->data, rbi->len,
  1030. PCI_DMA_FROMDEVICE);
  1031. rxd->addr = cpu_to_le64(rbi->dma_addr);
  1032. rxd->len = rbi->len;
  1033. } else {
  1034. BUG_ON(ctx->skb == NULL && !skip_page_frags);
  1035. /* non SOP buffer must be type 1 in most cases */
  1036. BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
  1037. BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);
  1038. /* If an sop buffer was dropped, skip all
  1039. * following non-sop fragments. They will be reused.
  1040. */
  1041. if (skip_page_frags)
  1042. goto rcd_done;
  1043. new_page = alloc_page(GFP_ATOMIC);
  1044. if (unlikely(new_page == NULL)) {
  1045. /* Replacement page frag could not be allocated.
  1046. * Reuse this page. Drop the pkt and free the
  1047. * skb which contained this page as a frag. Skip
  1048. * processing all the following non-sop frags.
  1049. */
  1050. rq->stats.rx_buf_alloc_failure++;
  1051. dev_kfree_skb(ctx->skb);
  1052. ctx->skb = NULL;
  1053. skip_page_frags = true;
  1054. goto rcd_done;
  1055. }
  1056. if (rcd->len) {
  1057. pci_unmap_page(adapter->pdev,
  1058. rbi->dma_addr, rbi->len,
  1059. PCI_DMA_FROMDEVICE);
  1060. vmxnet3_append_frag(ctx->skb, rcd, rbi);
  1061. }
  1062. /* Immediate refill */
  1063. rbi->page = new_page;
  1064. rbi->dma_addr = pci_map_page(adapter->pdev, rbi->page,
  1065. 0, PAGE_SIZE,
  1066. PCI_DMA_FROMDEVICE);
  1067. rxd->addr = cpu_to_le64(rbi->dma_addr);
  1068. rxd->len = rbi->len;
  1069. }
  1070. skb = ctx->skb;
  1071. if (rcd->eop) {
  1072. skb->len += skb->data_len;
  1073. vmxnet3_rx_csum(adapter, skb,
  1074. (union Vmxnet3_GenericDesc *)rcd);
  1075. skb->protocol = eth_type_trans(skb, adapter->netdev);
  1076. if (unlikely(rcd->ts))
  1077. __vlan_hwaccel_put_tag(skb, rcd->tci);
  1078. if (adapter->netdev->features & NETIF_F_LRO)
  1079. netif_receive_skb(skb);
  1080. else
  1081. napi_gro_receive(&rq->napi, skb);
  1082. ctx->skb = NULL;
  1083. }
  1084. rcd_done:
  1085. /* device may have skipped some rx descs */
  1086. ring->next2comp = idx;
  1087. num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
  1088. ring = rq->rx_ring + ring_idx;
  1089. while (num_to_alloc) {
  1090. vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
  1091. &rxCmdDesc);
  1092. BUG_ON(!rxd->addr);
  1093. /* Recv desc is ready to be used by the device */
  1094. rxd->gen = ring->gen;
  1095. vmxnet3_cmd_ring_adv_next2fill(ring);
  1096. num_to_alloc--;
  1097. }
  1098. /* if needed, update the register */
  1099. if (unlikely(rq->shared->updateRxProd)) {
  1100. VMXNET3_WRITE_BAR0_REG(adapter,
  1101. rxprod_reg[ring_idx] + rq->qid * 8,
  1102. ring->next2fill);
  1103. rq->uncommitted[ring_idx] = 0;
  1104. }
  1105. vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
  1106. vmxnet3_getRxComp(rcd,
  1107. &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
  1108. }
  1109. return num_rxd;
  1110. }
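/*
 * Unmap and free all rx buffers and reset both rx rings and the completion
 * ring to their initial state.
 */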
  1111. static void
  1112. vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
  1113. struct vmxnet3_adapter *adapter)
  1114. {
  1115. u32 i, ring_idx;
  1116. struct Vmxnet3_RxDesc *rxd;
  1117. for (ring_idx = 0; ring_idx < 2; ring_idx++) {
  1118. for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
  1119. #ifdef __BIG_ENDIAN_BITFIELD
  1120. struct Vmxnet3_RxDesc rxDesc;
  1121. #endif
  1122. vmxnet3_getRxDesc(rxd,
  1123. &rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);
  1124. if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
  1125. rq->buf_info[ring_idx][i].skb) {
  1126. pci_unmap_single(adapter->pdev, rxd->addr,
  1127. rxd->len, PCI_DMA_FROMDEVICE);
  1128. dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
  1129. rq->buf_info[ring_idx][i].skb = NULL;
  1130. } else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
  1131. rq->buf_info[ring_idx][i].page) {
  1132. pci_unmap_page(adapter->pdev, rxd->addr,
  1133. rxd->len, PCI_DMA_FROMDEVICE);
  1134. put_page(rq->buf_info[ring_idx][i].page);
  1135. rq->buf_info[ring_idx][i].page = NULL;
  1136. }
  1137. }
  1138. rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
  1139. rq->rx_ring[ring_idx].next2fill =
  1140. rq->rx_ring[ring_idx].next2comp = 0;
  1141. rq->uncommitted[ring_idx] = 0;
  1142. }
  1143. rq->comp_ring.gen = VMXNET3_INIT_GEN;
  1144. rq->comp_ring.next2proc = 0;
  1145. }
  1146. static void
  1147. vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
  1148. {
  1149. int i;
  1150. for (i = 0; i < adapter->num_rx_queues; i++)
  1151. vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
  1152. }
  1153. void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
  1154. struct vmxnet3_adapter *adapter)
  1155. {
  1156. int i;
  1157. int j;
  1158. /* all rx buffers must have already been freed */
  1159. for (i = 0; i < 2; i++) {
  1160. if (rq->buf_info[i]) {
  1161. for (j = 0; j < rq->rx_ring[i].size; j++)
  1162. BUG_ON(rq->buf_info[i][j].page != NULL);
  1163. }
  1164. }
  1165. kfree(rq->buf_info[0]);
  1166. for (i = 0; i < 2; i++) {
  1167. if (rq->rx_ring[i].base) {
  1168. pci_free_consistent(adapter->pdev, rq->rx_ring[i].size
  1169. * sizeof(struct Vmxnet3_RxDesc),
  1170. rq->rx_ring[i].base,
  1171. rq->rx_ring[i].basePA);
  1172. rq->rx_ring[i].base = NULL;
  1173. }
  1174. rq->buf_info[i] = NULL;
  1175. }
  1176. if (rq->comp_ring.base) {
  1177. pci_free_consistent(adapter->pdev, rq->comp_ring.size *
  1178. sizeof(struct Vmxnet3_RxCompDesc),
  1179. rq->comp_ring.base, rq->comp_ring.basePA);
  1180. rq->comp_ring.base = NULL;
  1181. }
  1182. }
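/*
 * Initialize buf_info entries (skb buffers for packet heads, page buffers for
 * the rest), reset the ring state, and pre-fill both rx rings with buffers.
 */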
  1183. static int
  1184. vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
  1185. struct vmxnet3_adapter *adapter)
  1186. {
  1187. int i;
  1188. /* initialize buf_info */
  1189. for (i = 0; i < rq->rx_ring[0].size; i++) {
  1190. /* 1st buf for a pkt is skbuff */
  1191. if (i % adapter->rx_buf_per_pkt == 0) {
  1192. rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
  1193. rq->buf_info[0][i].len = adapter->skb_buf_size;
  1194. } else { /* subsequent bufs for a pkt are frags */
  1195. rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
  1196. rq->buf_info[0][i].len = PAGE_SIZE;
  1197. }
  1198. }
  1199. for (i = 0; i < rq->rx_ring[1].size; i++) {
  1200. rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
  1201. rq->buf_info[1][i].len = PAGE_SIZE;
  1202. }
  1203. /* reset internal state and allocate buffers for both rings */
  1204. for (i = 0; i < 2; i++) {
  1205. rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
  1206. rq->uncommitted[i] = 0;
  1207. memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
  1208. sizeof(struct Vmxnet3_RxDesc));
  1209. rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
  1210. }
  1211. if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
  1212. adapter) == 0) {
/* need at least 1 rx buffer for the 1st ring */
  1214. return -ENOMEM;
  1215. }
  1216. vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);
  1217. /* reset the comp ring */
  1218. rq->comp_ring.next2proc = 0;
  1219. memset(rq->comp_ring.base, 0, rq->comp_ring.size *
  1220. sizeof(struct Vmxnet3_RxCompDesc));
  1221. rq->comp_ring.gen = VMXNET3_INIT_GEN;
  1222. /* reset rxctx */
  1223. rq->rx_ctx.skb = NULL;
  1224. /* stats are not reset */
  1225. return 0;
  1226. }
  1227. static int
  1228. vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
  1229. {
  1230. int i, err = 0;
  1231. for (i = 0; i < adapter->num_rx_queues; i++) {
  1232. err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
  1233. if (unlikely(err)) {
dev_err(&adapter->netdev->dev, "%s: failed to "
"initialize rx queue %i\n",
adapter->netdev->name, i);
  1237. break;
  1238. }
  1239. }
  1240. return err;
  1241. }
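/*
 * Allocate the DMA-coherent descriptor memory for both rx rings and the
 * completion ring, plus a single buf_info array that is shared between
 * the two rings (buf_info[1] simply points past the ring-0 entries).
 * On any failure, everything allocated so far is released through
 * vmxnet3_rq_destroy().
 */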
  1242. static int
  1243. vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
  1244. {
  1245. int i;
  1246. size_t sz;
  1247. struct vmxnet3_rx_buf_info *bi;
  1248. for (i = 0; i < 2; i++) {
  1249. sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
  1250. rq->rx_ring[i].base = pci_alloc_consistent(adapter->pdev, sz,
  1251. &rq->rx_ring[i].basePA);
  1252. if (!rq->rx_ring[i].base) {
  1253. printk(KERN_ERR "%s: failed to allocate rx ring %d\n",
  1254. adapter->netdev->name, i);
  1255. goto err;
  1256. }
  1257. }
  1258. sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
  1259. rq->comp_ring.base = pci_alloc_consistent(adapter->pdev, sz,
  1260. &rq->comp_ring.basePA);
  1261. if (!rq->comp_ring.base) {
  1262. printk(KERN_ERR "%s: failed to allocate rx comp ring\n",
  1263. adapter->netdev->name);
  1264. goto err;
  1265. }
  1266. sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
  1267. rq->rx_ring[1].size);
  1268. bi = kzalloc(sz, GFP_KERNEL);
  1269. if (!bi) {
  1270. printk(KERN_ERR "%s: failed to allocate rx bufinfo\n",
  1271. adapter->netdev->name);
  1272. goto err;
  1273. }
  1274. rq->buf_info[0] = bi;
  1275. rq->buf_info[1] = bi + rq->rx_ring[0].size;
  1276. return 0;
  1277. err:
  1278. vmxnet3_rq_destroy(rq, adapter);
  1279. return -ENOMEM;
  1280. }
  1281. static int
  1282. vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
  1283. {
  1284. int i, err = 0;
  1285. for (i = 0; i < adapter->num_rx_queues; i++) {
  1286. err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
  1287. if (unlikely(err)) {
dev_err(&adapter->netdev->dev,
"%s: failed to create rx queue %i\n",
adapter->netdev->name, i);
  1291. goto err_out;
  1292. }
  1293. }
  1294. return err;
  1295. err_out:
  1296. vmxnet3_rq_destroy_all(adapter);
  1297. return err;
  1298. }
  1299. /* Multiple queue aware polling function for tx and rx */
  1300. static int
  1301. vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
  1302. {
  1303. int rcd_done = 0, i;
  1304. if (unlikely(adapter->shared->ecr))
  1305. vmxnet3_process_events(adapter);
  1306. for (i = 0; i < adapter->num_tx_queues; i++)
  1307. vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);
  1308. for (i = 0; i < adapter->num_rx_queues; i++)
  1309. rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
  1310. adapter, budget);
  1311. return rcd_done;
  1312. }
  1313. static int
  1314. vmxnet3_poll(struct napi_struct *napi, int budget)
  1315. {
  1316. struct vmxnet3_rx_queue *rx_queue = container_of(napi,
  1317. struct vmxnet3_rx_queue, napi);
  1318. int rxd_done;
  1319. rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);
  1320. if (rxd_done < budget) {
  1321. napi_complete(napi);
  1322. vmxnet3_enable_all_intrs(rx_queue->adapter);
  1323. }
  1324. return rxd_done;
  1325. }
  1326. /*
  1327. * NAPI polling function for MSI-X mode with multiple Rx queues
  1328. * Returns the # of the NAPI credit consumed (# of rx descriptors processed)
  1329. */
  1330. static int
  1331. vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
  1332. {
  1333. struct vmxnet3_rx_queue *rq = container_of(napi,
  1334. struct vmxnet3_rx_queue, napi);
  1335. struct vmxnet3_adapter *adapter = rq->adapter;
  1336. int rxd_done;
  1337. /* When sharing interrupt with corresponding tx queue, process
  1338. * tx completions in that queue as well
  1339. */
  1340. if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
  1341. struct vmxnet3_tx_queue *tq =
  1342. &adapter->tx_queue[rq - adapter->rx_queue];
  1343. vmxnet3_tq_tx_complete(tq, adapter);
  1344. }
  1345. rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
  1346. if (rxd_done < budget) {
  1347. napi_complete(napi);
  1348. vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
  1349. }
  1350. return rxd_done;
  1351. }
  1352. #ifdef CONFIG_PCI_MSI
  1353. /*
  1354. * Handle completion interrupts on tx queues
  1355. * Returns whether or not the intr is handled
  1356. */
  1357. static irqreturn_t
  1358. vmxnet3_msix_tx(int irq, void *data)
  1359. {
  1360. struct vmxnet3_tx_queue *tq = data;
  1361. struct vmxnet3_adapter *adapter = tq->adapter;
  1362. if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
  1363. vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);
/* Handle the case where only one irq is allocated for all tx queues */
  1365. if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
  1366. int i;
  1367. for (i = 0; i < adapter->num_tx_queues; i++) {
  1368. struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
  1369. vmxnet3_tq_tx_complete(txq, adapter);
  1370. }
  1371. } else {
  1372. vmxnet3_tq_tx_complete(tq, adapter);
  1373. }
  1374. vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);
  1375. return IRQ_HANDLED;
  1376. }
  1377. /*
  1378. * Handle completion interrupts on rx queues. Returns whether or not the
  1379. * intr is handled
  1380. */
  1381. static irqreturn_t
  1382. vmxnet3_msix_rx(int irq, void *data)
  1383. {
  1384. struct vmxnet3_rx_queue *rq = data;
  1385. struct vmxnet3_adapter *adapter = rq->adapter;
  1386. /* disable intr if needed */
  1387. if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
  1388. vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
  1389. napi_schedule(&rq->napi);
  1390. return IRQ_HANDLED;
  1391. }
/*
 * vmxnet3_msix_event: MSI-X event interrupt handler.
 * Returns whether or not the intr is handled.
 */
  1404. static irqreturn_t
  1405. vmxnet3_msix_event(int irq, void *data)
  1406. {
  1407. struct net_device *dev = data;
  1408. struct vmxnet3_adapter *adapter = netdev_priv(dev);
  1409. /* disable intr if needed */
  1410. if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
  1411. vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);
  1412. if (adapter->shared->ecr)
  1413. vmxnet3_process_events(adapter);
  1414. vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);
  1415. return IRQ_HANDLED;
  1416. }
  1417. #endif /* CONFIG_PCI_MSI */
  1418. /* Interrupt handler for vmxnet3 */
  1419. static irqreturn_t
  1420. vmxnet3_intr(int irq, void *dev_id)
  1421. {
  1422. struct net_device *dev = dev_id;
  1423. struct vmxnet3_adapter *adapter = netdev_priv(dev);
  1424. if (adapter->intr.type == VMXNET3_IT_INTX) {
  1425. u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
  1426. if (unlikely(icr == 0))
  1427. /* not ours */
  1428. return IRQ_NONE;
  1429. }
  1430. /* disable intr if needed */
  1431. if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
  1432. vmxnet3_disable_all_intrs(adapter);
  1433. napi_schedule(&adapter->rx_queue[0].napi);
  1434. return IRQ_HANDLED;
  1435. }
  1436. #ifdef CONFIG_NET_POLL_CONTROLLER
  1437. /* netpoll callback. */
  1438. static void
  1439. vmxnet3_netpoll(struct net_device *netdev)
  1440. {
  1441. struct vmxnet3_adapter *adapter = netdev_priv(netdev);
  1442. if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
  1443. vmxnet3_disable_all_intrs(adapter);
  1444. vmxnet3_do_poll(adapter, adapter->rx_queue[0].rx_ring[0].size);
  1445. vmxnet3_enable_all_intrs(adapter);
  1446. }
  1447. #endif /* CONFIG_NET_POLL_CONTROLLER */
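/*
 * Request the interrupt lines the device will use. In MSI-X mode one
 * vector is requested per tx queue (unless all tx queues share a single
 * vector, or each shares with its rx "buddy" queue), one per rx queue,
 * and one for device events; the per-queue intr_idx fields are filled in
 * accordingly. MSI and INTx fall back to the single vmxnet3_intr handler
 * and force the number of rx queues to 1.
 */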
  1448. static int
  1449. vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
  1450. {
  1451. struct vmxnet3_intr *intr = &adapter->intr;
  1452. int err = 0, i;
  1453. int vector = 0;
  1454. #ifdef CONFIG_PCI_MSI
  1455. if (adapter->intr.type == VMXNET3_IT_MSIX) {
  1456. for (i = 0; i < adapter->num_tx_queues; i++) {
  1457. if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
  1458. sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
  1459. adapter->netdev->name, vector);
  1460. err = request_irq(
  1461. intr->msix_entries[vector].vector,
  1462. vmxnet3_msix_tx, 0,
  1463. adapter->tx_queue[i].name,
  1464. &adapter->tx_queue[i]);
  1465. } else {
  1466. sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
  1467. adapter->netdev->name, vector);
  1468. }
  1469. if (err) {
  1470. dev_err(&adapter->netdev->dev,
  1471. "Failed to request irq for MSIX, %s, "
  1472. "error %d\n",
  1473. adapter->tx_queue[i].name, err);
  1474. return err;
  1475. }
  1476. /* Handle the case where only 1 MSIx was allocated for
  1477. * all tx queues */
  1478. if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
  1479. for (; i < adapter->num_tx_queues; i++)
  1480. adapter->tx_queue[i].comp_ring.intr_idx
  1481. = vector;
  1482. vector++;
  1483. break;
  1484. } else {
  1485. adapter->tx_queue[i].comp_ring.intr_idx
  1486. = vector++;
  1487. }
  1488. }
  1489. if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
  1490. vector = 0;
  1491. for (i = 0; i < adapter->num_rx_queues; i++) {
  1492. if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
  1493. sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
  1494. adapter->netdev->name, vector);
  1495. else
  1496. sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
  1497. adapter->netdev->name, vector);
  1498. err = request_irq(intr->msix_entries[vector].vector,
  1499. vmxnet3_msix_rx, 0,
  1500. adapter->rx_queue[i].name,
  1501. &(adapter->rx_queue[i]));
  1502. if (err) {
  1503. printk(KERN_ERR "Failed to request irq for MSIX"
  1504. ", %s, error %d\n",
  1505. adapter->rx_queue[i].name, err);
  1506. return err;
  1507. }
  1508. adapter->rx_queue[i].comp_ring.intr_idx = vector++;
  1509. }
  1510. sprintf(intr->event_msi_vector_name, "%s-event-%d",
  1511. adapter->netdev->name, vector);
  1512. err = request_irq(intr->msix_entries[vector].vector,
  1513. vmxnet3_msix_event, 0,
  1514. intr->event_msi_vector_name, adapter->netdev);
  1515. intr->event_intr_idx = vector;
  1516. } else if (intr->type == VMXNET3_IT_MSI) {
  1517. adapter->num_rx_queues = 1;
  1518. err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
  1519. adapter->netdev->name, adapter->netdev);
  1520. } else {
  1521. #endif
  1522. adapter->num_rx_queues = 1;
  1523. err = request_irq(adapter->pdev->irq, vmxnet3_intr,
  1524. IRQF_SHARED, adapter->netdev->name,
  1525. adapter->netdev);
  1526. #ifdef CONFIG_PCI_MSI
  1527. }
  1528. #endif
  1529. intr->num_intrs = vector + 1;
  1530. if (err) {
  1531. printk(KERN_ERR "Failed to request irq %s (intr type:%d), error"
  1532. ":%d\n", adapter->netdev->name, intr->type, err);
  1533. } else {
  1534. /* Number of rx queues will not change after this */
  1535. for (i = 0; i < adapter->num_rx_queues; i++) {
  1536. struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
  1537. rq->qid = i;
  1538. rq->qid2 = i + adapter->num_rx_queues;
  1539. }
  1540. /* init our intr settings */
  1541. for (i = 0; i < intr->num_intrs; i++)
  1542. intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
  1543. if (adapter->intr.type != VMXNET3_IT_MSIX) {
  1544. adapter->intr.event_intr_idx = 0;
  1545. for (i = 0; i < adapter->num_tx_queues; i++)
  1546. adapter->tx_queue[i].comp_ring.intr_idx = 0;
  1547. adapter->rx_queue[0].comp_ring.intr_idx = 0;
  1548. }
  1549. printk(KERN_INFO "%s: intr type %u, mode %u, %u vectors "
  1550. "allocated\n", adapter->netdev->name, intr->type,
  1551. intr->mask_mode, intr->num_intrs);
  1552. }
  1553. return err;
  1554. }
  1555. static void
  1556. vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
  1557. {
  1558. struct vmxnet3_intr *intr = &adapter->intr;
  1559. BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);
  1560. switch (intr->type) {
  1561. #ifdef CONFIG_PCI_MSI
  1562. case VMXNET3_IT_MSIX:
  1563. {
  1564. int i, vector = 0;
  1565. if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
  1566. for (i = 0; i < adapter->num_tx_queues; i++) {
  1567. free_irq(intr->msix_entries[vector++].vector,
  1568. &(adapter->tx_queue[i]));
  1569. if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
  1570. break;
  1571. }
  1572. }
  1573. for (i = 0; i < adapter->num_rx_queues; i++) {
  1574. free_irq(intr->msix_entries[vector++].vector,
  1575. &(adapter->rx_queue[i]));
  1576. }
  1577. free_irq(intr->msix_entries[vector].vector,
  1578. adapter->netdev);
  1579. BUG_ON(vector >= intr->num_intrs);
  1580. break;
  1581. }
  1582. #endif
  1583. case VMXNET3_IT_MSI:
  1584. free_irq(adapter->pdev->irq, adapter->netdev);
  1585. break;
  1586. case VMXNET3_IT_INTX:
  1587. free_irq(adapter->pdev->irq, adapter->netdev);
  1588. break;
  1589. default:
  1590. BUG_ON(true);
  1591. }
  1592. }
  1593. static void
  1594. vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
  1595. {
  1596. u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
  1597. u16 vid;
  1598. /* allow untagged pkts */
  1599. VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
  1600. for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
  1601. VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
  1602. }
  1603. static void
  1604. vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
  1605. {
  1606. struct vmxnet3_adapter *adapter = netdev_priv(netdev);
  1607. if (!(netdev->flags & IFF_PROMISC)) {
  1608. u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
  1609. unsigned long flags;
  1610. VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
  1611. spin_lock_irqsave(&adapter->cmd_lock, flags);
  1612. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
  1613. VMXNET3_CMD_UPDATE_VLAN_FILTERS);
  1614. spin_unlock_irqrestore(&adapter->cmd_lock, flags);
  1615. }
  1616. set_bit(vid, adapter->active_vlans);
  1617. }
  1618. static void
  1619. vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
  1620. {
  1621. struct vmxnet3_adapter *adapter = netdev_priv(netdev);
  1622. if (!(netdev->flags & IFF_PROMISC)) {
  1623. u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
  1624. unsigned long flags;
  1625. VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
  1626. spin_lock_irqsave(&adapter->cmd_lock, flags);
  1627. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
  1628. VMXNET3_CMD_UPDATE_VLAN_FILTERS);
  1629. spin_unlock_irqrestore(&adapter->cmd_lock, flags);
  1630. }
  1631. clear_bit(vid, adapter->active_vlans);
  1632. }
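/*
 * Flatten the netdev multicast list into a contiguous buffer of MAC
 * addresses that can be handed to the device. Returns NULL if the list
 * would not fit in the 16-bit mfTableLen field or if the allocation
 * fails, in which case the caller falls back to ALL_MULTI.
 */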
  1633. static u8 *
  1634. vmxnet3_copy_mc(struct net_device *netdev)
  1635. {
  1636. u8 *buf = NULL;
  1637. u32 sz = netdev_mc_count(netdev) * ETH_ALEN;
  1638. /* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
  1639. if (sz <= 0xffff) {
  1640. /* We may be called with BH disabled */
  1641. buf = kmalloc(sz, GFP_ATOMIC);
  1642. if (buf) {
  1643. struct netdev_hw_addr *ha;
  1644. int i = 0;
  1645. netdev_for_each_mc_addr(ha, netdev)
  1646. memcpy(buf + i++ * ETH_ALEN, ha->addr,
  1647. ETH_ALEN);
  1648. }
  1649. }
  1650. return buf;
  1651. }
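/*
 * Translate the netdev flags (promiscuous, broadcast, allmulti) and the
 * multicast list into the device's rxMode bits and multicast filter
 * table, then push the updated filters to the device under cmd_lock.
 */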
  1652. static void
  1653. vmxnet3_set_mc(struct net_device *netdev)
  1654. {
  1655. struct vmxnet3_adapter *adapter = netdev_priv(netdev);
  1656. unsigned long flags;
  1657. struct Vmxnet3_RxFilterConf *rxConf =
  1658. &adapter->shared->devRead.rxFilterConf;
  1659. u8 *new_table = NULL;
  1660. u32 new_mode = VMXNET3_RXM_UCAST;
  1661. if (netdev->flags & IFF_PROMISC) {
  1662. u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
  1663. memset(vfTable, 0, VMXNET3_VFT_SIZE * sizeof(*vfTable));
  1664. new_mode |= VMXNET3_RXM_PROMISC;
  1665. } else {
  1666. vmxnet3_restore_vlan(adapter);
  1667. }
  1668. if (netdev->flags & IFF_BROADCAST)
  1669. new_mode |= VMXNET3_RXM_BCAST;
  1670. if (netdev->flags & IFF_ALLMULTI)
  1671. new_mode |= VMXNET3_RXM_ALL_MULTI;
  1672. else
  1673. if (!netdev_mc_empty(netdev)) {
  1674. new_table = vmxnet3_copy_mc(netdev);
  1675. if (new_table) {
  1676. new_mode |= VMXNET3_RXM_MCAST;
  1677. rxConf->mfTableLen = cpu_to_le16(
  1678. netdev_mc_count(netdev) * ETH_ALEN);
  1679. rxConf->mfTablePA = cpu_to_le64(virt_to_phys(
  1680. new_table));
  1681. } else {
  1682. printk(KERN_INFO "%s: failed to copy mcast list"
  1683. ", setting ALL_MULTI\n", netdev->name);
  1684. new_mode |= VMXNET3_RXM_ALL_MULTI;
  1685. }
  1686. }
  1687. if (!(new_mode & VMXNET3_RXM_MCAST)) {
  1688. rxConf->mfTableLen = 0;
  1689. rxConf->mfTablePA = 0;
  1690. }
  1691. spin_lock_irqsave(&adapter->cmd_lock, flags);
  1692. if (new_mode != rxConf->rxMode) {
  1693. rxConf->rxMode = cpu_to_le32(new_mode);
  1694. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
  1695. VMXNET3_CMD_UPDATE_RX_MODE);
  1696. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
  1697. VMXNET3_CMD_UPDATE_VLAN_FILTERS);
  1698. }
  1699. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
  1700. VMXNET3_CMD_UPDATE_MAC_FILTERS);
  1701. spin_unlock_irqrestore(&adapter->cmd_lock, flags);
  1702. kfree(new_table);
  1703. }
  1704. void
  1705. vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
  1706. {
  1707. int i;
  1708. for (i = 0; i < adapter->num_rx_queues; i++)
  1709. vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
  1710. }
  1711. /*
  1712. * Set up driver_shared based on settings in adapter.
  1713. */
  1714. static void
  1715. vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
  1716. {
  1717. struct Vmxnet3_DriverShared *shared = adapter->shared;
  1718. struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
  1719. struct Vmxnet3_TxQueueConf *tqc;
  1720. struct Vmxnet3_RxQueueConf *rqc;
  1721. int i;
  1722. memset(shared, 0, sizeof(*shared));
  1723. /* driver settings */
  1724. shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
  1725. devRead->misc.driverInfo.version = cpu_to_le32(
  1726. VMXNET3_DRIVER_VERSION_NUM);
  1727. devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
  1728. VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
  1729. devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
  1730. *((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
  1731. *((u32 *)&devRead->misc.driverInfo.gos));
  1732. devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
  1733. devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);
  1734. devRead->misc.ddPA = cpu_to_le64(virt_to_phys(adapter));
  1735. devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
  1736. /* set up feature flags */
  1737. if (adapter->netdev->features & NETIF_F_RXCSUM)
  1738. devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
  1739. if (adapter->netdev->features & NETIF_F_LRO) {
  1740. devRead->misc.uptFeatures |= UPT1_F_LRO;
  1741. devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
  1742. }
  1743. if (adapter->netdev->features & NETIF_F_HW_VLAN_RX)
  1744. devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
  1745. devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
  1746. devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
  1747. devRead->misc.queueDescLen = cpu_to_le32(
  1748. adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
  1749. adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));
  1750. /* tx queue settings */
  1751. devRead->misc.numTxQueues = adapter->num_tx_queues;
  1752. for (i = 0; i < adapter->num_tx_queues; i++) {
  1753. struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
  1754. BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
  1755. tqc = &adapter->tqd_start[i].conf;
  1756. tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA);
  1757. tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
  1758. tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
  1759. tqc->ddPA = cpu_to_le64(virt_to_phys(tq->buf_info));
  1760. tqc->txRingSize = cpu_to_le32(tq->tx_ring.size);
  1761. tqc->dataRingSize = cpu_to_le32(tq->data_ring.size);
  1762. tqc->compRingSize = cpu_to_le32(tq->comp_ring.size);
  1763. tqc->ddLen = cpu_to_le32(
  1764. sizeof(struct vmxnet3_tx_buf_info) *
  1765. tqc->txRingSize);
  1766. tqc->intrIdx = tq->comp_ring.intr_idx;
  1767. }
  1768. /* rx queue settings */
  1769. devRead->misc.numRxQueues = adapter->num_rx_queues;
  1770. for (i = 0; i < adapter->num_rx_queues; i++) {
  1771. struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
  1772. rqc = &adapter->rqd_start[i].conf;
  1773. rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
  1774. rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
  1775. rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA);
  1776. rqc->ddPA = cpu_to_le64(virt_to_phys(
  1777. rq->buf_info));
  1778. rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size);
  1779. rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size);
  1780. rqc->compRingSize = cpu_to_le32(rq->comp_ring.size);
  1781. rqc->ddLen = cpu_to_le32(
  1782. sizeof(struct vmxnet3_rx_buf_info) *
  1783. (rqc->rxRingSize[0] +
  1784. rqc->rxRingSize[1]));
  1785. rqc->intrIdx = rq->comp_ring.intr_idx;
  1786. }
  1787. #ifdef VMXNET3_RSS
  1788. memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));
  1789. if (adapter->rss) {
  1790. struct UPT1_RSSConf *rssConf = adapter->rss_conf;
  1791. devRead->misc.uptFeatures |= UPT1_F_RSS;
  1792. devRead->misc.numRxQueues = adapter->num_rx_queues;
  1793. rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
  1794. UPT1_RSS_HASH_TYPE_IPV4 |
  1795. UPT1_RSS_HASH_TYPE_TCP_IPV6 |
  1796. UPT1_RSS_HASH_TYPE_IPV6;
  1797. rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
  1798. rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
  1799. rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
  1800. get_random_bytes(&rssConf->hashKey[0], rssConf->hashKeySize);
  1801. for (i = 0; i < rssConf->indTableSize; i++)
  1802. rssConf->indTable[i] = i % adapter->num_rx_queues;
  1803. devRead->rssConfDesc.confVer = 1;
  1804. devRead->rssConfDesc.confLen = sizeof(*rssConf);
  1805. devRead->rssConfDesc.confPA = virt_to_phys(rssConf);
  1806. }
  1807. #endif /* VMXNET3_RSS */
  1808. /* intr settings */
  1809. devRead->intrConf.autoMask = adapter->intr.mask_mode ==
  1810. VMXNET3_IMM_AUTO;
  1811. devRead->intrConf.numIntrs = adapter->intr.num_intrs;
  1812. for (i = 0; i < adapter->intr.num_intrs; i++)
  1813. devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];
  1814. devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
  1815. devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
  1816. /* rx filter settings */
  1817. devRead->rxFilterConf.rxMode = 0;
  1818. vmxnet3_restore_vlan(adapter);
  1819. vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);
  1820. /* the rest are already zeroed */
  1821. }
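/*
 * Activate the device: initialize tx/rx queues, request interrupts and
 * fill in the shared area, then issue VMXNET3_CMD_ACTIVATE_DEV. A
 * non-zero result read back from the command register means activation
 * failed. On success the rx producer registers are primed and NAPI and
 * interrupts are enabled.
 */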
  1822. int
  1823. vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
  1824. {
  1825. int err, i;
  1826. u32 ret;
  1827. unsigned long flags;
  1828. dev_dbg(&adapter->netdev->dev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
  1829. " ring sizes %u %u %u\n", adapter->netdev->name,
  1830. adapter->skb_buf_size, adapter->rx_buf_per_pkt,
  1831. adapter->tx_queue[0].tx_ring.size,
  1832. adapter->rx_queue[0].rx_ring[0].size,
  1833. adapter->rx_queue[0].rx_ring[1].size);
  1834. vmxnet3_tq_init_all(adapter);
  1835. err = vmxnet3_rq_init_all(adapter);
  1836. if (err) {
  1837. printk(KERN_ERR "Failed to init rx queue for %s: error %d\n",
  1838. adapter->netdev->name, err);
  1839. goto rq_err;
  1840. }
  1841. err = vmxnet3_request_irqs(adapter);
  1842. if (err) {
  1843. printk(KERN_ERR "Failed to setup irq for %s: error %d\n",
  1844. adapter->netdev->name, err);
  1845. goto irq_err;
  1846. }
  1847. vmxnet3_setup_driver_shared(adapter);
  1848. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
  1849. adapter->shared_pa));
  1850. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
  1851. adapter->shared_pa));
  1852. spin_lock_irqsave(&adapter->cmd_lock, flags);
  1853. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
  1854. VMXNET3_CMD_ACTIVATE_DEV);
  1855. ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
  1856. spin_unlock_irqrestore(&adapter->cmd_lock, flags);
  1857. if (ret != 0) {
  1858. printk(KERN_ERR "Failed to activate dev %s: error %u\n",
  1859. adapter->netdev->name, ret);
  1860. err = -EINVAL;
  1861. goto activate_err;
  1862. }
  1863. for (i = 0; i < adapter->num_rx_queues; i++) {
  1864. VMXNET3_WRITE_BAR0_REG(adapter,
  1865. VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN,
  1866. adapter->rx_queue[i].rx_ring[0].next2fill);
  1867. VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 +
  1868. (i * VMXNET3_REG_ALIGN)),
  1869. adapter->rx_queue[i].rx_ring[1].next2fill);
  1870. }
/* Apply the rx filter settings last. */
  1872. vmxnet3_set_mc(adapter->netdev);
  1873. /*
  1874. * Check link state when first activating device. It will start the
  1875. * tx queue if the link is up.
  1876. */
  1877. vmxnet3_check_link(adapter, true);
  1878. for (i = 0; i < adapter->num_rx_queues; i++)
  1879. napi_enable(&adapter->rx_queue[i].napi);
  1880. vmxnet3_enable_all_intrs(adapter);
  1881. clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
  1882. return 0;
  1883. activate_err:
  1884. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
  1885. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
  1886. vmxnet3_free_irqs(adapter);
  1887. irq_err:
  1888. rq_err:
  1889. /* free up buffers we allocated */
  1890. vmxnet3_rq_cleanup_all(adapter);
  1891. return err;
  1892. }
  1893. void
  1894. vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
  1895. {
  1896. unsigned long flags;
  1897. spin_lock_irqsave(&adapter->cmd_lock, flags);
  1898. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
  1899. spin_unlock_irqrestore(&adapter->cmd_lock, flags);
  1900. }
  1901. int
  1902. vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
  1903. {
  1904. int i;
  1905. unsigned long flags;
  1906. if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
  1907. return 0;
  1908. spin_lock_irqsave(&adapter->cmd_lock, flags);
  1909. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
  1910. VMXNET3_CMD_QUIESCE_DEV);
  1911. spin_unlock_irqrestore(&adapter->cmd_lock, flags);
  1912. vmxnet3_disable_all_intrs(adapter);
  1913. for (i = 0; i < adapter->num_rx_queues; i++)
  1914. napi_disable(&adapter->rx_queue[i].napi);
  1915. netif_tx_disable(adapter->netdev);
  1916. adapter->link_speed = 0;
  1917. netif_carrier_off(adapter->netdev);
  1918. vmxnet3_tq_cleanup_all(adapter);
  1919. vmxnet3_rq_cleanup_all(adapter);
  1920. vmxnet3_free_irqs(adapter);
  1921. return 0;
  1922. }
  1923. static void
  1924. vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
  1925. {
  1926. u32 tmp;
  1927. tmp = *(u32 *)mac;
  1928. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);
  1929. tmp = (mac[5] << 8) | mac[4];
  1930. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
  1931. }
  1932. static int
  1933. vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
  1934. {
  1935. struct sockaddr *addr = p;
  1936. struct vmxnet3_adapter *adapter = netdev_priv(netdev);
  1937. memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
  1938. vmxnet3_write_mac_addr(adapter, addr->sa_data);
  1939. return 0;
  1940. }
  1941. /* ==================== initialization and cleanup routines ============ */
  1942. static int
  1943. vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
  1944. {
  1945. int err;
  1946. unsigned long mmio_start, mmio_len;
  1947. struct pci_dev *pdev = adapter->pdev;
  1948. err = pci_enable_device(pdev);
  1949. if (err) {
  1950. printk(KERN_ERR "Failed to enable adapter %s: error %d\n",
  1951. pci_name(pdev), err);
  1952. return err;
  1953. }
  1954. if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
  1955. if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
  1956. printk(KERN_ERR "pci_set_consistent_dma_mask failed "
  1957. "for adapter %s\n", pci_name(pdev));
  1958. err = -EIO;
  1959. goto err_set_mask;
  1960. }
  1961. *dma64 = true;
  1962. } else {
  1963. if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
  1964. printk(KERN_ERR "pci_set_dma_mask failed for adapter "
  1965. "%s\n", pci_name(pdev));
  1966. err = -EIO;
  1967. goto err_set_mask;
  1968. }
  1969. *dma64 = false;
  1970. }
  1971. err = pci_request_selected_regions(pdev, (1 << 2) - 1,
  1972. vmxnet3_driver_name);
  1973. if (err) {
  1974. printk(KERN_ERR "Failed to request region for adapter %s: "
  1975. "error %d\n", pci_name(pdev), err);
  1976. goto err_set_mask;
  1977. }
  1978. pci_set_master(pdev);
  1979. mmio_start = pci_resource_start(pdev, 0);
  1980. mmio_len = pci_resource_len(pdev, 0);
  1981. adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
  1982. if (!adapter->hw_addr0) {
  1983. printk(KERN_ERR "Failed to map bar0 for adapter %s\n",
  1984. pci_name(pdev));
  1985. err = -EIO;
  1986. goto err_ioremap;
  1987. }
  1988. mmio_start = pci_resource_start(pdev, 1);
  1989. mmio_len = pci_resource_len(pdev, 1);
  1990. adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
  1991. if (!adapter->hw_addr1) {
  1992. printk(KERN_ERR "Failed to map bar1 for adapter %s\n",
  1993. pci_name(pdev));
  1994. err = -EIO;
  1995. goto err_bar1;
  1996. }
  1997. return 0;
  1998. err_bar1:
  1999. iounmap(adapter->hw_addr0);
  2000. err_ioremap:
  2001. pci_release_selected_regions(pdev, (1 << 2) - 1);
  2002. err_set_mask:
  2003. pci_disable_device(pdev);
  2004. return err;
  2005. }
  2006. static void
  2007. vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
  2008. {
  2009. BUG_ON(!adapter->pdev);
  2010. iounmap(adapter->hw_addr0);
  2011. iounmap(adapter->hw_addr1);
  2012. pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
  2013. pci_disable_device(adapter->pdev);
  2014. }
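/*
 * Size the rx buffers and rings for the current MTU. Packets that fit in
 * a single skb buffer use one descriptor per packet; for larger MTUs the
 * remainder spills into page-sized body buffers, so rx_buf_per_pkt grows
 * by one for every PAGE_SIZE of excess. Ring 0 is then rounded up to a
 * multiple of rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN; e.g. assuming
 * that alignment is 32 and rx_buf_per_pkt is 3, a requested ring-0 size
 * of 256 is rounded up to 288.
 */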
  2015. static void
  2016. vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
  2017. {
  2018. size_t sz, i, ring0_size, ring1_size, comp_size;
  2019. struct vmxnet3_rx_queue *rq = &adapter->rx_queue[0];
  2020. if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
  2021. VMXNET3_MAX_ETH_HDR_SIZE) {
  2022. adapter->skb_buf_size = adapter->netdev->mtu +
  2023. VMXNET3_MAX_ETH_HDR_SIZE;
  2024. if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
  2025. adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;
  2026. adapter->rx_buf_per_pkt = 1;
  2027. } else {
  2028. adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
  2029. sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
  2030. VMXNET3_MAX_ETH_HDR_SIZE;
  2031. adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
  2032. }
  2033. /*
  2034. * for simplicity, force the ring0 size to be a multiple of
  2035. * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
  2036. */
  2037. sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
  2038. ring0_size = adapter->rx_queue[0].rx_ring[0].size;
  2039. ring0_size = (ring0_size + sz - 1) / sz * sz;
  2040. ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
  2041. sz * sz);
  2042. ring1_size = adapter->rx_queue[0].rx_ring[1].size;
  2043. comp_size = ring0_size + ring1_size;
  2044. for (i = 0; i < adapter->num_rx_queues; i++) {
  2045. rq = &adapter->rx_queue[i];
  2046. rq->rx_ring[0].size = ring0_size;
  2047. rq->rx_ring[1].size = ring1_size;
  2048. rq->comp_ring.size = comp_size;
  2049. }
  2050. }
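/*
 * Create all tx and rx queues with the requested ring sizes. A tx queue
 * creation failure is fatal, while rx queue creation may fall back to
 * however many queues were successfully created (at least one).
 */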
  2051. int
  2052. vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
  2053. u32 rx_ring_size, u32 rx_ring2_size)
  2054. {
  2055. int err = 0, i;
  2056. for (i = 0; i < adapter->num_tx_queues; i++) {
  2057. struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
  2058. tq->tx_ring.size = tx_ring_size;
  2059. tq->data_ring.size = tx_ring_size;
  2060. tq->comp_ring.size = tx_ring_size;
  2061. tq->shared = &adapter->tqd_start[i].ctrl;
  2062. tq->stopped = true;
  2063. tq->adapter = adapter;
  2064. tq->qid = i;
  2065. err = vmxnet3_tq_create(tq, adapter);
/*
 * Too late to change num_tx_queues. We cannot make do with
 * fewer queues than we asked for.
 */
  2070. if (err)
  2071. goto queue_err;
  2072. }
  2073. adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
  2074. adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
  2075. vmxnet3_adjust_rx_ring_size(adapter);
  2076. for (i = 0; i < adapter->num_rx_queues; i++) {
  2077. struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
  2078. /* qid and qid2 for rx queues will be assigned later when num
  2079. * of rx queues is finalized after allocating intrs */
  2080. rq->shared = &adapter->rqd_start[i].ctrl;
  2081. rq->adapter = adapter;
  2082. err = vmxnet3_rq_create(rq, adapter);
  2083. if (err) {
  2084. if (i == 0) {
printk(KERN_ERR "Could not allocate any rx "
"queues. Aborting.\n");
  2087. goto queue_err;
  2088. } else {
  2089. printk(KERN_INFO "Number of rx queues changed "
  2090. "to : %d.\n", i);
  2091. adapter->num_rx_queues = i;
  2092. err = 0;
  2093. break;
  2094. }
  2095. }
  2096. }
  2097. return err;
  2098. queue_err:
  2099. vmxnet3_tq_destroy_all(adapter);
  2100. return err;
  2101. }
  2102. static int
  2103. vmxnet3_open(struct net_device *netdev)
  2104. {
  2105. struct vmxnet3_adapter *adapter;
  2106. int err, i;
  2107. adapter = netdev_priv(netdev);
  2108. for (i = 0; i < adapter->num_tx_queues; i++)
  2109. spin_lock_init(&adapter->tx_queue[i].tx_lock);
  2110. err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE,
  2111. VMXNET3_DEF_RX_RING_SIZE,
  2112. VMXNET3_DEF_RX_RING_SIZE);
  2113. if (err)
  2114. goto queue_err;
  2115. err = vmxnet3_activate_dev(adapter);
  2116. if (err)
  2117. goto activate_err;
  2118. return 0;
  2119. activate_err:
  2120. vmxnet3_rq_destroy_all(adapter);
  2121. vmxnet3_tq_destroy_all(adapter);
  2122. queue_err:
  2123. return err;
  2124. }
  2125. static int
  2126. vmxnet3_close(struct net_device *netdev)
  2127. {
  2128. struct vmxnet3_adapter *adapter = netdev_priv(netdev);
  2129. /*
  2130. * Reset_work may be in the middle of resetting the device, wait for its
  2131. * completion.
  2132. */
  2133. while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
  2134. msleep(1);
  2135. vmxnet3_quiesce_dev(adapter);
  2136. vmxnet3_rq_destroy_all(adapter);
  2137. vmxnet3_tq_destroy_all(adapter);
  2138. clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
  2139. return 0;
  2140. }
  2141. void
  2142. vmxnet3_force_close(struct vmxnet3_adapter *adapter)
  2143. {
  2144. int i;
  2145. /*
  2146. * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
  2147. * vmxnet3_close() will deadlock.
  2148. */
  2149. BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));
  2150. /* we need to enable NAPI, otherwise dev_close will deadlock */
  2151. for (i = 0; i < adapter->num_rx_queues; i++)
  2152. napi_enable(&adapter->rx_queue[i].napi);
  2153. dev_close(adapter->netdev);
  2154. }
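/*
 * Changing the MTU changes the rx buffer layout, so if the device is
 * running it is quiesced and reset, the rx queues are destroyed and
 * re-created for the new MTU, and the device is re-activated. Any
 * failure force-closes the device.
 */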
  2155. static int
  2156. vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
  2157. {
  2158. struct vmxnet3_adapter *adapter = netdev_priv(netdev);
  2159. int err = 0;
  2160. if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU)
  2161. return -EINVAL;
  2162. netdev->mtu = new_mtu;
  2163. /*
  2164. * Reset_work may be in the middle of resetting the device, wait for its
  2165. * completion.
  2166. */
  2167. while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
  2168. msleep(1);
  2169. if (netif_running(netdev)) {
  2170. vmxnet3_quiesce_dev(adapter);
  2171. vmxnet3_reset_dev(adapter);
  2172. /* we need to re-create the rx queue based on the new mtu */
  2173. vmxnet3_rq_destroy_all(adapter);
  2174. vmxnet3_adjust_rx_ring_size(adapter);
  2175. err = vmxnet3_rq_create_all(adapter);
  2176. if (err) {
  2177. printk(KERN_ERR "%s: failed to re-create rx queues,"
  2178. " error %d. Closing it.\n", netdev->name, err);
  2179. goto out;
  2180. }
  2181. err = vmxnet3_activate_dev(adapter);
  2182. if (err) {
  2183. printk(KERN_ERR "%s: failed to re-activate, error %d. "
  2184. "Closing it\n", netdev->name, err);
  2185. goto out;
  2186. }
  2187. }
  2188. out:
  2189. clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
  2190. if (err)
  2191. vmxnet3_force_close(adapter);
  2192. return err;
  2193. }
  2194. static void
  2195. vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
  2196. {
  2197. struct net_device *netdev = adapter->netdev;
  2198. netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
  2199. NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_TX |
  2200. NETIF_F_HW_VLAN_RX | NETIF_F_TSO | NETIF_F_TSO6 |
  2201. NETIF_F_LRO;
  2202. if (dma64)
  2203. netdev->hw_features |= NETIF_F_HIGHDMA;
  2204. netdev->vlan_features = netdev->hw_features &
  2205. ~(NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
  2206. netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_FILTER;
  2207. netdev_info(adapter->netdev,
  2208. "features: sg csum vlan jf tso tsoIPv6 lro%s\n",
  2209. dma64 ? " highDMA" : "");
  2210. }
  2211. static void
  2212. vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
  2213. {
  2214. u32 tmp;
  2215. tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
  2216. *(u32 *)mac = tmp;
  2217. tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
  2218. mac[4] = tmp & 0xff;
  2219. mac[5] = (tmp >> 8) & 0xff;
  2220. }
  2221. #ifdef CONFIG_PCI_MSI
  2222. /*
  2223. * Enable MSIx vectors.
  2224. * Returns :
  2225. * 0 on successful enabling of required vectors,
  2226. * VMXNET3_LINUX_MIN_MSIX_VECT when only minimum number of vectors required
  2227. * could be enabled.
  2228. * number of vectors which can be enabled otherwise (this number is smaller
  2229. * than VMXNET3_LINUX_MIN_MSIX_VECT)
  2230. */
  2231. static int
  2232. vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter,
  2233. int vectors)
  2234. {
  2235. int err = 0, vector_threshold;
  2236. vector_threshold = VMXNET3_LINUX_MIN_MSIX_VECT;
  2237. while (vectors >= vector_threshold) {
  2238. err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
  2239. vectors);
  2240. if (!err) {
  2241. adapter->intr.num_intrs = vectors;
  2242. return 0;
  2243. } else if (err < 0) {
  2244. printk(KERN_ERR "Failed to enable MSI-X for %s, error"
  2245. " %d\n", adapter->netdev->name, err);
  2246. vectors = 0;
  2247. } else if (err < vector_threshold) {
  2248. break;
} else {
/* If it fails to enable the required number of MSI-X
 * vectors, try enabling the minimum number required.
 */
printk(KERN_ERR "Failed to enable %d MSI-X for %s, try"
" %d instead\n", vectors, adapter->netdev->name,
vector_threshold);
vectors = vector_threshold;
}
  2258. }
printk(KERN_INFO "Number of MSI-X interrupts which can be allocated"
" is lower than min threshold required.\n");
  2261. return err;
  2262. }
  2263. #endif /* CONFIG_PCI_MSI */
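/*
 * Pick an interrupt type for the device. The mode the device reports via
 * VMXNET3_CMD_GET_CONF_INTR (or MSI-X when it reports AUTO) is tried
 * first; if MSI-X vectors cannot be obtained the driver drops to a
 * single rx queue and falls back to MSI, and finally to legacy INTx with
 * one vector.
 */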
  2264. static void
  2265. vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
  2266. {
  2267. u32 cfg;
  2268. unsigned long flags;
  2269. /* intr settings */
  2270. spin_lock_irqsave(&adapter->cmd_lock, flags);
  2271. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
  2272. VMXNET3_CMD_GET_CONF_INTR);
  2273. cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
  2274. spin_unlock_irqrestore(&adapter->cmd_lock, flags);
  2275. adapter->intr.type = cfg & 0x3;
  2276. adapter->intr.mask_mode = (cfg >> 2) & 0x3;
  2277. if (adapter->intr.type == VMXNET3_IT_AUTO) {
  2278. adapter->intr.type = VMXNET3_IT_MSIX;
  2279. }
  2280. #ifdef CONFIG_PCI_MSI
  2281. if (adapter->intr.type == VMXNET3_IT_MSIX) {
  2282. int vector, err = 0;
  2283. adapter->intr.num_intrs = (adapter->share_intr ==
  2284. VMXNET3_INTR_TXSHARE) ? 1 :
  2285. adapter->num_tx_queues;
  2286. adapter->intr.num_intrs += (adapter->share_intr ==
  2287. VMXNET3_INTR_BUDDYSHARE) ? 0 :
  2288. adapter->num_rx_queues;
  2289. adapter->intr.num_intrs += 1; /* for link event */
  2290. adapter->intr.num_intrs = (adapter->intr.num_intrs >
  2291. VMXNET3_LINUX_MIN_MSIX_VECT
  2292. ? adapter->intr.num_intrs :
  2293. VMXNET3_LINUX_MIN_MSIX_VECT);
  2294. for (vector = 0; vector < adapter->intr.num_intrs; vector++)
  2295. adapter->intr.msix_entries[vector].entry = vector;
  2296. err = vmxnet3_acquire_msix_vectors(adapter,
  2297. adapter->intr.num_intrs);
  2298. /* If we cannot allocate one MSIx vector per queue
  2299. * then limit the number of rx queues to 1
  2300. */
  2301. if (err == VMXNET3_LINUX_MIN_MSIX_VECT) {
  2302. if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
  2303. || adapter->num_rx_queues != 1) {
  2304. adapter->share_intr = VMXNET3_INTR_TXSHARE;
  2305. printk(KERN_ERR "Number of rx queues : 1\n");
  2306. adapter->num_rx_queues = 1;
  2307. adapter->intr.num_intrs =
  2308. VMXNET3_LINUX_MIN_MSIX_VECT;
  2309. }
  2310. return;
  2311. }
  2312. if (!err)
  2313. return;
/* If we cannot allocate MSI-X vectors, use only one rx queue */
printk(KERN_INFO "Failed to enable MSI-X for %s, error %d. "
"#rx queues : 1, try MSI\n", adapter->netdev->name, err);
  2317. adapter->intr.type = VMXNET3_IT_MSI;
  2318. }
  2319. if (adapter->intr.type == VMXNET3_IT_MSI) {
  2320. int err;
  2321. err = pci_enable_msi(adapter->pdev);
  2322. if (!err) {
  2323. adapter->num_rx_queues = 1;
  2324. adapter->intr.num_intrs = 1;
  2325. return;
  2326. }
  2327. }
  2328. #endif /* CONFIG_PCI_MSI */
  2329. adapter->num_rx_queues = 1;
  2330. printk(KERN_INFO "Using INTx interrupt, #Rx queues: 1.\n");
  2331. adapter->intr.type = VMXNET3_IT_INTX;
  2332. /* INT-X related setting */
  2333. adapter->intr.num_intrs = 1;
  2334. }
  2335. static void
  2336. vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
  2337. {
  2338. if (adapter->intr.type == VMXNET3_IT_MSIX)
  2339. pci_disable_msix(adapter->pdev);
  2340. else if (adapter->intr.type == VMXNET3_IT_MSI)
  2341. pci_disable_msi(adapter->pdev);
  2342. else
  2343. BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
  2344. }
  2345. static void
  2346. vmxnet3_tx_timeout(struct net_device *netdev)
  2347. {
  2348. struct vmxnet3_adapter *adapter = netdev_priv(netdev);
  2349. adapter->tx_timeout_count++;
  2350. printk(KERN_ERR "%s: tx hang\n", adapter->netdev->name);
  2351. schedule_work(&adapter->work);
  2352. netif_wake_queue(adapter->netdev);
  2353. }
  2354. static void
  2355. vmxnet3_reset_work(struct work_struct *data)
  2356. {
  2357. struct vmxnet3_adapter *adapter;
  2358. adapter = container_of(data, struct vmxnet3_adapter, work);
  2359. /* if another thread is resetting the device, no need to proceed */
  2360. if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
  2361. return;
  2362. /* if the device is closed, we must leave it alone */
  2363. rtnl_lock();
  2364. if (netif_running(adapter->netdev)) {
  2365. printk(KERN_INFO "%s: resetting\n", adapter->netdev->name);
  2366. vmxnet3_quiesce_dev(adapter);
  2367. vmxnet3_reset_dev(adapter);
  2368. vmxnet3_activate_dev(adapter);
  2369. } else {
  2370. printk(KERN_INFO "%s: already closed\n", adapter->netdev->name);
  2371. }
  2372. rtnl_unlock();
  2373. clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
  2374. }
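/*
 * PCI probe: allocate the netdev and the DMA-shared areas, verify the
 * device and UPT version registers, choose queue counts and interrupt
 * resources, and register the net device. When multi-queue is enabled,
 * queue counts are capped at the number of online CPUs and rounded down
 * to a power of two.
 */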
  2375. static int __devinit
  2376. vmxnet3_probe_device(struct pci_dev *pdev,
  2377. const struct pci_device_id *id)
  2378. {
  2379. static const struct net_device_ops vmxnet3_netdev_ops = {
  2380. .ndo_open = vmxnet3_open,
  2381. .ndo_stop = vmxnet3_close,
  2382. .ndo_start_xmit = vmxnet3_xmit_frame,
  2383. .ndo_set_mac_address = vmxnet3_set_mac_addr,
  2384. .ndo_change_mtu = vmxnet3_change_mtu,
  2385. .ndo_set_features = vmxnet3_set_features,
  2386. .ndo_get_stats64 = vmxnet3_get_stats64,
  2387. .ndo_tx_timeout = vmxnet3_tx_timeout,
  2388. .ndo_set_rx_mode = vmxnet3_set_mc,
  2389. .ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
  2390. .ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
  2391. #ifdef CONFIG_NET_POLL_CONTROLLER
  2392. .ndo_poll_controller = vmxnet3_netpoll,
  2393. #endif
  2394. };
  2395. int err;
  2396. bool dma64 = false; /* stupid gcc */
  2397. u32 ver;
  2398. struct net_device *netdev;
  2399. struct vmxnet3_adapter *adapter;
  2400. u8 mac[ETH_ALEN];
  2401. int size;
  2402. int num_tx_queues;
  2403. int num_rx_queues;
  2404. if (!pci_msi_enabled())
  2405. enable_mq = 0;
  2406. #ifdef VMXNET3_RSS
  2407. if (enable_mq)
  2408. num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
  2409. (int)num_online_cpus());
  2410. else
  2411. #endif
  2412. num_rx_queues = 1;
  2413. num_rx_queues = rounddown_pow_of_two(num_rx_queues);
  2414. if (enable_mq)
  2415. num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
  2416. (int)num_online_cpus());
  2417. else
  2418. num_tx_queues = 1;
  2419. num_tx_queues = rounddown_pow_of_two(num_tx_queues);
  2420. netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
  2421. max(num_tx_queues, num_rx_queues));
  2422. printk(KERN_INFO "# of Tx queues : %d, # of Rx queues : %d\n",
  2423. num_tx_queues, num_rx_queues);
  2424. if (!netdev) {
  2425. printk(KERN_ERR "Failed to alloc ethernet device for adapter "
  2426. "%s\n", pci_name(pdev));
  2427. return -ENOMEM;
  2428. }
  2429. pci_set_drvdata(pdev, netdev);
  2430. adapter = netdev_priv(netdev);
  2431. adapter->netdev = netdev;
  2432. adapter->pdev = pdev;
  2433. spin_lock_init(&adapter->cmd_lock);
  2434. adapter->shared = pci_alloc_consistent(adapter->pdev,
  2435. sizeof(struct Vmxnet3_DriverShared),
  2436. &adapter->shared_pa);
  2437. if (!adapter->shared) {
  2438. printk(KERN_ERR "Failed to allocate memory for %s\n",
  2439. pci_name(pdev));
  2440. err = -ENOMEM;
  2441. goto err_alloc_shared;
  2442. }
  2443. adapter->num_rx_queues = num_rx_queues;
  2444. adapter->num_tx_queues = num_tx_queues;
  2445. size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
  2446. size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
  2447. adapter->tqd_start = pci_alloc_consistent(adapter->pdev, size,
  2448. &adapter->queue_desc_pa);
  2449. if (!adapter->tqd_start) {
  2450. printk(KERN_ERR "Failed to allocate memory for %s\n",
  2451. pci_name(pdev));
  2452. err = -ENOMEM;
  2453. goto err_alloc_queue_desc;
  2454. }
  2455. adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
  2456. adapter->num_tx_queues);
  2457. adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL);
  2458. if (adapter->pm_conf == NULL) {
  2459. printk(KERN_ERR "Failed to allocate memory for %s\n",
  2460. pci_name(pdev));
  2461. err = -ENOMEM;
  2462. goto err_alloc_pm;
  2463. }
  2464. #ifdef VMXNET3_RSS
  2465. adapter->rss_conf = kmalloc(sizeof(struct UPT1_RSSConf), GFP_KERNEL);
  2466. if (adapter->rss_conf == NULL) {
  2467. printk(KERN_ERR "Failed to allocate memory for %s\n",
  2468. pci_name(pdev));
  2469. err = -ENOMEM;
  2470. goto err_alloc_rss;
  2471. }
  2472. #endif /* VMXNET3_RSS */
  2473. err = vmxnet3_alloc_pci_resources(adapter, &dma64);
  2474. if (err < 0)
  2475. goto err_alloc_pci;
  2476. ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
  2477. if (ver & 1) {
  2478. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1);
  2479. } else {
  2480. printk(KERN_ERR "Incompatible h/w version (0x%x) for adapter"
  2481. " %s\n", ver, pci_name(pdev));
  2482. err = -EBUSY;
  2483. goto err_ver;
  2484. }
  2485. ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
  2486. if (ver & 1) {
  2487. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
  2488. } else {
  2489. printk(KERN_ERR "Incompatible upt version (0x%x) for "
  2490. "adapter %s\n", ver, pci_name(pdev));
  2491. err = -EBUSY;
  2492. goto err_ver;
  2493. }
  2494. SET_NETDEV_DEV(netdev, &pdev->dev);
  2495. vmxnet3_declare_features(adapter, dma64);
  2496. adapter->dev_number = atomic_read(&devices_found);
  2497. adapter->share_intr = irq_share_mode;
  2498. if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE &&
  2499. adapter->num_tx_queues != adapter->num_rx_queues)
  2500. adapter->share_intr = VMXNET3_INTR_DONTSHARE;
  2501. vmxnet3_alloc_intr_resources(adapter);
  2502. #ifdef VMXNET3_RSS
  2503. if (adapter->num_rx_queues > 1 &&
  2504. adapter->intr.type == VMXNET3_IT_MSIX) {
  2505. adapter->rss = true;
  2506. printk(KERN_INFO "RSS is enabled.\n");
  2507. } else {
  2508. adapter->rss = false;
  2509. }
  2510. #endif
  2511. vmxnet3_read_mac_addr(adapter, mac);
  2512. memcpy(netdev->dev_addr, mac, netdev->addr_len);
  2513. netdev->netdev_ops = &vmxnet3_netdev_ops;
  2514. vmxnet3_set_ethtool_ops(netdev);
  2515. netdev->watchdog_timeo = 5 * HZ;
  2516. INIT_WORK(&adapter->work, vmxnet3_reset_work);
  2517. if (adapter->intr.type == VMXNET3_IT_MSIX) {
  2518. int i;
  2519. for (i = 0; i < adapter->num_rx_queues; i++) {
  2520. netif_napi_add(adapter->netdev,
  2521. &adapter->rx_queue[i].napi,
  2522. vmxnet3_poll_rx_only, 64);
  2523. }
  2524. } else {
  2525. netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
  2526. vmxnet3_poll, 64);
  2527. }
  2528. netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
  2529. netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
  2530. err = register_netdev(netdev);
  2531. if (err) {
  2532. printk(KERN_ERR "Failed to register adapter %s\n",
  2533. pci_name(pdev));
  2534. goto err_register;
  2535. }
  2536. set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
  2537. vmxnet3_check_link(adapter, false);
  2538. atomic_inc(&devices_found);
  2539. return 0;
  2540. err_register:
  2541. vmxnet3_free_intr_resources(adapter);
  2542. err_ver:
  2543. vmxnet3_free_pci_resources(adapter);
  2544. err_alloc_pci:
  2545. #ifdef VMXNET3_RSS
  2546. kfree(adapter->rss_conf);
  2547. err_alloc_rss:
  2548. #endif
  2549. kfree(adapter->pm_conf);
  2550. err_alloc_pm:
  2551. pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
  2552. adapter->queue_desc_pa);
  2553. err_alloc_queue_desc:
  2554. pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
  2555. adapter->shared, adapter->shared_pa);
  2556. err_alloc_shared:
  2557. pci_set_drvdata(pdev, NULL);
  2558. free_netdev(netdev);
  2559. return err;
  2560. }
  2561. static void __devexit
  2562. vmxnet3_remove_device(struct pci_dev *pdev)
  2563. {
  2564. struct net_device *netdev = pci_get_drvdata(pdev);
  2565. struct vmxnet3_adapter *adapter = netdev_priv(netdev);
  2566. int size = 0;
  2567. int num_rx_queues;
  2568. #ifdef VMXNET3_RSS
  2569. if (enable_mq)
  2570. num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
  2571. (int)num_online_cpus());
  2572. else
  2573. #endif
  2574. num_rx_queues = 1;
  2575. num_rx_queues = rounddown_pow_of_two(num_rx_queues);
  2576. cancel_work_sync(&adapter->work);
  2577. unregister_netdev(netdev);
  2578. vmxnet3_free_intr_resources(adapter);
  2579. vmxnet3_free_pci_resources(adapter);
  2580. #ifdef VMXNET3_RSS
  2581. kfree(adapter->rss_conf);
  2582. #endif
  2583. kfree(adapter->pm_conf);
  2584. size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
  2585. size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
  2586. pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
  2587. adapter->queue_desc_pa);
  2588. pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
  2589. adapter->shared, adapter->shared_pa);
  2590. free_netdev(netdev);
  2591. }
  2592. #ifdef CONFIG_PM
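/*
 * Suspend: quiesce the data path, then program wake-up filters according
 * to adapter->wol (a unicast MAC match, an ARP-request match on the
 * interface's primary IPv4 address, and/or magic packet) before handing
 * power management over to the PCI core.
 */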
  2593. static int
  2594. vmxnet3_suspend(struct device *device)
  2595. {
  2596. struct pci_dev *pdev = to_pci_dev(device);
  2597. struct net_device *netdev = pci_get_drvdata(pdev);
  2598. struct vmxnet3_adapter *adapter = netdev_priv(netdev);
  2599. struct Vmxnet3_PMConf *pmConf;
  2600. struct ethhdr *ehdr;
  2601. struct arphdr *ahdr;
  2602. u8 *arpreq;
  2603. struct in_device *in_dev;
  2604. struct in_ifaddr *ifa;
  2605. unsigned long flags;
  2606. int i = 0;
  2607. if (!netif_running(netdev))
  2608. return 0;
  2609. for (i = 0; i < adapter->num_rx_queues; i++)
  2610. napi_disable(&adapter->rx_queue[i].napi);
  2611. vmxnet3_disable_all_intrs(adapter);
  2612. vmxnet3_free_irqs(adapter);
  2613. vmxnet3_free_intr_resources(adapter);
  2614. netif_device_detach(netdev);
  2615. netif_tx_stop_all_queues(netdev);
  2616. /* Create wake-up filters. */
  2617. pmConf = adapter->pm_conf;
  2618. memset(pmConf, 0, sizeof(*pmConf));
  2619. if (adapter->wol & WAKE_UCAST) {
  2620. pmConf->filters[i].patternSize = ETH_ALEN;
  2621. pmConf->filters[i].maskSize = 1;
  2622. memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
  2623. pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
  2624. pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
  2625. i++;
  2626. }
  2627. if (adapter->wol & WAKE_ARP) {
  2628. in_dev = in_dev_get(netdev);
  2629. if (!in_dev)
  2630. goto skip_arp;
  2631. ifa = (struct in_ifaddr *)in_dev->ifa_list;
  2632. if (!ifa)
  2633. goto skip_arp;
  2634. pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/
  2635. sizeof(struct arphdr) + /* ARP header */
  2636. 2 * ETH_ALEN + /* 2 Ethernet addresses*/
  2637. 2 * sizeof(u32); /*2 IPv4 addresses */
  2638. pmConf->filters[i].maskSize =
  2639. (pmConf->filters[i].patternSize - 1) / 8 + 1;
  2640. /* ETH_P_ARP in Ethernet header. */
  2641. ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
  2642. ehdr->h_proto = htons(ETH_P_ARP);
  2643. /* ARPOP_REQUEST in ARP header. */
  2644. ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
  2645. ahdr->ar_op = htons(ARPOP_REQUEST);
  2646. arpreq = (u8 *)(ahdr + 1);
  2647. /* The Unicast IPv4 address in 'tip' field. */
  2648. arpreq += 2 * ETH_ALEN + sizeof(u32);
  2649. *(u32 *)arpreq = ifa->ifa_address;
  2650. /* The mask for the relevant bits. */
  2651. pmConf->filters[i].mask[0] = 0x00;
  2652. pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
  2653. pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
  2654. pmConf->filters[i].mask[3] = 0x00;
  2655. pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
  2656. pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
  2657. in_dev_put(in_dev);
  2658. pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
  2659. i++;
  2660. }
  2661. skip_arp:
  2662. if (adapter->wol & WAKE_MAGIC)
  2663. pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
  2664. pmConf->numFilters = i;
  2665. adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
  2666. adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
  2667. *pmConf));
  2668. adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
  2669. pmConf));
  2670. spin_lock_irqsave(&adapter->cmd_lock, flags);
  2671. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
  2672. VMXNET3_CMD_UPDATE_PMCFG);
  2673. spin_unlock_irqrestore(&adapter->cmd_lock, flags);
  2674. pci_save_state(pdev);
  2675. pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
  2676. adapter->wol);
  2677. pci_disable_device(pdev);
  2678. pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));
  2679. return 0;
  2680. }
  2681. static int
  2682. vmxnet3_resume(struct device *device)
  2683. {
  2684. int err, i = 0;
  2685. unsigned long flags;
  2686. struct pci_dev *pdev = to_pci_dev(device);
  2687. struct net_device *netdev = pci_get_drvdata(pdev);
  2688. struct vmxnet3_adapter *adapter = netdev_priv(netdev);
  2689. struct Vmxnet3_PMConf *pmConf;
  2690. if (!netif_running(netdev))
  2691. return 0;
  2692. /* Destroy wake-up filters. */
  2693. pmConf = adapter->pm_conf;
  2694. memset(pmConf, 0, sizeof(*pmConf));
  2695. adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
  2696. adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
  2697. *pmConf));
  2698. adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
  2699. pmConf));
  2700. netif_device_attach(netdev);
  2701. pci_set_power_state(pdev, PCI_D0);
  2702. pci_restore_state(pdev);
  2703. err = pci_enable_device_mem(pdev);
  2704. if (err != 0)
  2705. return err;
  2706. pci_enable_wake(pdev, PCI_D0, 0);
  2707. spin_lock_irqsave(&adapter->cmd_lock, flags);
  2708. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
  2709. VMXNET3_CMD_UPDATE_PMCFG);
  2710. spin_unlock_irqrestore(&adapter->cmd_lock, flags);
  2711. vmxnet3_alloc_intr_resources(adapter);
  2712. vmxnet3_request_irqs(adapter);
  2713. for (i = 0; i < adapter->num_rx_queues; i++)
  2714. napi_enable(&adapter->rx_queue[i].napi);
  2715. vmxnet3_enable_all_intrs(adapter);
  2716. return 0;
  2717. }
  2718. static const struct dev_pm_ops vmxnet3_pm_ops = {
  2719. .suspend = vmxnet3_suspend,
  2720. .resume = vmxnet3_resume,
  2721. };
  2722. #endif
  2723. static struct pci_driver vmxnet3_driver = {
  2724. .name = vmxnet3_driver_name,
  2725. .id_table = vmxnet3_pciid_table,
  2726. .probe = vmxnet3_probe_device,
  2727. .remove = __devexit_p(vmxnet3_remove_device),
  2728. #ifdef CONFIG_PM
  2729. .driver.pm = &vmxnet3_pm_ops,
  2730. #endif
  2731. };
  2732. static int __init
  2733. vmxnet3_init_module(void)
  2734. {
  2735. printk(KERN_INFO "%s - version %s\n", VMXNET3_DRIVER_DESC,
  2736. VMXNET3_DRIVER_VERSION_REPORT);
  2737. return pci_register_driver(&vmxnet3_driver);
  2738. }
  2739. module_init(vmxnet3_init_module);
  2740. static void
  2741. vmxnet3_exit_module(void)
  2742. {
  2743. pci_unregister_driver(&vmxnet3_driver);
  2744. }
  2745. module_exit(vmxnet3_exit_module);
  2746. MODULE_AUTHOR("VMware, Inc.");
  2747. MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
  2748. MODULE_LICENSE("GPL v2");
  2749. MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);