bnx2x_cmn.c

  1. /* bnx2x_cmn.c: Broadcom Everest network driver.
  2. *
  3. * Copyright (c) 2007-2010 Broadcom Corporation
  4. *
  5. * This program is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License as published by
  7. * the Free Software Foundation.
  8. *
  9. * Maintained by: Eilon Greenstein <eilong@broadcom.com>
  10. * Written by: Eliezer Tamir
  11. * Based on code from Michael Chan's bnx2 driver
  12. * UDP CSUM errata workaround by Arik Gendelman
  13. * Slowpath and fastpath rework by Vladislav Zolotarov
  14. * Statistics and Link management by Yitchak Gertner
  15. *
  16. */
  17. #include <linux/etherdevice.h>
  18. #include <linux/ip.h>
  19. #include <linux/ipv6.h>
  20. #include <net/ip6_checksum.h>
  21. #include "bnx2x_cmn.h"
  22. #ifdef BCM_VLAN
  23. #include <linux/if_vlan.h>
  24. #endif
  25. static int bnx2x_poll(struct napi_struct *napi, int budget);
  26. /* free skb in the packet ring at pos idx
  27. * return idx of last bd freed
  28. */
  29. static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
  30. u16 idx)
  31. {
  32. struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
  33. struct eth_tx_start_bd *tx_start_bd;
  34. struct eth_tx_bd *tx_data_bd;
  35. struct sk_buff *skb = tx_buf->skb;
  36. u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
  37. int nbd;
  38. /* prefetch skb end pointer to speedup dev_kfree_skb() */
  39. prefetch(&skb->end);
  40. DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
  41. idx, tx_buf, skb);
  42. /* unmap first bd */
  43. DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
  44. tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
  45. dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
46. BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
  47. nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
  48. #ifdef BNX2X_STOP_ON_ERROR
  49. if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
  50. BNX2X_ERR("BAD nbd!\n");
  51. bnx2x_panic();
  52. }
  53. #endif
  54. new_cons = nbd + tx_buf->first_bd;
  55. /* Get the next bd */
  56. bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
  57. /* Skip a parse bd... */
  58. --nbd;
  59. bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
  60. /* ...and the TSO split header bd since they have no mapping */
  61. if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
  62. --nbd;
  63. bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
  64. }
  65. /* now free frags */
  66. while (nbd > 0) {
  67. DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
  68. tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
  69. dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
  70. BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
  71. if (--nbd)
  72. bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
  73. }
  74. /* release skb */
  75. WARN_ON(!skb);
  76. dev_kfree_skb(skb);
  77. tx_buf->first_bd = 0;
  78. tx_buf->skb = NULL;
  79. return new_cons;
  80. }
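/* Process TX completions: walk the ring from the SW consumer up to the HW
 * consumer index, free the completed skbs and their BDs, and wake the netdev
 * TX queue if it was stopped and enough descriptors became available.
 */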
  81. int bnx2x_tx_int(struct bnx2x_fastpath *fp)
  82. {
  83. struct bnx2x *bp = fp->bp;
  84. struct netdev_queue *txq;
  85. u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
  86. #ifdef BNX2X_STOP_ON_ERROR
  87. if (unlikely(bp->panic))
  88. return -1;
  89. #endif
  90. txq = netdev_get_tx_queue(bp->dev, fp->index);
  91. hw_cons = le16_to_cpu(*fp->tx_cons_sb);
  92. sw_cons = fp->tx_pkt_cons;
  93. while (sw_cons != hw_cons) {
  94. u16 pkt_cons;
  95. pkt_cons = TX_BD(sw_cons);
  96. /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
  97. DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
  98. hw_cons, sw_cons, pkt_cons);
  99. /* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
  100. rmb();
  101. prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
  102. }
  103. */
  104. bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
  105. sw_cons++;
  106. }
  107. fp->tx_pkt_cons = sw_cons;
  108. fp->tx_bd_cons = bd_cons;
  109. /* Need to make the tx_bd_cons update visible to start_xmit()
  110. * before checking for netif_tx_queue_stopped(). Without the
  111. * memory barrier, there is a small possibility that
  112. * start_xmit() will miss it and cause the queue to be stopped
  113. * forever.
  114. */
  115. smp_mb();
  116. /* TBD need a thresh? */
  117. if (unlikely(netif_tx_queue_stopped(txq))) {
  118. /* Taking tx_lock() is needed to prevent reenabling the queue
119. * while it's empty. This could have happened if rx_action() gets
  120. * suspended in bnx2x_tx_int() after the condition before
  121. * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
  122. *
  123. * stops the queue->sees fresh tx_bd_cons->releases the queue->
  124. * sends some packets consuming the whole queue again->
  125. * stops the queue
  126. */
  127. __netif_tx_lock(txq, smp_processor_id());
  128. if ((netif_tx_queue_stopped(txq)) &&
  129. (bp->state == BNX2X_STATE_OPEN) &&
  130. (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
  131. netif_tx_wake_queue(txq);
  132. __netif_tx_unlock(txq);
  133. }
  134. return 0;
  135. }
  136. static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
  137. u16 idx)
  138. {
  139. u16 last_max = fp->last_max_sge;
  140. if (SUB_S16(idx, last_max) > 0)
  141. fp->last_max_sge = idx;
  142. }
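/* Advance the SGE producer after a TPA aggregation: clear the mask bits of
 * the SGEs consumed by this CQE and push rx_sge_prod forward over every mask
 * element that is now fully consumed (re-arming it on the way).
 */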
  143. static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
  144. struct eth_fast_path_rx_cqe *fp_cqe)
  145. {
  146. struct bnx2x *bp = fp->bp;
  147. u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
  148. le16_to_cpu(fp_cqe->len_on_bd)) >>
  149. SGE_PAGE_SHIFT;
  150. u16 last_max, last_elem, first_elem;
  151. u16 delta = 0;
  152. u16 i;
  153. if (!sge_len)
  154. return;
  155. /* First mark all used pages */
  156. for (i = 0; i < sge_len; i++)
  157. SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
  158. DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
  159. sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
  160. /* Here we assume that the last SGE index is the biggest */
  161. prefetch((void *)(fp->sge_mask));
  162. bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
  163. last_max = RX_SGE(fp->last_max_sge);
  164. last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
  165. first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
  166. /* If ring is not full */
  167. if (last_elem + 1 != first_elem)
  168. last_elem++;
  169. /* Now update the prod */
  170. for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
  171. if (likely(fp->sge_mask[i]))
  172. break;
  173. fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
  174. delta += RX_SGE_MASK_ELEM_SZ;
  175. }
  176. if (delta > 0) {
  177. fp->rx_sge_prod += delta;
  178. /* clear page-end entries */
  179. bnx2x_clear_sge_mask_next_elems(fp);
  180. }
  181. DP(NETIF_MSG_RX_STATUS,
  182. "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
  183. fp->last_max_sge, fp->rx_sge_prod);
  184. }
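/* Start a TPA aggregation in the given bin: hand the bin's spare skb to the
 * producer BD (mapping it for the HW) and park the partially received skb,
 * still mapped, in the bin until the aggregation ends.
 */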
  185. static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
  186. struct sk_buff *skb, u16 cons, u16 prod)
  187. {
  188. struct bnx2x *bp = fp->bp;
  189. struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
  190. struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
  191. struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
  192. dma_addr_t mapping;
  193. /* move empty skb from pool to prod and map it */
  194. prod_rx_buf->skb = fp->tpa_pool[queue].skb;
  195. mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
  196. bp->rx_buf_size, DMA_FROM_DEVICE);
  197. dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
  198. /* move partial skb from cons to pool (don't unmap yet) */
  199. fp->tpa_pool[queue] = *cons_rx_buf;
  200. /* mark bin state as start - print error if current state != stop */
  201. if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
  202. BNX2X_ERR("start of bin not in stop [%d]\n", queue);
  203. fp->tpa_state[queue] = BNX2X_TPA_START;
  204. /* point prod_bd to new skb */
  205. prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
  206. prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
  207. #ifdef BNX2X_STOP_ON_ERROR
  208. fp->tpa_queue_used |= (1 << queue);
  209. #ifdef _ASM_GENERIC_INT_L64_H
  210. DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
  211. #else
  212. DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
  213. #endif
  214. fp->tpa_queue_used);
  215. #endif
  216. }
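/* Attach the SGE pages of an aggregation to the skb as page fragments,
 * unmapping each page and replacing it in the RX page ring along the way.
 */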
  217. static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
  218. struct sk_buff *skb,
  219. struct eth_fast_path_rx_cqe *fp_cqe,
  220. u16 cqe_idx)
  221. {
  222. struct sw_rx_page *rx_pg, old_rx_pg;
  223. u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
  224. u32 i, frag_len, frag_size, pages;
  225. int err;
  226. int j;
  227. frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
  228. pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
  229. /* This is needed in order to enable forwarding support */
  230. if (frag_size)
  231. skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
  232. max(frag_size, (u32)len_on_bd));
  233. #ifdef BNX2X_STOP_ON_ERROR
  234. if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
  235. BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
  236. pages, cqe_idx);
  237. BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
  238. fp_cqe->pkt_len, len_on_bd);
  239. bnx2x_panic();
  240. return -EINVAL;
  241. }
  242. #endif
  243. /* Run through the SGL and compose the fragmented skb */
  244. for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
  245. u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
  246. /* FW gives the indices of the SGE as if the ring is an array
  247. (meaning that "next" element will consume 2 indices) */
  248. frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
  249. rx_pg = &fp->rx_page_ring[sge_idx];
  250. old_rx_pg = *rx_pg;
  251. /* If we fail to allocate a substitute page, we simply stop
  252. where we are and drop the whole packet */
  253. err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
  254. if (unlikely(err)) {
  255. fp->eth_q_stats.rx_skb_alloc_failed++;
  256. return err;
  257. }
258. /* Unmap the page as we are going to pass it to the stack */
  259. dma_unmap_page(&bp->pdev->dev,
  260. dma_unmap_addr(&old_rx_pg, mapping),
  261. SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
  262. /* Add one frag and update the appropriate fields in the skb */
  263. skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
  264. skb->data_len += frag_len;
  265. skb->truesize += frag_len;
  266. skb->len += frag_len;
  267. frag_size -= frag_len;
  268. }
  269. return 0;
  270. }
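/* End a TPA aggregation: unmap the aggregated skb, fix the IP checksum,
 * attach the SGE fragments and hand the packet to GRO, then refill the bin
 * with a newly allocated skb (or drop the packet if allocation fails).
 */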
  271. static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
  272. u16 queue, int pad, int len, union eth_rx_cqe *cqe,
  273. u16 cqe_idx)
  274. {
  275. struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
  276. struct sk_buff *skb = rx_buf->skb;
  277. /* alloc new skb */
  278. struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
  279. /* Unmap skb in the pool anyway, as we are going to change
  280. pool entry status to BNX2X_TPA_STOP even if new skb allocation
  281. fails. */
  282. dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
  283. bp->rx_buf_size, DMA_FROM_DEVICE);
  284. if (likely(new_skb)) {
  285. /* fix ip xsum and give it to the stack */
  286. /* (no need to map the new skb) */
  287. #ifdef BCM_VLAN
  288. int is_vlan_cqe =
  289. (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
  290. PARSING_FLAGS_VLAN);
  291. int is_not_hwaccel_vlan_cqe =
  292. (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
  293. #endif
  294. prefetch(skb);
  295. prefetch(((char *)(skb)) + 128);
  296. #ifdef BNX2X_STOP_ON_ERROR
  297. if (pad + len > bp->rx_buf_size) {
  298. BNX2X_ERR("skb_put is about to fail... "
  299. "pad %d len %d rx_buf_size %d\n",
  300. pad, len, bp->rx_buf_size);
  301. bnx2x_panic();
  302. return;
  303. }
  304. #endif
  305. skb_reserve(skb, pad);
  306. skb_put(skb, len);
  307. skb->protocol = eth_type_trans(skb, bp->dev);
  308. skb->ip_summed = CHECKSUM_UNNECESSARY;
  309. {
  310. struct iphdr *iph;
  311. iph = (struct iphdr *)skb->data;
  312. #ifdef BCM_VLAN
  313. /* If there is no Rx VLAN offloading -
  314. take VLAN tag into an account */
  315. if (unlikely(is_not_hwaccel_vlan_cqe))
  316. iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
  317. #endif
  318. iph->check = 0;
  319. iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
  320. }
  321. if (!bnx2x_fill_frag_skb(bp, fp, skb,
  322. &cqe->fast_path_cqe, cqe_idx)) {
  323. #ifdef BCM_VLAN
  324. if ((bp->vlgrp != NULL) && is_vlan_cqe &&
  325. (!is_not_hwaccel_vlan_cqe))
  326. vlan_gro_receive(&fp->napi, bp->vlgrp,
  327. le16_to_cpu(cqe->fast_path_cqe.
  328. vlan_tag), skb);
  329. else
  330. #endif
  331. napi_gro_receive(&fp->napi, skb);
  332. } else {
  333. DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
  334. " - dropping packet!\n");
  335. dev_kfree_skb(skb);
  336. }
  337. /* put new skb in bin */
  338. fp->tpa_pool[queue].skb = new_skb;
  339. } else {
  340. /* else drop the packet and keep the buffer in the bin */
  341. DP(NETIF_MSG_RX_STATUS,
  342. "Failed to allocate new skb - dropping packet!\n");
  343. fp->eth_q_stats.rx_skb_alloc_failed++;
  344. }
  345. fp->tpa_state[queue] = BNX2X_TPA_STOP;
  346. }
  347. /* Set Toeplitz hash value in the skb using the value from the
  348. * CQE (calculated by HW).
  349. */
  350. static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
  351. struct sk_buff *skb)
  352. {
  353. /* Set Toeplitz hash from CQE */
  354. if ((bp->dev->features & NETIF_F_RXHASH) &&
  355. (cqe->fast_path_cqe.status_flags &
  356. ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
  357. skb->rxhash =
  358. le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
  359. }
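/* Main RX completion handler: walk the RCQ up to the HW completion index,
 * dispatch slowpath CQEs, handle TPA start/end events and regular packets,
 * and finally publish the updated BD/CQE/SGE producers to the chip.
 */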
  360. int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
  361. {
  362. struct bnx2x *bp = fp->bp;
  363. u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
  364. u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
  365. int rx_pkt = 0;
  366. #ifdef BNX2X_STOP_ON_ERROR
  367. if (unlikely(bp->panic))
  368. return 0;
  369. #endif
  370. /* CQ "next element" is of the size of the regular element,
  371. that's why it's ok here */
  372. hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
  373. if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
  374. hw_comp_cons++;
  375. bd_cons = fp->rx_bd_cons;
  376. bd_prod = fp->rx_bd_prod;
  377. bd_prod_fw = bd_prod;
  378. sw_comp_cons = fp->rx_comp_cons;
  379. sw_comp_prod = fp->rx_comp_prod;
  380. /* Memory barrier necessary as speculative reads of the rx
  381. * buffer can be ahead of the index in the status block
  382. */
  383. rmb();
  384. DP(NETIF_MSG_RX_STATUS,
  385. "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
  386. fp->index, hw_comp_cons, sw_comp_cons);
  387. while (sw_comp_cons != hw_comp_cons) {
  388. struct sw_rx_bd *rx_buf = NULL;
  389. struct sk_buff *skb;
  390. union eth_rx_cqe *cqe;
  391. u8 cqe_fp_flags;
  392. u16 len, pad;
  393. comp_ring_cons = RCQ_BD(sw_comp_cons);
  394. bd_prod = RX_BD(bd_prod);
  395. bd_cons = RX_BD(bd_cons);
  396. /* Prefetch the page containing the BD descriptor
  397. at producer's index. It will be needed when new skb is
  398. allocated */
  399. prefetch((void *)(PAGE_ALIGN((unsigned long)
  400. (&fp->rx_desc_ring[bd_prod])) -
  401. PAGE_SIZE + 1));
  402. cqe = &fp->rx_comp_ring[comp_ring_cons];
  403. cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
  404. DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
  405. " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
  406. cqe_fp_flags, cqe->fast_path_cqe.status_flags,
  407. le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
  408. le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
  409. le16_to_cpu(cqe->fast_path_cqe.pkt_len));
  410. /* is this a slowpath msg? */
  411. if (unlikely(CQE_TYPE(cqe_fp_flags))) {
  412. bnx2x_sp_event(fp, cqe);
  413. goto next_cqe;
  414. /* this is an rx packet */
  415. } else {
  416. rx_buf = &fp->rx_buf_ring[bd_cons];
  417. skb = rx_buf->skb;
  418. prefetch(skb);
  419. len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
  420. pad = cqe->fast_path_cqe.placement_offset;
  421. /* If CQE is marked both TPA_START and TPA_END
  422. it is a non-TPA CQE */
  423. if ((!fp->disable_tpa) &&
  424. (TPA_TYPE(cqe_fp_flags) !=
  425. (TPA_TYPE_START | TPA_TYPE_END))) {
  426. u16 queue = cqe->fast_path_cqe.queue_index;
  427. if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
  428. DP(NETIF_MSG_RX_STATUS,
  429. "calling tpa_start on queue %d\n",
  430. queue);
  431. bnx2x_tpa_start(fp, queue, skb,
  432. bd_cons, bd_prod);
  433. /* Set Toeplitz hash for an LRO skb */
  434. bnx2x_set_skb_rxhash(bp, cqe, skb);
  435. goto next_rx;
  436. }
  437. if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
  438. DP(NETIF_MSG_RX_STATUS,
  439. "calling tpa_stop on queue %d\n",
  440. queue);
  441. if (!BNX2X_RX_SUM_FIX(cqe))
  442. BNX2X_ERR("STOP on none TCP "
  443. "data\n");
  444. /* This is a size of the linear data
  445. on this skb */
  446. len = le16_to_cpu(cqe->fast_path_cqe.
  447. len_on_bd);
  448. bnx2x_tpa_stop(bp, fp, queue, pad,
  449. len, cqe, comp_ring_cons);
  450. #ifdef BNX2X_STOP_ON_ERROR
  451. if (bp->panic)
  452. return 0;
  453. #endif
  454. bnx2x_update_sge_prod(fp,
  455. &cqe->fast_path_cqe);
  456. goto next_cqe;
  457. }
  458. }
  459. dma_sync_single_for_device(&bp->pdev->dev,
  460. dma_unmap_addr(rx_buf, mapping),
  461. pad + RX_COPY_THRESH,
  462. DMA_FROM_DEVICE);
  463. prefetch(((char *)(skb)) + 128);
  464. /* is this an error packet? */
  465. if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
  466. DP(NETIF_MSG_RX_ERR,
  467. "ERROR flags %x rx packet %u\n",
  468. cqe_fp_flags, sw_comp_cons);
  469. fp->eth_q_stats.rx_err_discard_pkt++;
  470. goto reuse_rx;
  471. }
  472. /* Since we don't have a jumbo ring
  473. * copy small packets if mtu > 1500
  474. */
  475. if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
  476. (len <= RX_COPY_THRESH)) {
  477. struct sk_buff *new_skb;
  478. new_skb = netdev_alloc_skb(bp->dev,
  479. len + pad);
  480. if (new_skb == NULL) {
  481. DP(NETIF_MSG_RX_ERR,
  482. "ERROR packet dropped "
  483. "because of alloc failure\n");
  484. fp->eth_q_stats.rx_skb_alloc_failed++;
  485. goto reuse_rx;
  486. }
  487. /* aligned copy */
  488. skb_copy_from_linear_data_offset(skb, pad,
  489. new_skb->data + pad, len);
  490. skb_reserve(new_skb, pad);
  491. skb_put(new_skb, len);
  492. bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
  493. skb = new_skb;
  494. } else
  495. if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
  496. dma_unmap_single(&bp->pdev->dev,
  497. dma_unmap_addr(rx_buf, mapping),
  498. bp->rx_buf_size,
  499. DMA_FROM_DEVICE);
  500. skb_reserve(skb, pad);
  501. skb_put(skb, len);
  502. } else {
  503. DP(NETIF_MSG_RX_ERR,
  504. "ERROR packet dropped because "
  505. "of alloc failure\n");
  506. fp->eth_q_stats.rx_skb_alloc_failed++;
  507. reuse_rx:
  508. bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
  509. goto next_rx;
  510. }
  511. skb->protocol = eth_type_trans(skb, bp->dev);
512. /* Set Toeplitz hash for a non-LRO skb */
  513. bnx2x_set_skb_rxhash(bp, cqe, skb);
  514. skb->ip_summed = CHECKSUM_NONE;
  515. if (bp->rx_csum) {
  516. if (likely(BNX2X_RX_CSUM_OK(cqe)))
  517. skb->ip_summed = CHECKSUM_UNNECESSARY;
  518. else
  519. fp->eth_q_stats.hw_csum_err++;
  520. }
  521. }
  522. skb_record_rx_queue(skb, fp->index);
  523. #ifdef BCM_VLAN
  524. if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
  525. (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
  526. PARSING_FLAGS_VLAN))
  527. vlan_gro_receive(&fp->napi, bp->vlgrp,
  528. le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
  529. else
  530. #endif
  531. napi_gro_receive(&fp->napi, skb);
  532. next_rx:
  533. rx_buf->skb = NULL;
  534. bd_cons = NEXT_RX_IDX(bd_cons);
  535. bd_prod = NEXT_RX_IDX(bd_prod);
  536. bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
  537. rx_pkt++;
  538. next_cqe:
  539. sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
  540. sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
  541. if (rx_pkt == budget)
  542. break;
  543. } /* while */
  544. fp->rx_bd_cons = bd_cons;
  545. fp->rx_bd_prod = bd_prod_fw;
  546. fp->rx_comp_cons = sw_comp_cons;
  547. fp->rx_comp_prod = sw_comp_prod;
  548. /* Update producers */
  549. bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
  550. fp->rx_sge_prod);
  551. fp->rx_pkt += rx_pkt;
  552. fp->rx_calls++;
  553. return rx_pkt;
  554. }
  555. static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
  556. {
  557. struct bnx2x_fastpath *fp = fp_cookie;
  558. struct bnx2x *bp = fp->bp;
  559. /* Return here if interrupt is disabled */
  560. if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
  561. DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
  562. return IRQ_HANDLED;
  563. }
  564. DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
  565. fp->index, fp->sb_id);
  566. bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
  567. #ifdef BNX2X_STOP_ON_ERROR
  568. if (unlikely(bp->panic))
  569. return IRQ_HANDLED;
  570. #endif
  571. /* Handle Rx and Tx according to MSI-X vector */
  572. prefetch(fp->rx_cons_sb);
  573. prefetch(fp->tx_cons_sb);
  574. prefetch(&fp->status_blk->u_status_block.status_block_index);
  575. prefetch(&fp->status_blk->c_status_block.status_block_index);
  576. napi_schedule(&bnx2x_fp(bp, fp->index, napi));
  577. return IRQ_HANDLED;
  578. }
  579. /* HW Lock for shared dual port PHYs */
  580. void bnx2x_acquire_phy_lock(struct bnx2x *bp)
  581. {
  582. mutex_lock(&bp->port.phy_mutex);
  583. if (bp->port.need_hw_lock)
  584. bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
  585. }
  586. void bnx2x_release_phy_lock(struct bnx2x *bp)
  587. {
  588. if (bp->port.need_hw_lock)
  589. bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
  590. mutex_unlock(&bp->port.phy_mutex);
  591. }
  592. void bnx2x_link_report(struct bnx2x *bp)
  593. {
  594. if (bp->flags & MF_FUNC_DIS) {
  595. netif_carrier_off(bp->dev);
  596. netdev_err(bp->dev, "NIC Link is Down\n");
  597. return;
  598. }
  599. if (bp->link_vars.link_up) {
  600. u16 line_speed;
  601. if (bp->state == BNX2X_STATE_OPEN)
  602. netif_carrier_on(bp->dev);
  603. netdev_info(bp->dev, "NIC Link is Up, ");
  604. line_speed = bp->link_vars.line_speed;
  605. if (IS_E1HMF(bp)) {
  606. u16 vn_max_rate;
  607. vn_max_rate =
  608. ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
  609. FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
  610. if (vn_max_rate < line_speed)
  611. line_speed = vn_max_rate;
  612. }
  613. pr_cont("%d Mbps ", line_speed);
  614. if (bp->link_vars.duplex == DUPLEX_FULL)
  615. pr_cont("full duplex");
  616. else
  617. pr_cont("half duplex");
  618. if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
  619. if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
  620. pr_cont(", receive ");
  621. if (bp->link_vars.flow_ctrl &
  622. BNX2X_FLOW_CTRL_TX)
  623. pr_cont("& transmit ");
  624. } else {
  625. pr_cont(", transmit ");
  626. }
  627. pr_cont("flow control ON");
  628. }
  629. pr_cont("\n");
  630. } else { /* link_down */
  631. netif_carrier_off(bp->dev);
  632. netdev_err(bp->dev, "NIC Link is Down\n");
  633. }
  634. }
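/* Set up the RX rings of every queue: allocate the TPA pool skbs, chain the
 * "next page" elements of the SGE/BD/CQE rings, pre-allocate the RX skbs and
 * SGE pages, and publish the initial producers to the chip.
 */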
  635. void bnx2x_init_rx_rings(struct bnx2x *bp)
  636. {
  637. int func = BP_FUNC(bp);
  638. int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
  639. ETH_MAX_AGGREGATION_QUEUES_E1H;
  640. u16 ring_prod, cqe_ring_prod;
  641. int i, j;
  642. bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
  643. DP(NETIF_MSG_IFUP,
  644. "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
  645. if (bp->flags & TPA_ENABLE_FLAG) {
  646. for_each_queue(bp, j) {
  647. struct bnx2x_fastpath *fp = &bp->fp[j];
  648. for (i = 0; i < max_agg_queues; i++) {
  649. fp->tpa_pool[i].skb =
  650. netdev_alloc_skb(bp->dev, bp->rx_buf_size);
  651. if (!fp->tpa_pool[i].skb) {
  652. BNX2X_ERR("Failed to allocate TPA "
  653. "skb pool for queue[%d] - "
  654. "disabling TPA on this "
  655. "queue!\n", j);
  656. bnx2x_free_tpa_pool(bp, fp, i);
  657. fp->disable_tpa = 1;
  658. break;
  659. }
  660. dma_unmap_addr_set((struct sw_rx_bd *)
  661. &bp->fp->tpa_pool[i],
  662. mapping, 0);
  663. fp->tpa_state[i] = BNX2X_TPA_STOP;
  664. }
  665. }
  666. }
  667. for_each_queue(bp, j) {
  668. struct bnx2x_fastpath *fp = &bp->fp[j];
  669. fp->rx_bd_cons = 0;
  670. fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
  671. fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
  672. /* "next page" elements initialization */
  673. /* SGE ring */
  674. for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
  675. struct eth_rx_sge *sge;
  676. sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
  677. sge->addr_hi =
  678. cpu_to_le32(U64_HI(fp->rx_sge_mapping +
  679. BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
  680. sge->addr_lo =
  681. cpu_to_le32(U64_LO(fp->rx_sge_mapping +
  682. BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
  683. }
  684. bnx2x_init_sge_ring_bit_mask(fp);
  685. /* RX BD ring */
  686. for (i = 1; i <= NUM_RX_RINGS; i++) {
  687. struct eth_rx_bd *rx_bd;
  688. rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
  689. rx_bd->addr_hi =
  690. cpu_to_le32(U64_HI(fp->rx_desc_mapping +
  691. BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
  692. rx_bd->addr_lo =
  693. cpu_to_le32(U64_LO(fp->rx_desc_mapping +
  694. BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
  695. }
  696. /* CQ ring */
  697. for (i = 1; i <= NUM_RCQ_RINGS; i++) {
  698. struct eth_rx_cqe_next_page *nextpg;
  699. nextpg = (struct eth_rx_cqe_next_page *)
  700. &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
  701. nextpg->addr_hi =
  702. cpu_to_le32(U64_HI(fp->rx_comp_mapping +
  703. BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
  704. nextpg->addr_lo =
  705. cpu_to_le32(U64_LO(fp->rx_comp_mapping +
  706. BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
  707. }
  708. /* Allocate SGEs and initialize the ring elements */
  709. for (i = 0, ring_prod = 0;
  710. i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
  711. if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
  712. BNX2X_ERR("was only able to allocate "
  713. "%d rx sges\n", i);
  714. BNX2X_ERR("disabling TPA for queue[%d]\n", j);
  715. /* Cleanup already allocated elements */
  716. bnx2x_free_rx_sge_range(bp, fp, ring_prod);
  717. bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
  718. fp->disable_tpa = 1;
  719. ring_prod = 0;
  720. break;
  721. }
  722. ring_prod = NEXT_SGE_IDX(ring_prod);
  723. }
  724. fp->rx_sge_prod = ring_prod;
  725. /* Allocate BDs and initialize BD ring */
  726. fp->rx_comp_cons = 0;
  727. cqe_ring_prod = ring_prod = 0;
  728. for (i = 0; i < bp->rx_ring_size; i++) {
  729. if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
  730. BNX2X_ERR("was only able to allocate "
  731. "%d rx skbs on queue[%d]\n", i, j);
  732. fp->eth_q_stats.rx_skb_alloc_failed++;
  733. break;
  734. }
  735. ring_prod = NEXT_RX_IDX(ring_prod);
  736. cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
  737. WARN_ON(ring_prod <= i);
  738. }
  739. fp->rx_bd_prod = ring_prod;
  740. /* must not have more available CQEs than BDs */
  741. fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
  742. cqe_ring_prod);
  743. fp->rx_pkt = fp->rx_calls = 0;
  744. /* Warning!
  745. * this will generate an interrupt (to the TSTORM)
  746. * must only be done after chip is initialized
  747. */
  748. bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
  749. fp->rx_sge_prod);
  750. if (j != 0)
  751. continue;
  752. REG_WR(bp, BAR_USTRORM_INTMEM +
  753. USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
  754. U64_LO(fp->rx_comp_mapping));
  755. REG_WR(bp, BAR_USTRORM_INTMEM +
  756. USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
  757. U64_HI(fp->rx_comp_mapping));
  758. }
  759. }
  760. static void bnx2x_free_tx_skbs(struct bnx2x *bp)
  761. {
  762. int i;
  763. for_each_queue(bp, i) {
  764. struct bnx2x_fastpath *fp = &bp->fp[i];
  765. u16 bd_cons = fp->tx_bd_cons;
  766. u16 sw_prod = fp->tx_pkt_prod;
  767. u16 sw_cons = fp->tx_pkt_cons;
  768. while (sw_cons != sw_prod) {
  769. bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
  770. sw_cons++;
  771. }
  772. }
  773. }
  774. static void bnx2x_free_rx_skbs(struct bnx2x *bp)
  775. {
  776. int i, j;
  777. for_each_queue(bp, j) {
  778. struct bnx2x_fastpath *fp = &bp->fp[j];
  779. for (i = 0; i < NUM_RX_BD; i++) {
  780. struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
  781. struct sk_buff *skb = rx_buf->skb;
  782. if (skb == NULL)
  783. continue;
  784. dma_unmap_single(&bp->pdev->dev,
  785. dma_unmap_addr(rx_buf, mapping),
  786. bp->rx_buf_size, DMA_FROM_DEVICE);
  787. rx_buf->skb = NULL;
  788. dev_kfree_skb(skb);
  789. }
  790. if (!fp->disable_tpa)
  791. bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
  792. ETH_MAX_AGGREGATION_QUEUES_E1 :
  793. ETH_MAX_AGGREGATION_QUEUES_E1H);
  794. }
  795. }
  796. void bnx2x_free_skbs(struct bnx2x *bp)
  797. {
  798. bnx2x_free_tx_skbs(bp);
  799. bnx2x_free_rx_skbs(bp);
  800. }
  801. static void bnx2x_free_msix_irqs(struct bnx2x *bp)
  802. {
  803. int i, offset = 1;
  804. free_irq(bp->msix_table[0].vector, bp->dev);
  805. DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
  806. bp->msix_table[0].vector);
  807. #ifdef BCM_CNIC
  808. offset++;
  809. #endif
  810. for_each_queue(bp, i) {
  811. DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
  812. "state %x\n", i, bp->msix_table[i + offset].vector,
  813. bnx2x_fp(bp, i, state));
  814. free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
  815. }
  816. }
  817. void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
  818. {
  819. if (bp->flags & USING_MSIX_FLAG) {
  820. if (!disable_only)
  821. bnx2x_free_msix_irqs(bp);
  822. pci_disable_msix(bp->pdev);
  823. bp->flags &= ~USING_MSIX_FLAG;
  824. } else if (bp->flags & USING_MSI_FLAG) {
  825. if (!disable_only)
  826. free_irq(bp->pdev->irq, bp->dev);
  827. pci_disable_msi(bp->pdev);
  828. bp->flags &= ~USING_MSI_FLAG;
  829. } else if (!disable_only)
  830. free_irq(bp->pdev->irq, bp->dev);
  831. }
  832. static int bnx2x_enable_msix(struct bnx2x *bp)
  833. {
  834. int i, rc, offset = 1;
  835. int igu_vec = 0;
  836. bp->msix_table[0].entry = igu_vec;
  837. DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
  838. #ifdef BCM_CNIC
  839. igu_vec = BP_L_ID(bp) + offset;
  840. bp->msix_table[1].entry = igu_vec;
  841. DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
  842. offset++;
  843. #endif
  844. for_each_queue(bp, i) {
  845. igu_vec = BP_L_ID(bp) + offset + i;
  846. bp->msix_table[i + offset].entry = igu_vec;
  847. DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
  848. "(fastpath #%u)\n", i + offset, igu_vec, i);
  849. }
  850. rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
  851. BNX2X_NUM_QUEUES(bp) + offset);
  852. /*
  853. * reconfigure number of tx/rx queues according to available
  854. * MSI-X vectors
  855. */
  856. if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
  857. /* vectors available for FP */
  858. int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;
  859. DP(NETIF_MSG_IFUP,
  860. "Trying to use less MSI-X vectors: %d\n", rc);
  861. rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
  862. if (rc) {
  863. DP(NETIF_MSG_IFUP,
  864. "MSI-X is not attainable rc %d\n", rc);
  865. return rc;
  866. }
  867. bp->num_queues = min(bp->num_queues, fp_vec);
  868. DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
  869. bp->num_queues);
  870. } else if (rc) {
  871. DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
  872. return rc;
  873. }
  874. bp->flags |= USING_MSIX_FLAG;
  875. return 0;
  876. }
  877. static int bnx2x_req_msix_irqs(struct bnx2x *bp)
  878. {
  879. int i, rc, offset = 1;
  880. rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
  881. bp->dev->name, bp->dev);
  882. if (rc) {
  883. BNX2X_ERR("request sp irq failed\n");
  884. return -EBUSY;
  885. }
  886. #ifdef BCM_CNIC
  887. offset++;
  888. #endif
  889. for_each_queue(bp, i) {
  890. struct bnx2x_fastpath *fp = &bp->fp[i];
  891. snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
  892. bp->dev->name, i);
  893. rc = request_irq(bp->msix_table[i + offset].vector,
  894. bnx2x_msix_fp_int, 0, fp->name, fp);
  895. if (rc) {
  896. BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
  897. bnx2x_free_msix_irqs(bp);
  898. return -EBUSY;
  899. }
  900. fp->state = BNX2X_FP_STATE_IRQ;
  901. }
  902. i = BNX2X_NUM_QUEUES(bp);
  903. netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
  904. " ... fp[%d] %d\n",
  905. bp->msix_table[0].vector,
  906. 0, bp->msix_table[offset].vector,
  907. i - 1, bp->msix_table[offset + i - 1].vector);
  908. return 0;
  909. }
  910. static int bnx2x_enable_msi(struct bnx2x *bp)
  911. {
  912. int rc;
  913. rc = pci_enable_msi(bp->pdev);
  914. if (rc) {
  915. DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
  916. return -1;
  917. }
  918. bp->flags |= USING_MSI_FLAG;
  919. return 0;
  920. }
  921. static int bnx2x_req_irq(struct bnx2x *bp)
  922. {
  923. unsigned long flags;
  924. int rc;
  925. if (bp->flags & USING_MSI_FLAG)
  926. flags = 0;
  927. else
  928. flags = IRQF_SHARED;
  929. rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
  930. bp->dev->name, bp->dev);
  931. if (!rc)
  932. bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
  933. return rc;
  934. }
  935. static void bnx2x_napi_enable(struct bnx2x *bp)
  936. {
  937. int i;
  938. for_each_queue(bp, i)
  939. napi_enable(&bnx2x_fp(bp, i, napi));
  940. }
  941. static void bnx2x_napi_disable(struct bnx2x *bp)
  942. {
  943. int i;
  944. for_each_queue(bp, i)
  945. napi_disable(&bnx2x_fp(bp, i, napi));
  946. }
  947. void bnx2x_netif_start(struct bnx2x *bp)
  948. {
  949. int intr_sem;
  950. intr_sem = atomic_dec_and_test(&bp->intr_sem);
  951. smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
  952. if (intr_sem) {
  953. if (netif_running(bp->dev)) {
  954. bnx2x_napi_enable(bp);
  955. bnx2x_int_enable(bp);
  956. if (bp->state == BNX2X_STATE_OPEN)
  957. netif_tx_wake_all_queues(bp->dev);
  958. }
  959. }
  960. }
  961. void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
  962. {
  963. bnx2x_int_disable_sync(bp, disable_hw);
  964. bnx2x_napi_disable(bp);
  965. netif_tx_disable(bp->dev);
  966. }
  967. static int bnx2x_set_num_queues(struct bnx2x *bp)
  968. {
  969. int rc = 0;
  970. switch (bp->int_mode) {
  971. case INT_MODE_INTx:
  972. case INT_MODE_MSI:
  973. bp->num_queues = 1;
  974. DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
  975. break;
  976. default:
  977. /* Set number of queues according to bp->multi_mode value */
  978. bnx2x_set_num_queues_msix(bp);
  979. DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
  980. bp->num_queues);
  981. /* if we can't use MSI-X we only need one fp,
  982. * so try to enable MSI-X with the requested number of fp's
  983. * and fallback to MSI or legacy INTx with one fp
  984. */
  985. rc = bnx2x_enable_msix(bp);
  986. if (rc)
  987. /* failed to enable MSI-X */
  988. bp->num_queues = 1;
  989. break;
  990. }
  991. bp->dev->real_num_tx_queues = bp->num_queues;
  992. return rc;
  993. }
  994. /* must be called with rtnl_lock */
  995. int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
  996. {
  997. u32 load_code;
  998. int i, rc;
  999. #ifdef BNX2X_STOP_ON_ERROR
  1000. if (unlikely(bp->panic))
  1001. return -EPERM;
  1002. #endif
  1003. bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
  1004. rc = bnx2x_set_num_queues(bp);
  1005. if (bnx2x_alloc_mem(bp)) {
  1006. bnx2x_free_irq(bp, true);
  1007. return -ENOMEM;
  1008. }
  1009. for_each_queue(bp, i)
  1010. bnx2x_fp(bp, i, disable_tpa) =
  1011. ((bp->flags & TPA_ENABLE_FLAG) == 0);
  1012. for_each_queue(bp, i)
  1013. netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
  1014. bnx2x_poll, 128);
  1015. bnx2x_napi_enable(bp);
  1016. if (bp->flags & USING_MSIX_FLAG) {
  1017. rc = bnx2x_req_msix_irqs(bp);
  1018. if (rc) {
  1019. bnx2x_free_irq(bp, true);
  1020. goto load_error1;
  1021. }
  1022. } else {
1023. /* Fall back to INTx if we failed to enable MSI-X due to lack of
  1024. memory (in bnx2x_set_num_queues()) */
  1025. if ((rc != -ENOMEM) && (bp->int_mode != INT_MODE_INTx))
  1026. bnx2x_enable_msi(bp);
  1027. bnx2x_ack_int(bp);
  1028. rc = bnx2x_req_irq(bp);
  1029. if (rc) {
  1030. BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
  1031. bnx2x_free_irq(bp, true);
  1032. goto load_error1;
  1033. }
  1034. if (bp->flags & USING_MSI_FLAG) {
  1035. bp->dev->irq = bp->pdev->irq;
  1036. netdev_info(bp->dev, "using MSI IRQ %d\n",
  1037. bp->pdev->irq);
  1038. }
  1039. }
1040. /* Send LOAD_REQUEST command to MCP.
1041. Returns the type of LOAD command:
1042. if this is the first port to be initialized,
1043. common blocks should be initialized, otherwise - not.
1044. */
  1045. if (!BP_NOMCP(bp)) {
  1046. load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
  1047. if (!load_code) {
  1048. BNX2X_ERR("MCP response failure, aborting\n");
  1049. rc = -EBUSY;
  1050. goto load_error2;
  1051. }
  1052. if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
  1053. rc = -EBUSY; /* other port in diagnostic mode */
  1054. goto load_error2;
  1055. }
  1056. } else {
  1057. int port = BP_PORT(bp);
  1058. DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
  1059. load_count[0], load_count[1], load_count[2]);
  1060. load_count[0]++;
  1061. load_count[1 + port]++;
  1062. DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
  1063. load_count[0], load_count[1], load_count[2]);
  1064. if (load_count[0] == 1)
  1065. load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
  1066. else if (load_count[1 + port] == 1)
  1067. load_code = FW_MSG_CODE_DRV_LOAD_PORT;
  1068. else
  1069. load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
  1070. }
  1071. if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
  1072. (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
  1073. bp->port.pmf = 1;
  1074. else
  1075. bp->port.pmf = 0;
  1076. DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
  1077. /* Initialize HW */
  1078. rc = bnx2x_init_hw(bp, load_code);
  1079. if (rc) {
  1080. BNX2X_ERR("HW init failed, aborting\n");
  1081. bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
  1082. bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
  1083. bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
  1084. goto load_error2;
  1085. }
  1086. /* Setup NIC internals and enable interrupts */
  1087. bnx2x_nic_init(bp, load_code);
  1088. if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
  1089. (bp->common.shmem2_base))
  1090. SHMEM2_WR(bp, dcc_support,
  1091. (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
  1092. SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
  1093. /* Send LOAD_DONE command to MCP */
  1094. if (!BP_NOMCP(bp)) {
  1095. load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
  1096. if (!load_code) {
  1097. BNX2X_ERR("MCP response failure, aborting\n");
  1098. rc = -EBUSY;
  1099. goto load_error3;
  1100. }
  1101. }
  1102. bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
  1103. rc = bnx2x_setup_leading(bp);
  1104. if (rc) {
  1105. BNX2X_ERR("Setup leading failed!\n");
  1106. #ifndef BNX2X_STOP_ON_ERROR
  1107. goto load_error3;
  1108. #else
  1109. bp->panic = 1;
  1110. return -EBUSY;
  1111. #endif
  1112. }
  1113. if (CHIP_IS_E1H(bp))
  1114. if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
  1115. DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
  1116. bp->flags |= MF_FUNC_DIS;
  1117. }
  1118. if (bp->state == BNX2X_STATE_OPEN) {
  1119. #ifdef BCM_CNIC
  1120. /* Enable Timer scan */
  1121. REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
  1122. #endif
  1123. for_each_nondefault_queue(bp, i) {
  1124. rc = bnx2x_setup_multi(bp, i);
  1125. if (rc)
  1126. #ifdef BCM_CNIC
  1127. goto load_error4;
  1128. #else
  1129. goto load_error3;
  1130. #endif
  1131. }
  1132. if (CHIP_IS_E1(bp))
  1133. bnx2x_set_eth_mac_addr_e1(bp, 1);
  1134. else
  1135. bnx2x_set_eth_mac_addr_e1h(bp, 1);
  1136. #ifdef BCM_CNIC
  1137. /* Set iSCSI L2 MAC */
  1138. mutex_lock(&bp->cnic_mutex);
  1139. if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
  1140. bnx2x_set_iscsi_eth_mac_addr(bp, 1);
  1141. bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
  1142. bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
  1143. CNIC_SB_ID(bp));
  1144. }
  1145. mutex_unlock(&bp->cnic_mutex);
  1146. #endif
  1147. }
  1148. if (bp->port.pmf)
  1149. bnx2x_initial_phy_init(bp, load_mode);
  1150. /* Start fast path */
  1151. switch (load_mode) {
  1152. case LOAD_NORMAL:
  1153. if (bp->state == BNX2X_STATE_OPEN) {
1154. /* Tx queues should only be re-enabled */
  1155. netif_tx_wake_all_queues(bp->dev);
  1156. }
  1157. /* Initialize the receive filter. */
  1158. bnx2x_set_rx_mode(bp->dev);
  1159. break;
  1160. case LOAD_OPEN:
  1161. netif_tx_start_all_queues(bp->dev);
  1162. if (bp->state != BNX2X_STATE_OPEN)
  1163. netif_tx_disable(bp->dev);
  1164. /* Initialize the receive filter. */
  1165. bnx2x_set_rx_mode(bp->dev);
  1166. break;
  1167. case LOAD_DIAG:
  1168. /* Initialize the receive filter. */
  1169. bnx2x_set_rx_mode(bp->dev);
  1170. bp->state = BNX2X_STATE_DIAG;
  1171. break;
  1172. default:
  1173. break;
  1174. }
  1175. if (!bp->port.pmf)
  1176. bnx2x__link_status_update(bp);
  1177. /* start the timer */
  1178. mod_timer(&bp->timer, jiffies + bp->current_interval);
  1179. #ifdef BCM_CNIC
  1180. bnx2x_setup_cnic_irq_info(bp);
  1181. if (bp->state == BNX2X_STATE_OPEN)
  1182. bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
  1183. #endif
  1184. bnx2x_inc_load_cnt(bp);
  1185. return 0;
  1186. #ifdef BCM_CNIC
  1187. load_error4:
  1188. /* Disable Timer scan */
  1189. REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
  1190. #endif
  1191. load_error3:
  1192. bnx2x_int_disable_sync(bp, 1);
  1193. if (!BP_NOMCP(bp)) {
  1194. bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
  1195. bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
  1196. }
  1197. bp->port.pmf = 0;
  1198. /* Free SKBs, SGEs, TPA pool and driver internals */
  1199. bnx2x_free_skbs(bp);
  1200. for_each_queue(bp, i)
  1201. bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
  1202. load_error2:
  1203. /* Release IRQs */
  1204. bnx2x_free_irq(bp, false);
  1205. load_error1:
  1206. bnx2x_napi_disable(bp);
  1207. for_each_queue(bp, i)
  1208. netif_napi_del(&bnx2x_fp(bp, i, napi));
  1209. bnx2x_free_mem(bp);
  1210. return rc;
  1211. }
  1212. /* must be called with rtnl_lock */
  1213. int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
  1214. {
  1215. int i;
  1216. if (bp->state == BNX2X_STATE_CLOSED) {
  1217. /* Interface has been removed - nothing to recover */
  1218. bp->recovery_state = BNX2X_RECOVERY_DONE;
  1219. bp->is_leader = 0;
  1220. bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
  1221. smp_wmb();
  1222. return -EINVAL;
  1223. }
  1224. #ifdef BCM_CNIC
  1225. bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
  1226. #endif
  1227. bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
  1228. /* Set "drop all" */
  1229. bp->rx_mode = BNX2X_RX_MODE_NONE;
  1230. bnx2x_set_storm_rx_mode(bp);
  1231. /* Disable HW interrupts, NAPI and Tx */
  1232. bnx2x_netif_stop(bp, 1);
  1233. netif_carrier_off(bp->dev);
  1234. del_timer_sync(&bp->timer);
  1235. SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
  1236. (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
  1237. bnx2x_stats_handle(bp, STATS_EVENT_STOP);
  1238. /* Release IRQs */
  1239. bnx2x_free_irq(bp, false);
  1240. /* Cleanup the chip if needed */
  1241. if (unload_mode != UNLOAD_RECOVERY)
  1242. bnx2x_chip_cleanup(bp, unload_mode);
  1243. bp->port.pmf = 0;
  1244. /* Free SKBs, SGEs, TPA pool and driver internals */
  1245. bnx2x_free_skbs(bp);
  1246. for_each_queue(bp, i)
  1247. bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
  1248. for_each_queue(bp, i)
  1249. netif_napi_del(&bnx2x_fp(bp, i, napi));
  1250. bnx2x_free_mem(bp);
  1251. bp->state = BNX2X_STATE_CLOSED;
  1252. /* The last driver must disable a "close the gate" if there is no
  1253. * parity attention or "process kill" pending.
  1254. */
  1255. if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
  1256. bnx2x_reset_is_done(bp))
  1257. bnx2x_disable_close_the_gate(bp);
1258. /* Reset the MCP mailbox sequence if there is an ongoing recovery */
  1259. if (unload_mode == UNLOAD_RECOVERY)
  1260. bp->fw_seq = 0;
  1261. return 0;
  1262. }
  1263. int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
  1264. {
  1265. u16 pmcsr;
  1266. pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
  1267. switch (state) {
  1268. case PCI_D0:
  1269. pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
  1270. ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
  1271. PCI_PM_CTRL_PME_STATUS));
  1272. if (pmcsr & PCI_PM_CTRL_STATE_MASK)
  1273. /* delay required during transition out of D3hot */
  1274. msleep(20);
  1275. break;
  1276. case PCI_D3hot:
1277. /* If there are other clients above, don't
  1278. shut down the power */
  1279. if (atomic_read(&bp->pdev->enable_cnt) != 1)
  1280. return 0;
  1281. /* Don't shut down the power for emulation and FPGA */
  1282. if (CHIP_REV_IS_SLOW(bp))
  1283. return 0;
  1284. pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
  1285. pmcsr |= 3;
  1286. if (bp->wol)
  1287. pmcsr |= PCI_PM_CTRL_PME_ENABLE;
  1288. pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
  1289. pmcsr);
  1290. /* No more memory access after this point until
  1291. * device is brought back to D0.
  1292. */
  1293. break;
  1294. default:
  1295. return -EINVAL;
  1296. }
  1297. return 0;
  1298. }
  1299. /*
  1300. * net_device service functions
  1301. */
  1302. static int bnx2x_poll(struct napi_struct *napi, int budget)
  1303. {
  1304. int work_done = 0;
  1305. struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
  1306. napi);
  1307. struct bnx2x *bp = fp->bp;
  1308. while (1) {
  1309. #ifdef BNX2X_STOP_ON_ERROR
  1310. if (unlikely(bp->panic)) {
  1311. napi_complete(napi);
  1312. return 0;
  1313. }
  1314. #endif
  1315. if (bnx2x_has_tx_work(fp))
  1316. bnx2x_tx_int(fp);
  1317. if (bnx2x_has_rx_work(fp)) {
  1318. work_done += bnx2x_rx_int(fp, budget - work_done);
  1319. /* must not complete if we consumed full budget */
  1320. if (work_done >= budget)
  1321. break;
  1322. }
  1323. /* Fall out from the NAPI loop if needed */
  1324. if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
  1325. bnx2x_update_fpsb_idx(fp);
  1326. /* bnx2x_has_rx_work() reads the status block, thus we need
  1327. * to ensure that status block indices have been actually read
  1328. * (bnx2x_update_fpsb_idx) prior to this check
  1329. * (bnx2x_has_rx_work) so that we won't write the "newer"
  1330. * value of the status block to IGU (if there was a DMA right
  1331. * after bnx2x_has_rx_work and if there is no rmb, the memory
  1332. * reading (bnx2x_update_fpsb_idx) may be postponed to right
  1333. * before bnx2x_ack_sb). In this case there will never be
  1334. * another interrupt until there is another update of the
  1335. * status block, while there is still unhandled work.
  1336. */
  1337. rmb();
  1338. if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
  1339. napi_complete(napi);
  1340. /* Re-enable interrupts */
  1341. bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
  1342. le16_to_cpu(fp->fp_c_idx),
  1343. IGU_INT_NOP, 1);
  1344. bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
  1345. le16_to_cpu(fp->fp_u_idx),
  1346. IGU_INT_ENABLE, 1);
  1347. break;
  1348. }
  1349. }
  1350. }
  1351. return work_done;
  1352. }
1353. /* We split the first BD into header and data BDs
1354. * to ease the pain of our fellow microcode engineers;
1355. * we use one mapping for both BDs.
1356. * So far this has only been observed to happen
1357. * in Other Operating Systems(TM).
1358. */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct sw_tx_bd *tx_buf,
				   struct eth_tx_start_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}
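/* Adjust a partially computed checksum when the stack's checksum start
 * does not coincide with the transport header: @fix is the signed byte
 * distance between the two, so the checksum over those bytes is
 * subtracted (fix > 0) or added (fix < 0), and the folded result is
 * byte-swapped before being written into the parsing BD.
 */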
static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
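/* Classify an skb for transmit: returns XMIT_PLAIN when no checksum
 * offload is requested, otherwise XMIT_CSUM_V4 or XMIT_CSUM_V6 (plus
 * XMIT_CSUM_TCP for TCP), and ORs in XMIT_GSO_V4/XMIT_GSO_V6 together
 * with the matching checksum flags for GSO packets.
 */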
static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == htons(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);

	return rc;
}
#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented)
   no need to check fragmentation if page size > 8K (there will be no
   violation to FW restrictions) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
					skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}
		} else {
			/* in non-LSO too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d hlen %d first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct netdev_queue *txq;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int i;
	u8 hlen = 0;
	__le16 pkt_size = 0;
	struct ethhdr *eth;
	u8 mac_type = UNICAST_ADDRESS;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, fp_index);

	fp = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		fp->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
	   " gso type %x xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

	eth = (struct ethhdr *)skb->data;

	/* set flag according to packet type (UNICAST_ADDRESS is default)*/
	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
		if (is_broadcast_ether_addr(eth->h_dest))
			mac_type = BROADCAST_ADDRESS;
		else
			mac_type = MULTICAST_ADDRESS;
	}

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif
	/*
	Please read carefully. First we use one BD which we mark as start,
	then we have a parsing info BD (used for TSO or xsum),
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pbd sizes are in words - NOT DWORDS!
	*/
	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;

	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = (mac_type <<
					ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p next_idx %u bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);

#ifdef BCM_VLAN
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
	} else
#endif
		tx_start_bd->vlan = cpu_to_le16(pkt_prod);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data =
			(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
				 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		hlen = hlen*2;

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d fix %d csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}
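	/* DMA-map the linear part of the skb; its address and length go
	 * into the start BD, and nbd is primed with the start BD, the
	 * parsing BD and one BD per page fragment.
	 */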
	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);

	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
	tx_start_bd->nbd = cpu_to_le16(nbd);
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	pkt_size = tx_start_bd->nbytes;

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
	   " nbytes %d flags %x vlan %x\n",
	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
	   tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d hlen %d total len %d tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
						 hlen, bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}
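	/* Chain one regular BD per page fragment; total_pkt_bd points at
	 * the first of them so total_pkt_bytes can be filled in once the
	 * full packet size is known.
	 */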
	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

		mapping = dma_map_page(&bp->pdev->dev, frag->page,
				       frag->page_offset,
				       frag->size, DMA_TO_DEVICE);

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(frag->size);
		le16_add_cpu(&pkt_size, frag->size);

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d bd @%p addr (%x:%x) nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
		   " tcp_flags %x xsum %x seq %u hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes packets must have BDs.
	 */
	wmb();

	fp->tx_db.data.prod += nbd;
	barrier();
	DOORBELL(bp, fp->index, fp->tx_db.raw);

	mmiowb();

	fp->tx_bd_prod += nbd;
	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_tx_stop_queue(txq);

		/* paired memory barrier is in bnx2x_tx_int(), we have to keep
		 * ordering of set_bit() in netif_tx_stop_queue() and read of
		 * fp->tx_bd_cons */
		smp_mb();

		fp->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}
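/* Copy the new MAC address into the net_device and, if the interface is
 * up, program it into the hardware via the E1/E1H-specific helper.
 */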
/* called with rtnl_lock */
int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_eth_mac_addr_e1(bp, 1);
		else
			bnx2x_set_eth_mac_addr_e1h(bp, 1);
	}

	return 0;
}
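/* Reject MTUs outside the supported range and defer the change while
 * parity error recovery is in progress; otherwise store the new MTU and,
 * if the interface is up, reload the NIC so the new size takes effect.
 */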
/* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}
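/* Watchdog TX timeout handler: defer the actual recovery to the reset
 * task so the netif can be shut down gracefully before resetting.
 */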
void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_delayed_work(&bp->reset_task, 0);
}
#ifdef BCM_VLAN
/* called with rtnl_lock */
void bnx2x_vlan_rx_register(struct net_device *dev,
			    struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}

#endif
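/* PM suspend callback: save PCI state, detach and unload the NIC if it
 * is running, then drop to the PCI power state chosen for @state.
 */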
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}
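/* PM resume callback: restore PCI state, return the device to D0 and
 * reload the NIC if it was running; resumption is refused while parity
 * error recovery is still in progress.
 */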
int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}