/* qlcnic_io.c */
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <linux/ipv6.h>

#include "qlcnic.h"

#define QLCNIC_MAC_HASH(MAC) \
	((((MAC) & 0x70000) >> 0x10) | (((MAC) & 0x70000000000ULL) >> 0x25))

#define TX_ETHER_PKT	0x01
#define TX_TCP_PKT	0x02
#define TX_UDP_PKT	0x03
#define TX_IP_PKT	0x04
#define TX_TCP_LSO	0x05
#define TX_TCP_LSO6	0x06
#define TX_TCPV6_PKT	0x0b
#define TX_UDPV6_PKT	0x0c
#define FLAGS_VLAN_TAGGED	0x10
#define FLAGS_VLAN_OOB		0x40

#define qlcnic_set_tx_vlan_tci(cmd_desc, v)	\
	(cmd_desc)->vlan_TCI = cpu_to_le16(v);
#define qlcnic_set_cmd_desc_port(cmd_desc, var)	\
	((cmd_desc)->port_ctxid |= ((var) & 0x0F))
#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var)	\
	((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))

#define qlcnic_set_tx_port(_desc, _port) \
	((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))

#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
	((_desc)->flags_opcode |= \
	cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))

#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
	((_desc)->nfrags__length = \
	cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)))

/* owner bits of status_desc */
#define STATUS_OWNER_HOST	(0x1ULL << 56)
#define STATUS_OWNER_PHANTOM	(0x2ULL << 56)

/* Status descriptor:
 * 0-3 port, 4-7 status, 8-11 type, 12-27 total_length
 * 28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
 * 53-55 desc_cnt, 56-57 owner, 58-63 opcode
 */
#define qlcnic_get_sts_port(sts_data)	\
	((sts_data) & 0x0F)
#define qlcnic_get_sts_status(sts_data)	\
	(((sts_data) >> 4) & 0x0F)
#define qlcnic_get_sts_type(sts_data)	\
	(((sts_data) >> 8) & 0x0F)
#define qlcnic_get_sts_totallength(sts_data)	\
	(((sts_data) >> 12) & 0xFFFF)
#define qlcnic_get_sts_refhandle(sts_data)	\
	(((sts_data) >> 28) & 0xFFFF)
#define qlcnic_get_sts_prot(sts_data)	\
	(((sts_data) >> 44) & 0x0F)
#define qlcnic_get_sts_pkt_offset(sts_data)	\
	(((sts_data) >> 48) & 0x1F)
#define qlcnic_get_sts_desc_cnt(sts_data)	\
	(((sts_data) >> 53) & 0x7)
#define qlcnic_get_sts_opcode(sts_data)	\
	(((sts_data) >> 58) & 0x03F)

#define qlcnic_get_lro_sts_refhandle(sts_data)	\
	((sts_data) & 0x07FFF)
#define qlcnic_get_lro_sts_length(sts_data)	\
	(((sts_data) >> 16) & 0x0FFFF)
#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data)	\
	(((sts_data) >> 32) & 0x0FF)
#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data)	\
	(((sts_data) >> 40) & 0x0FF)
#define qlcnic_get_lro_sts_timestamp(sts_data)	\
	(((sts_data) >> 48) & 0x1)
#define qlcnic_get_lro_sts_type(sts_data)	\
	(((sts_data) >> 49) & 0x7)
#define qlcnic_get_lro_sts_push_flag(sts_data)	\
	(((sts_data) >> 52) & 0x1)
#define qlcnic_get_lro_sts_seq_number(sts_data)	\
	((sts_data) & 0x0FFFFFFFF)
#define qlcnic_get_lro_sts_mss(sts_data1)	\
	((sts_data1 >> 32) & 0x0FFFF)

/* opcode field in status_desc */
#define QLCNIC_SYN_OFFLOAD	0x03
#define QLCNIC_RXPKT_DESC	0x04
#define QLCNIC_OLD_RXPKT_DESC	0x3f
#define QLCNIC_RESPONSE_DESC	0x05
#define QLCNIC_LRO_DESC		0x12

/* for status field in status_desc */
#define STATUS_CKSUM_LOOP	0
#define STATUS_CKSUM_OK		2
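/*
 * Post a MAC learn request on the Tx ring asking the firmware to add
 * (or re-add) the given source MAC, optionally qualified by a VLAN id,
 * to its filter table.
 */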
static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
				 u64 uaddr, __le16 vlan_id,
				 struct qlcnic_host_tx_ring *tx_ring)
{
	struct cmd_desc_type0 *hwdesc;
	struct qlcnic_nic_req *req;
	struct qlcnic_mac_req *mac_req;
	struct qlcnic_vlan_req *vlan_req;
	u32 producer;
	u64 word;

	producer = tx_ring->producer;
	hwdesc = &tx_ring->desc_head[tx_ring->producer];

	req = (struct qlcnic_nic_req *)hwdesc;
	memset(req, 0, sizeof(struct qlcnic_nic_req));
	req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);

	word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
	req->req_hdr = cpu_to_le64(word);

	mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
	mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
	memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);

	vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
	vlan_req->vlan_id = vlan_id;

	tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
	smp_mb();
}
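/*
 * Learn the source MAC (and, on eSwitch-enabled adapters, the VLAN id)
 * of an outgoing frame.  Known entries are refreshed; new entries are
 * added to the driver hash and pushed to the firmware via
 * qlcnic_change_filter().
 */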
static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
			       struct qlcnic_host_tx_ring *tx_ring,
			       struct cmd_desc_type0 *first_desc,
			       struct sk_buff *skb)
{
	struct ethhdr *phdr = (struct ethhdr *)(skb->data);
	struct qlcnic_filter *fil, *tmp_fil;
	struct hlist_node *tmp_hnode, *n;
	struct hlist_head *head;
	u64 src_addr = 0;
	__le16 vlan_id = 0;
	u8 hindex;

	if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
		return;

	if (adapter->fhash.fnum >= adapter->fhash.fmax)
		return;

	/* Only NPAR capable devices support vlan based learning */
	if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
		vlan_id = first_desc->vlan_TCI;
	memcpy(&src_addr, phdr->h_source, ETH_ALEN);
	hindex = QLCNIC_MAC_HASH(src_addr) & (QLCNIC_LB_MAX_FILTERS - 1);
	head = &(adapter->fhash.fhead[hindex]);

	hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
		if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
		    tmp_fil->vlan_id == vlan_id) {
			if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
				qlcnic_change_filter(adapter, src_addr, vlan_id,
						     tx_ring);
			tmp_fil->ftime = jiffies;
			return;
		}
	}

	fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
	if (!fil)
		return;

	qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring);
	fil->ftime = jiffies;
	fil->vlan_id = vlan_id;
	memcpy(fil->faddr, &src_addr, ETH_ALEN);
	spin_lock(&adapter->mac_learn_lock);
	hlist_add_head(&(fil->fnode), head);
	adapter->fhash.fnum++;
	spin_unlock(&adapter->mac_learn_lock);
}
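/*
 * Fill in the protocol-dependent parts of the first Tx descriptor:
 * VLAN tag handling (including PVID insertion), checksum-offload
 * opcode selection and, for LSO frames, a copy of the MAC/IP/TCP
 * headers into the descriptor ring for the firmware.
 */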
static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
			 struct cmd_desc_type0 *first_desc, struct sk_buff *skb)
{
	u8 l4proto, opcode = 0, hdr_len = 0;
	u16 flags = 0, vlan_tci = 0;
	int copied, offset, copy_len, size;
	struct cmd_desc_type0 *hwdesc;
	struct vlan_ethhdr *vh;
	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
	u16 protocol = ntohs(skb->protocol);
	u32 producer = tx_ring->producer;

	if (protocol == ETH_P_8021Q) {
		vh = (struct vlan_ethhdr *)skb->data;
		flags = FLAGS_VLAN_TAGGED;
		vlan_tci = ntohs(vh->h_vlan_TCI);
		protocol = ntohs(vh->h_vlan_encapsulated_proto);
	} else if (vlan_tx_tag_present(skb)) {
		flags = FLAGS_VLAN_OOB;
		vlan_tci = vlan_tx_tag_get(skb);
	}

	if (unlikely(adapter->pvid)) {
		if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
			return -EIO;
		if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
			goto set_flags;

		flags = FLAGS_VLAN_OOB;
		vlan_tci = adapter->pvid;
	}
set_flags:
	qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);

	if (*(skb->data) & BIT_0) {
		flags |= BIT_0;
		memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
	}
	opcode = TX_ETHER_PKT;
	if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
	    skb_shinfo(skb)->gso_size > 0) {
		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		first_desc->total_hdr_length = hdr_len;
		opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO;

		/* For LSO, we need to copy the MAC/IP/TCP headers into
		 * the descriptor ring
		 */
		copied = 0;
		offset = 2;

		if (flags & FLAGS_VLAN_OOB) {
			first_desc->total_hdr_length += VLAN_HLEN;
			first_desc->tcp_hdr_offset = VLAN_HLEN;
			first_desc->ip_hdr_offset = VLAN_HLEN;

			/* Only in case of TSO on vlan device */
			flags |= FLAGS_VLAN_TAGGED;

			/* Create a TSO vlan header template for firmware */
			hwdesc = &tx_ring->desc_head[producer];
			tx_ring->cmd_buf_arr[producer].skb = NULL;

			copy_len = min((int)sizeof(struct cmd_desc_type0) -
				       offset, hdr_len + VLAN_HLEN);
			vh = (struct vlan_ethhdr *)((char *) hwdesc + 2);
			skb_copy_from_linear_data(skb, vh, 12);
			vh->h_vlan_proto = htons(ETH_P_8021Q);
			vh->h_vlan_TCI = htons(vlan_tci);

			skb_copy_from_linear_data_offset(skb, 12,
							 (char *)vh + 16,
							 copy_len - 16);
			copied = copy_len - VLAN_HLEN;
			offset = 0;
			producer = get_next_index(producer, tx_ring->num_desc);
		}

		while (copied < hdr_len) {
			size = (int)sizeof(struct cmd_desc_type0) - offset;
			copy_len = min(size, (hdr_len - copied));
			hwdesc = &tx_ring->desc_head[producer];
			tx_ring->cmd_buf_arr[producer].skb = NULL;
			skb_copy_from_linear_data_offset(skb, copied,
							 (char *)hwdesc +
							 offset, copy_len);
			copied += copy_len;
			offset = 0;
			producer = get_next_index(producer, tx_ring->num_desc);
		}

		tx_ring->producer = producer;
		smp_mb();
		adapter->stats.lso_frames++;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (protocol == ETH_P_IP) {
			l4proto = ip_hdr(skb)->protocol;

			if (l4proto == IPPROTO_TCP)
				opcode = TX_TCP_PKT;
			else if (l4proto == IPPROTO_UDP)
				opcode = TX_UDP_PKT;
		} else if (protocol == ETH_P_IPV6) {
			l4proto = ipv6_hdr(skb)->nexthdr;

			if (l4proto == IPPROTO_TCP)
				opcode = TX_TCPV6_PKT;
			else if (l4proto == IPPROTO_UDP)
				opcode = TX_UDPV6_PKT;
		}
	}
	first_desc->tcp_hdr_offset += skb_transport_offset(skb);
	first_desc->ip_hdr_offset += skb_network_offset(skb);
	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);

	return 0;
}
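/*
 * DMA-map the skb's linear data and each page fragment into the
 * command buffer's frag_array, unwinding all mappings on failure.
 */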
static int qlcnic_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
			     struct qlcnic_cmd_buffer *pbuf)
{
	struct qlcnic_skb_frag *nf;
	struct skb_frag_struct *frag;
	int i, nr_frags;
	dma_addr_t map;

	nr_frags = skb_shinfo(skb)->nr_frags;
	nf = &pbuf->frag_array[0];

	map = pci_map_single(pdev, skb->data, skb_headlen(skb),
			     PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, map))
		goto out_err;

	nf->dma = map;
	nf->length = skb_headlen(skb);

	for (i = 0; i < nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		nf = &pbuf->frag_array[i+1];
		map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
				       DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, map))
			goto unwind;

		nf->dma = map;
		nf->length = skb_frag_size(frag);
	}

	return 0;

unwind:
	while (--i >= 0) {
		nf = &pbuf->frag_array[i+1];
		pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
	}

	nf = &pbuf->frag_array[0];
	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);

out_err:
	return -ENOMEM;
}

static void qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
				 struct qlcnic_cmd_buffer *pbuf)
{
	struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
	int i, nr_frags = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < nr_frags; i++) {
		nf = &pbuf->frag_array[i+1];
		pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
	}

	nf = &pbuf->frag_array[0];
	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
	pbuf->skb = NULL;
}

static inline void qlcnic_clear_cmddesc(u64 *desc)
{
	desc[0] = 0ULL;
	desc[2] = 0ULL;
	desc[7] = 0ULL;
}
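/*
 * Main transmit entry point: pull down excess fragments, reserve ring
 * space, DMA-map the skb, build the buffer descriptors, then hand the
 * protocol-specific fields to qlcnic_tx_pkt() and update the command
 * producer register.
 */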
netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
	struct qlcnic_cmd_buffer *pbuf;
	struct qlcnic_skb_frag *buffrag;
	struct cmd_desc_type0 *hwdesc, *first_desc;
	struct pci_dev *pdev;
	struct ethhdr *phdr;
	int i, k, frag_count, delta = 0;
	u32 producer, num_txd;

	num_txd = tx_ring->num_desc;

	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
		netif_stop_queue(netdev);
		return NETDEV_TX_BUSY;
	}

	if (adapter->flags & QLCNIC_MACSPOOF) {
		phdr = (struct ethhdr *)skb->data;
		if (!ether_addr_equal(phdr->h_source, adapter->mac_addr))
			goto drop_packet;
	}

	frag_count = skb_shinfo(skb)->nr_frags + 1;
	/* 14 frags supported for normal packet and
	 * 32 frags supported for TSO packet
	 */
	if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
		for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
			delta += skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (!__pskb_pull_tail(skb, delta))
			goto drop_packet;

		frag_count = 1 + skb_shinfo(skb)->nr_frags;
	}

	if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
		netif_stop_queue(netdev);
		if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
			netif_start_queue(netdev);
		} else {
			adapter->stats.xmit_off++;
			return NETDEV_TX_BUSY;
		}
	}

	producer = tx_ring->producer;
	pbuf = &tx_ring->cmd_buf_arr[producer];
	pdev = adapter->pdev;
	first_desc = &tx_ring->desc_head[producer];
	hwdesc = &tx_ring->desc_head[producer];
	qlcnic_clear_cmddesc((u64 *)hwdesc);

	if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
		adapter->stats.tx_dma_map_error++;
		goto drop_packet;
	}

	pbuf->skb = skb;
	pbuf->frag_count = frag_count;

	qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
	qlcnic_set_tx_port(first_desc, adapter->portnum);

	for (i = 0; i < frag_count; i++) {
		k = i % 4;

		if ((k == 0) && (i > 0)) {
			/* move to next desc. */
			producer = get_next_index(producer, num_txd);
			hwdesc = &tx_ring->desc_head[producer];
			qlcnic_clear_cmddesc((u64 *)hwdesc);
			tx_ring->cmd_buf_arr[producer].skb = NULL;
		}

		buffrag = &pbuf->frag_array[i];

		hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
		switch (k) {
		case 0:
			hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
			break;
		case 1:
			hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
			break;
		case 2:
			hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
			break;
		case 3:
			hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
			break;
		}
	}

	tx_ring->producer = get_next_index(producer, num_txd);
	smp_mb();

	if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
		goto unwind_buff;

	if (adapter->mac_learn)
		qlcnic_send_filter(adapter, tx_ring, first_desc, skb);

	adapter->stats.txbytes += skb->len;
	adapter->stats.xmitcalled++;

	qlcnic_update_cmd_producer(tx_ring);

	return NETDEV_TX_OK;

unwind_buff:
	qlcnic_unmap_buffers(pdev, skb, pbuf);
drop_packet:
	adapter->stats.txdropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->ahw->linkup && !linkup) {
		netdev_info(netdev, "NIC Link is down\n");
		adapter->ahw->linkup = 0;
		if (netif_running(netdev)) {
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
		}
	} else if (!adapter->ahw->linkup && linkup) {
		netdev_info(netdev, "NIC Link is up\n");
		adapter->ahw->linkup = 1;
		if (netif_running(netdev)) {
			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
		}
	}
}

static int qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
			       struct qlcnic_host_rds_ring *rds_ring,
			       struct qlcnic_rx_buffer *buffer)
{
	struct sk_buff *skb;
	dma_addr_t dma;
	struct pci_dev *pdev = adapter->pdev;

	skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size);
	if (!skb) {
		adapter->stats.skb_alloc_failure++;
		return -ENOMEM;
	}

	skb_reserve(skb, NET_IP_ALIGN);
	dma = pci_map_single(pdev, skb->data, rds_ring->dma_size,
			     PCI_DMA_FROMDEVICE);

	if (pci_dma_mapping_error(pdev, dma)) {
		adapter->stats.rx_dma_map_error++;
		dev_kfree_skb_any(skb);
		return -ENOMEM;
	}

	buffer->skb = skb;
	buffer->dma = dma;

	return 0;
}

static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
					struct qlcnic_host_rds_ring *rds_ring)
{
	struct rcv_desc *pdesc;
	struct qlcnic_rx_buffer *buffer;
	int count = 0;
	uint32_t producer;
	struct list_head *head;

	if (!spin_trylock(&rds_ring->lock))
		return;

	producer = rds_ring->producer;
	head = &rds_ring->free_list;

	while (!list_empty(head)) {
		buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);

		if (!buffer->skb) {
			if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
				break;
		}

		count++;
		list_del(&buffer->list);

		/* make a rcv descriptor */
		pdesc = &rds_ring->desc_head[producer];
		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
		producer = get_next_index(producer, rds_ring->num_desc);
	}

	if (count) {
		rds_ring->producer = producer;
		writel((producer - 1) & (rds_ring->num_desc - 1),
		       rds_ring->crb_rcv_producer);
	}

	spin_unlock(&rds_ring->lock);
}
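/*
 * Reclaim Tx descriptors completed by the firmware: unmap and free the
 * associated skbs, advance the software consumer, and restart the netdev
 * queue once enough descriptors are free again.
 */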
static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
{
	u32 sw_consumer, hw_consumer;
	int i, done, count = 0;
	struct qlcnic_cmd_buffer *buffer;
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_skb_frag *frag;
	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;

	if (!spin_trylock(&adapter->tx_clean_lock))
		return 1;

	sw_consumer = tx_ring->sw_consumer;
	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));

	while (sw_consumer != hw_consumer) {
		buffer = &tx_ring->cmd_buf_arr[sw_consumer];
		if (buffer->skb) {
			frag = &buffer->frag_array[0];
			pci_unmap_single(pdev, frag->dma, frag->length,
					 PCI_DMA_TODEVICE);
			frag->dma = 0ULL;
			for (i = 1; i < buffer->frag_count; i++) {
				frag++;
				pci_unmap_page(pdev, frag->dma, frag->length,
					       PCI_DMA_TODEVICE);
				frag->dma = 0ULL;
			}

			adapter->stats.xmitfinished++;
			dev_kfree_skb_any(buffer->skb);
			buffer->skb = NULL;
		}

		sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
		if (++count >= MAX_STATUS_HANDLE)
			break;
	}

	if (count && netif_running(netdev)) {
		tx_ring->sw_consumer = sw_consumer;
		smp_mb();
		if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
			if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
				netif_wake_queue(netdev);
				adapter->stats.xmit_on++;
			}
		}
		adapter->tx_timeo_cnt = 0;
	}
	/*
	 * If everything is freed up to consumer then check if the ring is full
	 * If the ring is full then check if more needs to be freed and
	 * schedule the call back again.
	 *
	 * This happens when there are 2 CPUs. One could be freeing and the
	 * other filling it. If the ring is full when we get out of here and
	 * the card has already interrupted the host then the host can miss the
	 * interrupt.
	 *
	 * There is still a possible race condition and the host could miss an
	 * interrupt. The card has to take care of this.
	 */
	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
	done = (sw_consumer == hw_consumer);
	spin_unlock(&adapter->tx_clean_lock);

	return done;
}
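/*
 * NAPI poll handler for the ring that also services Tx completions:
 * Rx processing is bounded by the budget, and the interrupt is only
 * re-enabled once both Rx and Tx work are fully drained.
 */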
static int qlcnic_poll(struct napi_struct *napi, int budget)
{
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;
	int tx_complete, work_done;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;

	tx_complete = qlcnic_process_cmd_ring(adapter);
	work_done = qlcnic_process_rcv_ring(sds_ring, budget);

	if ((work_done < budget) && tx_complete) {
		napi_complete(&sds_ring->napi);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
			qlcnic_enable_int(sds_ring);
	}

	return work_done;
}

static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
{
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;
	int work_done;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;

	work_done = qlcnic_process_rcv_ring(sds_ring, budget);

	if (work_done < budget) {
		napi_complete(&sds_ring->napi);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
			qlcnic_enable_int(sds_ring);
	}

	return work_done;
}
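/*
 * Decode a firmware link-event message (speed, duplex, autoneg, module
 * and loopback status) and propagate the carrier change to the netdev.
 */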
static void qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
				    struct qlcnic_fw_msg *msg)
{
	u32 cable_OUI;
	u16 cable_len, link_speed;
	u8 link_status, module, duplex, autoneg, lb_status = 0;
	struct net_device *netdev = adapter->netdev;

	adapter->ahw->has_link_events = 1;

	cable_OUI = msg->body[1] & 0xffffffff;
	cable_len = (msg->body[1] >> 32) & 0xffff;
	link_speed = (msg->body[1] >> 48) & 0xffff;

	link_status = msg->body[2] & 0xff;
	duplex = (msg->body[2] >> 16) & 0xff;
	autoneg = (msg->body[2] >> 24) & 0xff;
	lb_status = (msg->body[2] >> 32) & 0x3;

	module = (msg->body[2] >> 8) & 0xff;
	if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE)
		dev_info(&netdev->dev,
			 "unsupported cable: OUI 0x%x, length %d\n",
			 cable_OUI, cable_len);
	else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN)
		dev_info(&netdev->dev, "unsupported cable length %d\n",
			 cable_len);

	if (!link_status && (lb_status == QLCNIC_ILB_MODE ||
	    lb_status == QLCNIC_ELB_MODE))
		adapter->ahw->loopback_state |= QLCNIC_LINKEVENT;

	qlcnic_advert_link_change(adapter, link_status);

	if (duplex == LINKEVENT_FULL_DUPLEX)
		adapter->ahw->link_duplex = DUPLEX_FULL;
	else
		adapter->ahw->link_duplex = DUPLEX_HALF;

	adapter->ahw->module_type = module;
	adapter->ahw->link_autoneg = autoneg;

	if (link_status) {
		adapter->ahw->link_speed = link_speed;
	} else {
		adapter->ahw->link_speed = SPEED_UNKNOWN;
		adapter->ahw->link_duplex = DUPLEX_UNKNOWN;
	}
}

static void qlcnic_handle_fw_message(int desc_cnt, int index,
				     struct qlcnic_host_sds_ring *sds_ring)
{
	struct qlcnic_fw_msg msg;
	struct status_desc *desc;
	struct qlcnic_adapter *adapter;
	struct device *dev;
	int i = 0, opcode, ret;

	while (desc_cnt > 0 && i < 8) {
		desc = &sds_ring->desc_head[index];
		msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
		msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);
		index = get_next_index(index, sds_ring->num_desc);
		desc_cnt--;
	}

	adapter = sds_ring->adapter;
	dev = &adapter->pdev->dev;
	opcode = qlcnic_get_nic_msg_opcode(msg.body[0]);

	switch (opcode) {
	case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
		qlcnic_handle_linkevent(adapter, &msg);
		break;
	case QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK:
		ret = (u32)(msg.body[1]);
		switch (ret) {
		case 0:
			adapter->ahw->loopback_state |= QLCNIC_LB_RESPONSE;
			break;
		case 1:
			dev_info(dev, "loopback already in progress\n");
			adapter->ahw->diag_cnt = -QLCNIC_TEST_IN_PROGRESS;
			break;
		case 2:
			dev_info(dev, "loopback cable is not connected\n");
			adapter->ahw->diag_cnt = -QLCNIC_LB_CABLE_NOT_CONN;
			break;
		default:
			dev_info(dev,
				 "loopback configure request failed, err %x\n",
				 ret);
			adapter->ahw->diag_cnt = -QLCNIC_UNDEFINED_ERROR;
			break;
		}
		break;
	default:
		break;
	}
}
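/*
 * Detach the skb for a given receive handle: unmap its DMA buffer and
 * set the checksum status based on the hardware verdict.
 */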
static struct sk_buff *
qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
		     struct qlcnic_host_rds_ring *rds_ring, u16 index,
		     u16 cksum)
{
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;

	buffer = &rds_ring->rx_buf_arr[index];

	if (unlikely(buffer->skb == NULL)) {
		WARN_ON(1);
		return NULL;
	}

	pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
			 PCI_DMA_FROMDEVICE);

	skb = buffer->skb;

	if (likely((adapter->netdev->features & NETIF_F_RXCSUM) &&
		   (cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) {
		adapter->stats.csummed++;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		skb_checksum_none_assert(skb);
	}

	buffer->skb = NULL;

	return skb;
}

static inline int qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter,
					  struct sk_buff *skb, u16 *vlan_tag)
{
	struct ethhdr *eth_hdr;

	if (!__vlan_get_tag(skb, vlan_tag)) {
		eth_hdr = (struct ethhdr *)skb->data;
		memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
		skb_pull(skb, VLAN_HLEN);
	}
	if (!adapter->pvid)
		return 0;

	if (*vlan_tag == adapter->pvid) {
		/* Outer vlan tag. Packet should follow non-vlan path */
		*vlan_tag = 0xffff;
		return 0;
	}
	if (adapter->flags & QLCNIC_TAGGING_ENABLED)
		return 0;

	return -EINVAL;
}
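/*
 * Handle one received-packet status descriptor: look up the Rx buffer,
 * build the skb, strip the VLAN tag if needed and hand the frame to the
 * stack via GRO.  Returns the consumed buffer so it can be recycled.
 */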
static struct qlcnic_rx_buffer *
qlcnic_process_rcv(struct qlcnic_adapter *adapter,
		   struct qlcnic_host_sds_ring *sds_ring, int ring,
		   u64 sts_data0)
{
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	int index, length, cksum, pkt_offset;
	u16 vid = 0xffff;

	if (unlikely(ring >= adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_get_sts_refhandle(sts_data0);
	if (unlikely(index >= rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];

	length = qlcnic_get_sts_totallength(sts_data0);
	cksum = qlcnic_get_sts_status(sts_data0);
	pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
	if (!skb)
		return buffer;

	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);

	if (pkt_offset)
		skb_pull(skb, pkt_offset);

	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
		adapter->stats.rxdropped++;
		dev_kfree_skb(skb);
		return buffer;
	}

	skb->protocol = eth_type_trans(skb, netdev);

	if (vid != 0xffff)
		__vlan_hwaccel_put_tag(skb, vid);

	napi_gro_receive(&sds_ring->napi, skb);

	adapter->stats.rx_pkts++;
	adapter->stats.rxbytes += length;

	return buffer;
}

#define QLC_TCP_HDR_SIZE	20
#define QLC_TCP_TS_OPTION_SIZE	12
#define QLC_TCP_TS_HDR_SIZE	(QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE)
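/*
 * Handle an LRO status descriptor: the firmware has already coalesced
 * the TCP segments, so rebuild the IP total length/checksum and the TCP
 * sequence/PSH fields before passing the aggregated frame up the stack.
 */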
static struct qlcnic_rx_buffer *
qlcnic_process_lro(struct qlcnic_adapter *adapter,
		   int ring, u64 sts_data0, u64 sts_data1)
{
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	struct iphdr *iph;
	struct tcphdr *th;
	bool push, timestamp;
	int index, l2_hdr_offset, l4_hdr_offset;
	u16 lro_length, length, data_offset, vid = 0xffff;
	u32 seq_number;

	if (unlikely(ring >= adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_get_lro_sts_refhandle(sts_data0);
	if (unlikely(index >= rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];

	timestamp = qlcnic_get_lro_sts_timestamp(sts_data0);
	lro_length = qlcnic_get_lro_sts_length(sts_data0);
	l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0);
	l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0);
	push = qlcnic_get_lro_sts_push_flag(sts_data0);
	seq_number = qlcnic_get_lro_sts_seq_number(sts_data1);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
	if (!skb)
		return buffer;

	if (timestamp)
		data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
	else
		data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE;

	skb_put(skb, lro_length + data_offset);
	skb_pull(skb, l2_hdr_offset);

	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
		adapter->stats.rxdropped++;
		dev_kfree_skb(skb);
		return buffer;
	}

	skb->protocol = eth_type_trans(skb, netdev);
	iph = (struct iphdr *)skb->data;
	th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
	length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
	iph->tot_len = htons(length);
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
	th->psh = push;
	th->seq = htonl(seq_number);
	length = skb->len;

	if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP)
		skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1);

	if (vid != 0xffff)
		__vlan_hwaccel_put_tag(skb, vid);
	netif_receive_skb(skb);

	adapter->stats.lro_pkts++;
	adapter->stats.lrobytes += length;

	return buffer;
}
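/*
 * Main status-ring processing loop: walk host-owned status descriptors
 * up to the NAPI budget, dispatch by opcode (Rx, LRO, firmware message),
 * return descriptors to the firmware and replenish the Rx rings.
 */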
int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
{
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct list_head *cur;
	struct status_desc *desc;
	struct qlcnic_rx_buffer *rxbuf;
	u64 sts_data0, sts_data1;
	__le64 owner_phantom = cpu_to_le64(STATUS_OWNER_PHANTOM);
	int opcode, ring, desc_cnt, count = 0;
	u32 consumer = sds_ring->consumer;

	while (count < max) {
		desc = &sds_ring->desc_head[consumer];
		sts_data0 = le64_to_cpu(desc->status_desc_data[0]);

		if (!(sts_data0 & STATUS_OWNER_HOST))
			break;

		desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
		opcode = qlcnic_get_sts_opcode(sts_data0);

		switch (opcode) {
		case QLCNIC_RXPKT_DESC:
		case QLCNIC_OLD_RXPKT_DESC:
		case QLCNIC_SYN_OFFLOAD:
			ring = qlcnic_get_sts_type(sts_data0);
			rxbuf = qlcnic_process_rcv(adapter, sds_ring, ring,
						   sts_data0);
			break;
		case QLCNIC_LRO_DESC:
			ring = qlcnic_get_lro_sts_type(sts_data0);
			sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
			rxbuf = qlcnic_process_lro(adapter, ring, sts_data0,
						   sts_data1);
			break;
		case QLCNIC_RESPONSE_DESC:
			qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
		default:
			goto skip;
		}

		WARN_ON(desc_cnt > 1);

		if (likely(rxbuf))
			list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
		else
			adapter->stats.null_rxbuf++;

skip:
		for (; desc_cnt > 0; desc_cnt--) {
			desc = &sds_ring->desc_head[consumer];
			desc->status_desc_data[0] = owner_phantom;
			consumer = get_next_index(consumer, sds_ring->num_desc);
		}
		count++;
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &adapter->recv_ctx->rds_rings[ring];

		if (!list_empty(&sds_ring->free_list[ring])) {
			list_for_each(cur, &sds_ring->free_list[ring]) {
				rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
						   list);
				qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
			}
			spin_lock(&rds_ring->lock);
			list_splice_tail_init(&sds_ring->free_list[ring],
					      &rds_ring->free_list);
			spin_unlock(&rds_ring->lock);
		}

		qlcnic_post_rx_buffers_nodb(adapter, rds_ring);
	}

	if (count) {
		sds_ring->consumer = consumer;
		writel(consumer, sds_ring->crb_sts_consumer);
	}

	return count;
}
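/*
 * Refill the receive descriptor ring from its free list and, if any
 * buffers were posted, write the new producer index to the hardware.
 */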
void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
			    struct qlcnic_host_rds_ring *rds_ring)
{
	struct rcv_desc *pdesc;
	struct qlcnic_rx_buffer *buffer;
	int count = 0;
	u32 producer;
	struct list_head *head;

	producer = rds_ring->producer;
	head = &rds_ring->free_list;

	while (!list_empty(head)) {
		buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);

		if (!buffer->skb) {
			if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
				break;
		}

		count++;
		list_del(&buffer->list);

		/* make a rcv descriptor */
		pdesc = &rds_ring->desc_head[producer];
		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
		producer = get_next_index(producer, rds_ring->num_desc);
	}

	if (count) {
		rds_ring->producer = producer;
		writel((producer - 1) & (rds_ring->num_desc - 1),
		       rds_ring->crb_rcv_producer);
	}
}
static void dump_skb(struct sk_buff *skb, struct qlcnic_adapter *adapter)
{
	int i;
	unsigned char *data = skb->data;

	pr_info("\n");
	for (i = 0; i < skb->len; i++) {
		QLCDB(adapter, DRV, "%02x ", data[i]);
		if ((i & 0x0f) == 8)
			pr_info("\n");
	}
}
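/*
 * Diagnostic Rx path used while a loopback test is running: count frames
 * that carry the expected loopback pattern and dump unexpected ones.
 */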
static void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter, int ring,
				    u64 sts_data0)
{
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	int index, length, cksum, pkt_offset;

	if (unlikely(ring >= adapter->max_rds_rings))
		return;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_get_sts_refhandle(sts_data0);
	length = qlcnic_get_sts_totallength(sts_data0);
	if (unlikely(index >= rds_ring->num_desc))
		return;

	cksum = qlcnic_get_sts_status(sts_data0);
	pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
	if (!skb)
		return;

	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);

	if (pkt_offset)
		skb_pull(skb, pkt_offset);

	if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
		adapter->ahw->diag_cnt++;
	else
		dump_skb(skb, adapter);

	dev_kfree_skb_any(skb);
	adapter->stats.rx_pkts++;
	adapter->stats.rxbytes += length;
}

void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
{
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct status_desc *desc;
	u64 sts_data0;
	int ring, opcode, desc_cnt;
	u32 consumer = sds_ring->consumer;

	desc = &sds_ring->desc_head[consumer];
	sts_data0 = le64_to_cpu(desc->status_desc_data[0]);

	if (!(sts_data0 & STATUS_OWNER_HOST))
		return;

	desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
	opcode = qlcnic_get_sts_opcode(sts_data0);

	switch (opcode) {
	case QLCNIC_RESPONSE_DESC:
		qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
		break;
	default:
		ring = qlcnic_get_sts_type(sts_data0);
		qlcnic_process_rcv_diag(adapter, ring, sts_data0);
		break;
	}

	for (; desc_cnt > 0; desc_cnt--) {
		desc = &sds_ring->desc_head[consumer];
		desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
		consumer = get_next_index(consumer, sds_ring->num_desc);
	}

	sds_ring->consumer = consumer;
	writel(consumer, sds_ring->crb_sts_consumer);
}
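/*
 * Assemble a 6-byte MAC address from the two 32-bit register words,
 * shifting first when the alternate-MAC register layout is in use.
 */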
void qlcnic_fetch_mac(u32 off1, u32 off2, u8 alt_mac, u8 *mac)
{
	u32 mac_low, mac_high;
	int i;

	mac_low = off1;
	mac_high = off2;

	if (alt_mac) {
		mac_low |= (mac_low >> 16) | (mac_high << 16);
		mac_high >>= 16;
	}

	for (i = 0; i < 2; i++)
		mac[i] = (u8)(mac_high >> ((1 - i) * 8));
	for (i = 2; i < 6; i++)
		mac[i] = (u8)(mac_low >> ((5 - i) * 8));
}

int qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
	int ring, max_sds_rings;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
		return -ENOMEM;

	max_sds_rings = adapter->max_sds_rings;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		if (ring == max_sds_rings - 1)
			netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
				       QLCNIC_NETDEV_WEIGHT / max_sds_rings);
		else
			netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
				       QLCNIC_NETDEV_WEIGHT * 2);
	}

	return 0;
}

void qlcnic_napi_del(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		netif_napi_del(&sds_ring->napi);
	}

	qlcnic_free_sds_rings(adapter->recv_ctx);
}

void qlcnic_napi_enable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		napi_enable(&sds_ring->napi);
		qlcnic_enable_int(sds_ring);
	}
}

void qlcnic_napi_disable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		qlcnic_disable_int(sds_ring);
		napi_synchronize(&sds_ring->napi);
		napi_disable(&sds_ring->napi);
	}
}