be_main.c

/*
 * Copyright (C) 2005 - 2009 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */
#include "be.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int rx_frag_size = 2048;
module_param(rx_frag_size, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
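/* Free the DMA-coherent memory backing a queue's descriptor ring, if any */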
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}
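/* Enable/disable host interrupt delivery via the interrupt-control register
 * in the PCI config memory BAR; warns if the register already holds the
 * requested state.
 */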
static void be_intr_set(struct be_ctrl_info *ctrl, bool enable)
{
	u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable) {
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	} else if (enabled && !enable) {
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	} else {
		printk(KERN_WARNING DRV_NAME
			": bad value in membar_int_ctrl reg=0x%x\n", reg);
		return;
	}
	iowrite32(reg, addr);
}
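/* Doorbell helpers: each write tells the hardware how many entries were
 * posted to (or popped from) the given ring, encoded along with the ring id.
 */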
static void be_rxq_notify(struct be_ctrl_info *ctrl, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;
	iowrite32(val, ctrl->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_ctrl_info *ctrl, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
	iowrite32(val, ctrl->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_ctrl_info *ctrl, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, ctrl->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_ctrl_info *ctrl, u16 qid,
		bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, ctrl->db + DB_CQ_OFFSET);
}
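/* Change the interface MAC: delete the current pmac entry and add the new
 * address while the device is running, then mirror it into the netdev.
 */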
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (netif_running(netdev)) {
		status = be_cmd_pmac_del(&adapter->ctrl, adapter->if_handle,
				adapter->pmac_id);
		if (status)
			return status;

		status = be_cmd_pmac_add(&adapter->ctrl, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id);
	}

	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}
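/* Fold hardware port/rxf/erx counters (from the stats command buffer)
 * into the netdev's net_device_stats.
 */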
static void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va);
	struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct net_device_stats *dev_stats = &adapter->stats.net_stats;
	struct be_erx_stats *erx_stats = &hw_stats->erx;

	dev_stats->rx_packets = port_stats->rx_total_frames;
	dev_stats->tx_packets = port_stats->tx_unicastframes +
		port_stats->tx_multicastframes + port_stats->tx_broadcastframes;
	dev_stats->rx_bytes = (u64) port_stats->rx_bytes_msd << 32 |
				(u64) port_stats->rx_bytes_lsd;
	dev_stats->tx_bytes = (u64) port_stats->tx_bytes_msd << 32 |
				(u64) port_stats->tx_bytes_lsd;

	/* bad pkts received */
	dev_stats->rx_errors = port_stats->rx_crc_errors +
		port_stats->rx_alignment_symbol_errors +
		port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long +
		port_stats->rx_dropped_too_small +
		port_stats->rx_dropped_too_short +
		port_stats->rx_dropped_header_too_small +
		port_stats->rx_dropped_tcp_length +
		port_stats->rx_dropped_runt +
		port_stats->rx_tcp_checksum_errs +
		port_stats->rx_ip_checksum_errs +
		port_stats->rx_udp_checksum_errs;

	/* no space in linux buffers: best possible approximation */
	dev_stats->rx_dropped = erx_stats->rx_drops_no_fragments[0];

	/* detailed rx errors */
	dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long;

	/* receive ring buffer overflow */
	dev_stats->rx_over_errors = 0;

	dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
					port_stats->rx_input_fifo_overflow +
					rxf_stats->rx_drops_no_pbuf;

	/* receiver missed packets */
	dev_stats->rx_missed_errors = 0;

	/* packet transmit problems */
	dev_stats->tx_errors = 0;

	/* no space available in linux */
	dev_stats->tx_dropped = 0;

	dev_stats->multicast = port_stats->tx_multicastframes;
	dev_stats->collisions = 0;

	/* detailed tx_errors */
	dev_stats->tx_aborted_errors = 0;
	dev_stats->tx_carrier_errors = 0;
	dev_stats->tx_fifo_errors = 0;
	dev_stats->tx_heartbeat_errors = 0;
	dev_stats->tx_window_errors = 0;
}
static void be_link_status_update(struct be_adapter *adapter)
{
	struct be_link_info *prev = &adapter->link;
	struct be_link_info now = { 0 };
	struct net_device *netdev = adapter->netdev;

	be_cmd_link_status_query(&adapter->ctrl, &now);

	/* If link came up or went down */
	if (now.speed != prev->speed && (now.speed == PHY_LINK_SPEED_ZERO ||
			prev->speed == PHY_LINK_SPEED_ZERO)) {
		if (now.speed == PHY_LINK_SPEED_ZERO) {
			netif_stop_queue(netdev);
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		} else {
			netif_start_queue(netdev);
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		}
	}
	*prev = now;
}

/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter)
{
	struct be_ctrl_info *ctrl = &adapter->ctrl;
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_drvr_stats *stats = &adapter->stats.drvr_stats;
	ulong now = jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	stats->be_rx_fps = (stats->be_rx_frags - stats->be_prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->be_prev_rx_frags = stats->be_rx_frags;
	eqd = stats->be_rx_fps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(ctrl, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}

static struct net_device_stats *be_get_stats(struct net_device *dev)
{
	struct be_adapter *adapter = netdev_priv(dev);
	return &adapter->stats.net_stats;
}
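/* Convert a byte delta over a jiffies interval into Mbits/sec */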
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
	u64 rate = bytes;

	do_div(rate, ticks / HZ);
	rate <<= 3;			/* bytes/sec -> bits/sec */
	do_div(rate, 1000000ul);	/* Mbits/sec */

	return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around? */
	if (time_before(now, stats->be_tx_jiffies)) {
		stats->be_tx_jiffies = now;
		return;
	}

	/* Update tx rate once in two seconds */
	if ((now - stats->be_tx_jiffies) > 2 * HZ) {
		stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
						  - stats->be_tx_bytes_prev,
						 now - stats->be_tx_jiffies);
		stats->be_tx_jiffies = now;
		stats->be_tx_bytes_prev = stats->be_tx_bytes;
	}
}

static void be_tx_stats_update(struct be_adapter *adapter,
			u32 wrb_cnt, u32 copied, bool stopped)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);
	stats->be_tx_reqs++;
	stats->be_tx_wrbs += wrb_cnt;
	stats->be_tx_bytes += copied;
	if (stopped)
		stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
{
	int cnt = (skb->len > skb->data_len);
	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (cnt & 1) {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	} else
		*dummy = false;
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}
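/* Fill one WRB with the DMA address and length of a tx fragment */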
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
		bool vlan, u32 wrb_cnt, u32 len)
{
	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_shinfo(skb)->gso_segs > 1 && skb_shinfo(skb)->gso_size) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag,
			hdr, vlan_tx_tag_get(skb));
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
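/* DMA-map the skb header and its page fragments, fill one WRB per fragment
 * (plus an optional dummy WRB to keep the count even) and the header WRB;
 * returns the number of data bytes set up.
 */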
static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	u64 busaddr;
	u32 i, copied = 0;
	struct pci_dev *pdev = adapter->pdev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;

	atomic_add(wrb_cnt, &txq->used);
	hdr = queue_head_node(txq);
	queue_head_inc(txq);

	if (skb->len > skb->data_len) {
		int len = skb->len - skb->data_len;
		busaddr = pci_map_single(pdev, skb->data, len,
					 PCI_DMA_TODEVICE);
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = pci_map_page(pdev, frag->page,
				       frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(hdr, first_skb, adapter->vlan_grp ? true : false,
		wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
}
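/* Transmit entry point: build WRBs for the skb, stop the queue if the ring
 * is nearly full, then ring the tx doorbell.
 */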
static int be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);

	/* record the sent skb in the sent_skb table */
	BUG_ON(tx_obj->sent_skb_list[start]);
	tx_obj->sent_skb_list[start] = skb;

	/* Ensure that txq has space for the next skb; else stop the queue
	 * *BEFORE* ringing the tx doorbell, so that we serialize the
	 * tx compls of the current transmit which'll wake up the queue
	 */
	if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >= txq->len) {
		netif_stop_queue(netdev);
		stopped = true;
	}

	be_txq_notify(&adapter->ctrl, txq->id, wrb_cnt);

	be_tx_stats_update(adapter, wrb_cnt, copied, stopped);
	return NETDEV_TX_OK;
}
static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > BE_MAX_JUMBO_FRAME_SIZE) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU, BE_MAX_JUMBO_FRAME_SIZE);
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * If there are BE_NUM_VLANS_SUPPORTED or fewer VLANs configured,
 * program them in BE. If more than BE_NUM_VLANS_SUPPORTED are configured,
 * set the BE in promiscuous VLAN mode.
 */
static void be_vid_config(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;

	if (adapter->num_vlans <= BE_NUM_VLANS_SUPPORTED) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		be_cmd_vlan_config(&adapter->ctrl, adapter->if_handle,
			vtag, ntags, 1, 0);
	} else {
		be_cmd_vlan_config(&adapter->ctrl, adapter->if_handle,
			NULL, 0, 1, 1);
	}
}
static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_ctrl_info *ctrl = &adapter->ctrl;

	be_eq_notify(ctrl, rx_eq->q.id, false, false, 0);
	be_eq_notify(ctrl, tx_eq->q.id, false, false, 0);
	adapter->vlan_grp = grp;
	be_eq_notify(ctrl, rx_eq->q.id, true, false, 0);
	be_eq_notify(ctrl, tx_eq->q.id, true, false, 0);
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->num_vlans++;
	adapter->vlan_tag[vid] = 1;

	be_vid_config(netdev);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->num_vlans--;
	adapter->vlan_tag[vid] = 0;

	vlan_group_set_device(adapter->vlan_grp, vid, NULL);
	be_vid_config(netdev);
}
static void be_set_multicast_filter(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct dev_mc_list *mc_ptr;
	u8 mac_addr[32][ETH_ALEN];
	int i = 0;

	if (netdev->flags & IFF_ALLMULTI) {
		/* set BE in Multicast promiscuous */
		be_cmd_mcast_mac_set(&adapter->ctrl,
					adapter->if_handle, NULL, 0, true);
		return;
	}

	for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
		memcpy(&mac_addr[i][0], mc_ptr->dmi_addr, ETH_ALEN);
		if (++i >= 32) {
			be_cmd_mcast_mac_set(&adapter->ctrl,
				adapter->if_handle, &mac_addr[0][0], i, false);
			i = 0;
		}
	}

	if (i) {
		/* reset the promiscuous mode also. */
		be_cmd_mcast_mac_set(&adapter->ctrl,
			adapter->if_handle, &mac_addr[0][0], i, false);
	}
}

static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(&adapter->ctrl, adapter->port_num, 1);
	} else {
		be_cmd_promiscuous_config(&adapter->ctrl, adapter->port_num, 0);
		be_set_multicast_filter(netdev);
	}
}
static void be_rx_rate_update(struct be_adapter *adapter)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around */
	if (time_before(now, stats->be_rx_jiffies)) {
		stats->be_rx_jiffies = now;
		return;
	}

	/* Update the rate once in two seconds */
	if ((now - stats->be_rx_jiffies) < 2 * HZ)
		return;

	stats->be_rx_rate = be_calc_rate(stats->be_rx_bytes
					  - stats->be_rx_bytes_prev,
					 now - stats->be_rx_jiffies);
	stats->be_rx_jiffies = now;
	stats->be_rx_bytes_prev = stats->be_rx_bytes;
}

static void be_rx_stats_update(struct be_adapter *adapter,
		u32 pktsize, u16 numfrags)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);

	stats->be_rx_compl++;
	stats->be_rx_frags += numfrags;
	stats->be_rx_bytes += pktsize;
}

static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
{
	u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;

	l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
	ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
	ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
	if (ip_version) {
		tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
		udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
	}
	ipv6_chk = (ip_version && (tcpf || udpf));

	return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
}
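/* Look up the page-info for a completed rx fragment; unmaps the backing
 * page on its last use and decrements the rxq used count.
 */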
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &adapter->rx_obj.q;

	rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user)
		pci_unmap_page(adapter->pdev, pci_unmap_addr(rx_page_info, bus),
			adapter->big_page_size, PCI_DMA_FROMDEVICE);

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_eth_rx_compl *rxcp)
{
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, num_rcvd;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxq_idx, rxq->len);
	}
}
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter,
			struct sk_buff *skb, struct be_eth_rx_compl *rxcp)
{
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, num_rcvd;
	u32 pktsize, hdr_len, curr_frag_len;
	u8 *start;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	page_info = get_rx_page_info(adapter, rxq_idx);

	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(pktsize, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	memset(page_info, 0, sizeof(*page_info));

	if (pktsize <= rx_frag_size) {
		BUG_ON(num_rcvd != 1);
		goto done;
	}

	/* More frags present for this completion */
	pktsize -= curr_frag_len; /* account for above copied frag */
	for (i = 1; i < num_rcvd; i++) {
		index_inc(&rxq_idx, rxq->len);
		page_info = get_rx_page_info(adapter, rxq_idx);

		curr_frag_len = min(pktsize, rx_frag_size);

		skb_shinfo(skb)->frags[i].page = page_info->page;
		skb_shinfo(skb)->frags[i].page_offset = page_info->page_offset;
		skb_shinfo(skb)->frags[i].size = curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb_shinfo(skb)->nr_frags++;
		pktsize -= curr_frag_len;

		memset(page_info, 0, sizeof(*page_info));
	}

done:
	be_rx_stats_update(adapter, pktsize, num_rcvd);
	return;
}
/* Process the RX completion indicated by rxcp when LRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_eth_rx_compl *rxcp)
{
	struct sk_buff *skb;
	u32 vtp, vid;

	vtp = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);

	skb = netdev_alloc_skb(adapter->netdev, BE_HDR_LEN + NET_IP_ALIGN);
	if (!skb) {
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		be_rx_compl_discard(adapter, rxcp);
		return;
	}

	skb_reserve(skb, NET_IP_ALIGN);

	skb_fill_rx_data(adapter, skb, rxcp);

	if (do_pkt_csum(rxcp, adapter->rx_csum))
		skb->ip_summed = CHECKSUM_NONE;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, adapter->netdev);
	skb->dev = adapter->netdev;

	if (vtp) {
		if (!adapter->vlan_grp || adapter->num_vlans == 0) {
			kfree_skb(skb);
			return;
		}
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		vid = be16_to_cpu(vid);
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
	} else {
		netif_receive_skb(skb);
	}

	adapter->netdev->last_rx = jiffies;

	return;
}
/* Process the RX completion indicated by rxcp when LRO is enabled */
static void be_rx_compl_process_lro(struct be_adapter *adapter,
			struct be_eth_rx_compl *rxcp)
{
	struct be_rx_page_info *page_info;
	struct skb_frag_struct rx_frags[BE_MAX_FRAGS_PER_FRAME];
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
	u16 i, rxq_idx = 0, vid;

	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
	pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);

	remaining = pkt_size;
	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		rx_frags[i].page = page_info->page;
		rx_frags[i].page_offset = page_info->page_offset;
		rx_frags[i].size = curr_frag_len;
		remaining -= curr_frag_len;

		index_inc(&rxq_idx, rxq->len);

		memset(page_info, 0, sizeof(*page_info));
	}

	if (likely(!vlanf)) {
		lro_receive_frags(&adapter->rx_obj.lro_mgr, rx_frags, pkt_size,
			pkt_size, NULL, 0);
	} else {
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		vid = be16_to_cpu(vid);

		if (!adapter->vlan_grp || adapter->num_vlans == 0)
			return;

		lro_vlan_hwaccel_receive_frags(&adapter->rx_obj.lro_mgr,
			rx_frags, pkt_size, pkt_size, adapter->vlan_grp,
			vid, NULL, 0);
	}

	be_rx_stats_update(adapter, pkt_size, num_rcvd);
	return;
}
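/* Return the next valid rx completion entry (converted to CPU endianness),
 * or NULL if none is pending.
 */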
static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
{
	struct be_eth_rx_compl *rxcp = queue_tail_node(&adapter->rx_obj.cq);

	if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
		return NULL;

	be_dws_le_to_cpu(rxcp, sizeof(*rxcp));

	queue_tail_inc(&adapter->rx_obj.cq);
	return rxcp;
}

/* To reset the valid bit, we need to reset the whole word as
 * when walking the queue the valid entries are little-endian
 * and invalid entries are host endian
 */
static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
{
	rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
}

static inline struct page *be_alloc_pages(u32 size)
{
	gfp_t alloc_flags = GFP_ATOMIC;
	u32 order = get_order(size);

	if (order > 0)
		alloc_flags |= __GFP_COMP;

	return alloc_pages(alloc_flags, order);
}
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_adapter *adapter)
{
	struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl;
	struct be_rx_page_info *page_info = NULL;
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size);
			if (unlikely(!pagep)) {
				drvr_stats(adapter)->be_ethrx_post_fail++;
				break;
			}
			page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
						adapter->big_page_size,
						PCI_DMA_FROMDEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		pci_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
		queue_head_inc(rxq);

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(&adapter->ctrl, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		adapter->rx_post_starved = true;
	}

	return;
}
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
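/* Unmap and free all WRBs of the transmit ending at last_index and free
 * the corresponding skb recorded at the queue tail.
 */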
static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	u64 busaddr;
	u16 cur_index, num_wrbs = 0;

	cur_index = txq->tail;
	sent_skb = sent_skbs[cur_index];
	BUG_ON(!sent_skb);
	sent_skbs[cur_index] = NULL;

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		be_dws_le_to_cpu(wrb, sizeof(*wrb));
		busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
		if (busaddr != 0) {
			pci_unmap_single(adapter->pdev, busaddr,
				wrb->frag_len, PCI_DMA_TODEVICE);
		}
		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	atomic_sub(num_wrbs, &txq->used);
	kfree_skb(sent_skb);
}

static void be_rx_q_clean(struct be_adapter *adapter)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
	struct be_eth_rx_compl *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(adapter)) != NULL) {
		be_rx_compl_discard(adapter, rxcp);
		be_rx_compl_reset(rxcp);
		be_cq_notify(&adapter->ctrl, rx_cq->id, true, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; tail != rxq->head; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
}
static void be_tx_q_clean(struct be_adapter *adapter)
{
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	u16 last_index;
	bool dummy_wrb;

	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		last_index = txq->tail;
		index_adv(&last_index,
			wrb_cnt_for_skb(sent_skb, &dummy_wrb) - 1, txq->len);
		be_tx_compl_process(adapter, last_index);
	}
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_ctrl_info *ctrl = &adapter->ctrl;

	q = &ctrl->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &ctrl->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(ctrl, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;
	struct be_ctrl_info *ctrl = &adapter->ctrl;

	/* Alloc MCC compl queue */
	cq = &ctrl->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_cq_entry)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(ctrl, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &ctrl->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(ctrl, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->tx_obj.q;
	if (q->created) {
		be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_TXQ);

		/* No more tx completions can be rcvd now; clean up if there
		 * are any pending completions or pending tx requests */
		be_tx_q_clean(adapter);
	}
	be_queue_free(adapter, q);

	q = &adapter->tx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;

	/* Alloc Tx Event queue */
	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
		return -1;

	/* Ask BE to create Tx Event queue */
	if (be_cmd_eq_create(&adapter->ctrl, eq, adapter->tx_eq.cur_eqd))
		goto tx_eq_free;

	/* Alloc TX eth compl queue */
	cq = &adapter->tx_obj.cq;
	if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
		goto tx_eq_destroy;

	/* Ask BE to create Tx eth compl queue */
	if (be_cmd_cq_create(&adapter->ctrl, cq, eq, false, false, 3))
		goto tx_cq_free;

	/* Alloc TX eth queue */
	q = &adapter->tx_obj.q;
	if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
		goto tx_cq_destroy;

	/* Ask BE to create Tx eth queue */
	if (be_cmd_txq_create(&adapter->ctrl, q, cq))
		goto tx_q_free;
	return 0;

tx_q_free:
	be_queue_free(adapter, q);
tx_cq_destroy:
	be_cmd_q_destroy(&adapter->ctrl, cq, QTYPE_CQ);
tx_cq_free:
	be_queue_free(adapter, cq);
tx_eq_destroy:
	be_cmd_q_destroy(&adapter->ctrl, eq, QTYPE_EQ);
tx_eq_free:
	be_queue_free(adapter, eq);
	return -1;
}
static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->rx_obj.q;
	if (q->created) {
		be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_RXQ);
		be_rx_q_clean(adapter);
	}
	be_queue_free(adapter, q);

	q = &adapter->rx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	q = &adapter->rx_eq.q;
	if (q->created)
		be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	int rc;

	adapter->max_rx_coal = BE_MAX_FRAGS_PER_FRAME;
	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	adapter->rx_eq.max_eqd = BE_MAX_EQD;
	adapter->rx_eq.min_eqd = 0;
	adapter->rx_eq.cur_eqd = 0;
	adapter->rx_eq.enable_aic = true;

	/* Alloc Rx Event queue */
	eq = &adapter->rx_eq.q;
	rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				sizeof(struct be_eq_entry));
	if (rc)
		return rc;

	/* Ask BE to create Rx Event queue */
	rc = be_cmd_eq_create(&adapter->ctrl, eq, adapter->rx_eq.cur_eqd);
	if (rc)
		goto rx_eq_free;

	/* Alloc RX eth compl queue */
	cq = &adapter->rx_obj.cq;
	rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
			sizeof(struct be_eth_rx_compl));
	if (rc)
		goto rx_eq_destroy;

	/* Ask BE to create Rx eth compl queue */
	rc = be_cmd_cq_create(&adapter->ctrl, cq, eq, false, false, 3);
	if (rc)
		goto rx_cq_free;

	/* Alloc RX eth queue */
	q = &adapter->rx_obj.q;
	rc = be_queue_alloc(adapter, q, RX_Q_LEN, sizeof(struct be_eth_rx_d));
	if (rc)
		goto rx_cq_destroy;

	/* Ask BE to create Rx eth queue */
	rc = be_cmd_rxq_create(&adapter->ctrl, q, cq->id, rx_frag_size,
		BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle, false);
	if (rc)
		goto rx_q_free;

	return 0;

rx_q_free:
	be_queue_free(adapter, q);
rx_cq_destroy:
	be_cmd_q_destroy(&adapter->ctrl, cq, QTYPE_CQ);
rx_cq_free:
	be_queue_free(adapter, cq);
rx_eq_destroy:
	be_cmd_q_destroy(&adapter->ctrl, eq, QTYPE_EQ);
rx_eq_free:
	be_queue_free(adapter, eq);
	return rc;
}
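/* Pop the next valid event queue entry and extract the resource (queue) id;
 * returns false when the queue is empty.
 */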
static bool event_get(struct be_eq_obj *eq_obj, u16 *rid)
{
	struct be_eq_entry *entry = queue_tail_node(&eq_obj->q);
	u32 evt = entry->evt;

	if (!evt)
		return false;

	evt = le32_to_cpu(evt);
	*rid = (evt >> EQ_ENTRY_RES_ID_SHIFT) & EQ_ENTRY_RES_ID_MASK;
	entry->evt = 0;
	queue_tail_inc(&eq_obj->q);
	return true;
}

static int event_handle(struct be_ctrl_info *ctrl,
			struct be_eq_obj *eq_obj)
{
	u16 rid = 0, num = 0;

	while (event_get(eq_obj, &rid))
		num++;

	/* We can see an interrupt and no event */
	be_eq_notify(ctrl, eq_obj->q.id, true, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_ctrl_info *ctrl = &adapter->ctrl;
	int rx, tx;

	tx = event_handle(ctrl, &adapter->tx_eq);
	rx = event_handle(ctrl, &adapter->rx_eq);

	if (rx || tx)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(&adapter->ctrl, &adapter->rx_eq);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(&adapter->ctrl, &adapter->tx_eq);

	return IRQ_HANDLED;
}
static inline bool do_lro(struct be_adapter *adapter,
			struct be_eth_rx_compl *rxcp)
{
	int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
	int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);

	if (err)
		drvr_stats(adapter)->be_rxcp_err++;

	return (!tcp_frame || err || (adapter->max_rx_coal <= 1)) ?
		false : true;
}

int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(rx_eq, struct be_adapter, rx_eq);
	struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
	struct be_eth_rx_compl *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(adapter);
		if (!rxcp)
			break;

		if (do_lro(adapter, rxcp))
			be_rx_compl_process_lro(adapter, rxcp);
		else
			be_rx_compl_process(adapter, rxcp);

		be_rx_compl_reset(rxcp);
	}

	lro_flush_all(&adapter->rx_obj.lro_mgr);

	/* Refill the queue */
	if (atomic_read(&adapter->rx_obj.q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(adapter);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(&adapter->ctrl, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(&adapter->ctrl, rx_cq->id, false, work_done);
	}
	return work_done;
}
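/* Reap tx completions, re-arm the tx CQ, and wake the netdev queue if it
 * was stopped for lack of WRBs.
 */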
void be_process_tx(struct be_adapter *adapter)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_eth_tx_compl *txcp;
	u32 num_cmpl = 0;
	u16 end_idx;

	while ((txcp = be_tx_compl_get(tx_cq))) {
		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
		be_tx_compl_process(adapter, end_idx);
		num_cmpl++;
	}

	if (num_cmpl) {
		be_cq_notify(&adapter->ctrl, tx_cq->id, true, num_cmpl);

		/* As Tx wrbs have been freed up, wake up netdev queue if
		 * it was stopped due to lack of tx wrbs.
		 */
		if (netif_queue_stopped(adapter->netdev) &&
			atomic_read(&txq->used) < txq->len / 2) {
			netif_wake_queue(adapter->netdev);
		}

		drvr_stats(adapter)->be_tx_events++;
		drvr_stats(adapter)->be_tx_compl += num_cmpl;
	}
}

/* As TX and MCC share the same EQ, check for both TX and MCC completions.
 * For TX/MCC we don't honour the budget; consume everything.
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);

	napi_complete(napi);

	be_process_tx(adapter);

	be_process_mcc(&adapter->ctrl);

	return 1;
}
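/* Periodic (1 sec) housekeeping: link status, stats refresh, adaptive EQ
 * delay, tx/rx rate samples, and rx replenish if posting was starved.
 */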
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	int status;

	/* Check link */
	be_link_status_update(adapter);

	/* Get Stats */
	status = be_cmd_get_stats(&adapter->ctrl, &adapter->stats.cmd);
	if (!status)
		netdev_stats_update(adapter);

	/* Set EQ delay */
	be_rx_eqd_update(adapter);

	be_tx_rate_update(adapter);
	be_rx_rate_update(adapter);

	if (adapter->rx_post_starved) {
		adapter->rx_post_starved = false;
		be_post_rx_frags(adapter);
	}

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static void be_msix_enable(struct be_adapter *adapter)
{
	int i, status;

	for (i = 0; i < BE_NUM_MSIX_VECTORS; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
		BE_NUM_MSIX_VECTORS);
	if (status == 0)
		adapter->msix_enabled = true;
	return;
}

static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
{
	return adapter->msix_entries[eq_id -
			8 * adapter->ctrl.pci_func].vector;
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	int status, vec;

	sprintf(tx_eq->desc, "%s-tx", netdev->name);
	vec = be_msix_vec_get(adapter, tx_eq->q.id);
	status = request_irq(vec, be_msix_tx_mcc, 0, tx_eq->desc, adapter);
	if (status)
		goto err;

	sprintf(rx_eq->desc, "%s-rx", netdev->name);
	vec = be_msix_vec_get(adapter, rx_eq->q.id);
	status = request_irq(vec, be_msix_rx, 0, rx_eq->desc, adapter);
	if (status) { /* Free TX IRQ */
		vec = be_msix_vec_get(adapter, tx_eq->q.id);
		free_irq(vec, adapter);
		goto err;
	}
	return 0;

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	pci_disable_msix(adapter->pdev);
	adapter->msix_enabled = false;
	return status;
}
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (adapter->msix_enabled) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
		adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!adapter->msix_enabled) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	vec = be_msix_vec_get(adapter, adapter->tx_eq.q.id);
	free_irq(vec, adapter);
	vec = be_msix_vec_get(adapter, adapter->rx_eq.q.id);
	free_irq(vec, adapter);
done:
	adapter->isr_registered = false;
	return;
}

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_ctrl_info *ctrl = &adapter->ctrl;
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;

	/* First time posting */
	be_post_rx_frags(adapter);

	napi_enable(&rx_eq->napi);
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	be_intr_set(ctrl, true);

	/* The evt queues are created in unarmed state; arm them */
	be_eq_notify(ctrl, rx_eq->q.id, true, false, 0);
	be_eq_notify(ctrl, tx_eq->q.id, true, false, 0);

	/* Rx compl queue may be in unarmed state; rearm it */
	be_cq_notify(ctrl, adapter->rx_obj.cq.id, true, 0);

	be_link_status_update(adapter);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}
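/* Create the BE interface and its tx/rx/mcc queues; on failure unwind
 * whatever was created so far.
 */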
static int be_setup(struct be_adapter *adapter)
{
	struct be_ctrl_info *ctrl = &adapter->ctrl;
	struct net_device *netdev = adapter->netdev;
	u32 if_flags;
	int status;

	if_flags = BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PROMISCUOUS |
		BE_IF_FLAGS_MCAST_PROMISCUOUS | BE_IF_FLAGS_UNTAGGED |
		BE_IF_FLAGS_PASS_L3L4_ERRORS;
	status = be_cmd_if_create(ctrl, if_flags, netdev->dev_addr,
			false/* pmac_invalid */, &adapter->if_handle,
			&adapter->pmac_id);
	if (status != 0)
		goto do_none;

	be_vid_config(netdev);

	status = be_cmd_set_flow_control(ctrl, true, true);
	if (status != 0)
		goto if_destroy;

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	return 0;

rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	be_cmd_if_destroy(ctrl, adapter->if_handle);
do_none:
	return status;
}
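
/* Tear down everything created by be_setup(): queues and the i/f object */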
static int be_clear(struct be_adapter *adapter)
{
	struct be_ctrl_info *ctrl = &adapter->ctrl;

	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);

	be_cmd_if_destroy(ctrl, adapter->if_handle);

	be_mcc_queues_destroy(adapter);
	return 0;
}
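
/*
 * ndo_stop: quiesce the interface. Interrupts are disabled and any in-flight
 * handlers are waited for (synchronize_irq) before the vectors are freed and
 * NAPI is disabled, so no handler can run against torn-down state.
 */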
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_ctrl_info *ctrl = &adapter->ctrl;
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec;

	cancel_delayed_work_sync(&adapter->work);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);
	adapter->link.speed = PHY_LINK_SPEED_ZERO;

	be_intr_set(ctrl, false);

	if (adapter->msix_enabled) {
		vec = be_msix_vec_get(adapter, tx_eq->q.id);
		synchronize_irq(vec);
		vec = be_msix_vec_get(adapter, rx_eq->q.id);
		synchronize_irq(vec);
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	napi_disable(&rx_eq->napi);
	napi_disable(&tx_eq->napi);

	return 0;
}
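
/*
 * inet_lro get_frag_header callback: locate the MAC, IP and TCP headers in
 * the first page fragment of a received frame. Only IPv4/TCP frames (plain
 * or single-VLAN-tagged) are eligible; returning -1 makes the LRO engine
 * pass the frame up unaggregated.
 */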
static int be_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr,
				void **ip_hdr, void **tcpudp_hdr,
				u64 *hdr_flags, void *priv)
{
	struct ethhdr *eh;
	struct vlan_ethhdr *veh;
	struct iphdr *iph;
	u8 *va = page_address(frag->page) + frag->page_offset;
	unsigned long ll_hlen;

	prefetch(va);
	eh = (struct ethhdr *)va;
	*mac_hdr = eh;
	ll_hlen = ETH_HLEN;
	if (eh->h_proto != htons(ETH_P_IP)) {
		if (eh->h_proto == htons(ETH_P_8021Q)) {
			veh = (struct vlan_ethhdr *)va;
			if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP))
				return -1;

			ll_hlen += VLAN_HLEN;
		} else {
			return -1;
		}
	}
	*hdr_flags = LRO_IPV4;
	iph = (struct iphdr *)(va + ll_hlen);
	*ip_hdr = iph;
	if (iph->protocol != IPPROTO_TCP)
		return -1;

	*hdr_flags |= LRO_TCP;
	*tcpudp_hdr = (u8 *) (*ip_hdr) + (iph->ihl << 2);
	return 0;
}
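
/* Configure the inet_lro manager that aggregates RX TCP segments in NAPI */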
static void be_lro_init(struct be_adapter *adapter, struct net_device *netdev)
{
	struct net_lro_mgr *lro_mgr;

	lro_mgr = &adapter->rx_obj.lro_mgr;
	lro_mgr->dev = netdev;
	lro_mgr->features = LRO_F_NAPI;
	lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
	lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
	lro_mgr->max_desc = BE_MAX_LRO_DESCRIPTORS;
	lro_mgr->lro_arr = adapter->rx_obj.lro_desc;
	lro_mgr->get_frag_header = be_get_frag_header;
	lro_mgr->max_aggr = BE_MAX_FRAGS_PER_FRAME;
}
static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_get_stats		= be_get_stats,
	.ndo_set_rx_mode	= be_set_multicast_list,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= be_vlan_register,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
};
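
/*
 * One-time netdev initialization: advertise offload features, hook up the
 * netdev/ethtool ops, set up LRO, and register the RX and TX/MCC NAPI
 * contexts. The carrier and TX queue stay off until be_open() runs.
 */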
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_IP_CSUM |
		NETIF_F_IPV6_CSUM;

	netdev->flags |= IFF_MULTICAST;

	adapter->rx_csum = true;

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	be_lro_init(adapter, netdev);

	netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx,
		BE_NAPI_WEIGHT);
	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);
}
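
/*
 * Map/unmap the controller's PCI BARs: CSR registers (BAR 2), the doorbell
 * region (first 128KB of BAR 4) and the PCICFG region (BAR 1).
 */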
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	struct be_ctrl_info *ctrl = &adapter->ctrl;

	if (ctrl->csr)
		iounmap(ctrl->csr);
	if (ctrl->db)
		iounmap(ctrl->db);
	if (ctrl->pcicfg)
		iounmap(ctrl->pcicfg);
}
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;

	addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
			pci_resource_len(adapter->pdev, 2));
	if (addr == NULL)
		return -ENOMEM;
	adapter->ctrl.csr = addr;

	addr = ioremap_nocache(pci_resource_start(adapter->pdev, 4),
			128 * 1024);
	if (addr == NULL)
		goto pci_map_err;
	adapter->ctrl.db = addr;

	addr = ioremap_nocache(pci_resource_start(adapter->pdev, 1),
			pci_resource_len(adapter->pdev, 1));
	if (addr == NULL)
		goto pci_map_err;
	adapter->ctrl.pcicfg = addr;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->ctrl.mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);
}
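
/*
 * The mailbox must be 16-byte aligned: a buffer of sizeof(mbox) + 16 bytes
 * is allocated, and both its virtual and DMA addresses are rounded up with
 * PTR_ALIGN so the aligned mailbox always fits inside the allocation.
 */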
/* Initialize the mbox required to send cmds to BE */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_ctrl_info *ctrl = &adapter->ctrl;
	struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
	int status;
	u32 val;

	status = be_map_pci_bars(adapter);
	if (status)
		return status;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
			mbox_mem_alloc->size, &mbox_mem_alloc->dma);
	if (!mbox_mem_alloc->va) {
		be_unmap_pci_bars(adapter);
		return -1;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	spin_lock_init(&ctrl->mbox_lock);
	spin_lock_init(&ctrl->mcc_lock);
	spin_lock_init(&ctrl->mcc_cq_lock);

	val = ioread32(ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
	ctrl->pci_func = (val >> MEMBAR_CTRL_INT_CTRL_PFUNC_SHIFT) &
		MEMBAR_CTRL_INT_CTRL_PFUNC_MASK;
	return 0;
}
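
/*
 * The GET_STATS firmware command DMAs its response into a dedicated
 * coherent buffer; allocate/free that buffer here.
 */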
static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_stats_obj *stats = &adapter->stats;
	struct be_dma_mem *cmd = &stats->cmd;

	if (cmd->va)
		pci_free_consistent(adapter->pdev, cmd->size,
			cmd->va, cmd->dma);
}
static int be_stats_init(struct be_adapter *adapter)
{
	struct be_stats_obj *stats = &adapter->stats;
	struct be_dma_mem *cmd = &stats->cmd;

	cmd->size = sizeof(struct be_cmd_req_get_stats);
	cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
	if (cmd->va == NULL)
		return -1;
	return 0;
}
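
/*
 * PCI remove: unregister the netdev first so no new I/O can start, then
 * release firmware objects, DMA buffers, MSI-X vectors and PCI resources
 * in reverse order of acquisition.
 */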
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	if (adapter->msix_enabled) {
		pci_disable_msix(adapter->pdev);
		adapter->msix_enabled = false;
	}

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
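
/* Basic firmware bring-up: POST, then query the FW version and port number */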
static int be_hw_up(struct be_adapter *adapter)
{
	struct be_ctrl_info *ctrl = &adapter->ctrl;
	int status;

	status = be_cmd_POST(ctrl);
	if (status)
		return status;

	status = be_cmd_get_fw_ver(ctrl, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(ctrl, &adapter->port_num);
	return status;
}
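
/*
 * PCI probe: enable the device, allocate the netdev, set the DMA mask
 * (64-bit with a 32-bit fallback), initialize control structures, query the
 * permanent MAC address and register the netdev. Failures unwind through
 * the goto ladder at the bottom in reverse order.
 */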
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	struct be_ctrl_info *ctrl;
	u8 mac[ETH_ALEN];

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct be_adapter));
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;

	be_msix_enable(adapter);

	status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	ctrl = &adapter->ctrl;
	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_hw_up(adapter);
	if (status)
		goto stats_clean;

	status = be_cmd_mac_addr_query(ctrl, mac, MAC_ADDRESS_TYPE_NETWORK,
			true /* permanent */, 0);
	if (status)
		goto stats_clean;
	memcpy(netdev->dev_addr, mac, ETH_ALEN);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	be_netdev_init(netdev);
	SET_NETDEV_DEV(netdev, &adapter->pdev->dev);

	status = be_setup(adapter);
	if (status)
		goto stats_clean;
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(adapter->netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
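
/*
 * Legacy PM hooks. Suspend closes and clears a running interface under
 * rtnl_lock before powering the device down; resume re-enables the device
 * and rebuilds the interface via be_setup()/be_open().
 */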
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		be_clear(adapter);
		rtnl_unlock();
	}

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_setup(adapter);
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);
	return 0;
}
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume
};
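
/*
 * Module init: validate the rx_frag_size module parameter (it must be a
 * cache-line-aligned value of 2048, 4096 or 8192; anything else falls back
 * to 2048) before registering the PCI driver.
 */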
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096
		&& rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}
	/* Ensure rx_frag_size is aligned to cache line */
	if (SKB_DATA_ALIGN(rx_frag_size) != rx_frag_size) {
		printk(KERN_WARNING DRV_NAME
			" : Bad module param rx_frag_size. Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);