be_main.c

/*
 * Copyright (C) 2005 - 2009 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int rx_frag_size = 2048;
module_param(rx_frag_size, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
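
/* Queue (ring) helpers: each be_queue_info is backed by a single coherent
 * DMA allocation of len * entry_size bytes; head and tail are ring indices
 * into that buffer. */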
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

static inline void *queue_head_node(struct be_queue_info *q)
{
	return q->dma_mem.va + q->head * q->entry_size;
}

static inline void *queue_tail_node(struct be_queue_info *q)
{
	return q->dma_mem.va + q->tail * q->entry_size;
}

static inline void queue_head_inc(struct be_queue_info *q)
{
	index_inc(&q->head, q->len);
}

static inline void queue_tail_inc(struct be_queue_info *q)
{
	index_inc(&q->tail, q->len);
}
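
/* Enable or disable host interrupts by toggling the HOSTINTR bit of the
 * interrupt-control register in the mapped PCI config BAR. */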
static void be_intr_set(struct be_ctrl_info *ctrl, bool enable)
{
	u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable) {
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	} else if (enabled && !enable) {
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	} else {
		printk(KERN_WARNING DRV_NAME
			": bad value in membar_int_ctrl reg=0x%x\n", reg);
		return;
	}
	iowrite32(reg, addr);
}
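
/* Doorbell helpers: notify hardware by writing the queue id plus the count
 * of newly posted (RQ/TXQ) or popped (EQ/CQ) entries to the doorbell BAR. */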
static void be_rxq_notify(struct be_ctrl_info *ctrl, u16 qid, u16 posted)
{
	u32 val = 0;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;
	iowrite32(val, ctrl->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_ctrl_info *ctrl, u16 qid, u16 posted)
{
	u32 val = 0;

	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
	iowrite32(val, ctrl->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_ctrl_info *ctrl, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, ctrl->db + DB_EQ_OFFSET);
}

static void be_cq_notify(struct be_ctrl_info *ctrl, u16 qid,
		bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, ctrl->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (netif_running(netdev)) {
		status = be_cmd_pmac_del(&adapter->ctrl, adapter->if_handle,
				adapter->pmac_id);
		if (status)
			return status;

		status = be_cmd_pmac_add(&adapter->ctrl, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id);
	}

	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}

static void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va);
	struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct net_device_stats *dev_stats = &adapter->stats.net_stats;

	dev_stats->rx_packets = port_stats->rx_total_frames;
	dev_stats->tx_packets = port_stats->tx_unicastframes +
		port_stats->tx_multicastframes + port_stats->tx_broadcastframes;
	dev_stats->rx_bytes = (u64) port_stats->rx_bytes_msd << 32 |
				(u64) port_stats->rx_bytes_lsd;
	dev_stats->tx_bytes = (u64) port_stats->tx_bytes_msd << 32 |
				(u64) port_stats->tx_bytes_lsd;

	/* bad pkts received */
	dev_stats->rx_errors = port_stats->rx_crc_errors +
		port_stats->rx_alignment_symbol_errors +
		port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors + port_stats->rx_frame_too_long;

	/* packet transmit problems */
	dev_stats->tx_errors = 0;

	/* no space in linux buffers */
	dev_stats->rx_dropped = 0;

	/* no space available in linux */
	dev_stats->tx_dropped = 0;

	dev_stats->multicast = port_stats->tx_multicastframes;
	dev_stats->collisions = 0;

	/* detailed rx errors */
	dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors + port_stats->rx_frame_too_long;

	/* receive ring buffer overflow */
	dev_stats->rx_over_errors = 0;

	dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f; it's per BE card */
	dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
		port_stats->rx_input_fifo_overflow +
		rxf_stats->rx_drops_no_pbuf;

	/* receiver missed packets */
	dev_stats->rx_missed_errors = 0;

	/* detailed tx errors */
	dev_stats->tx_aborted_errors = 0;
	dev_stats->tx_carrier_errors = 0;
	dev_stats->tx_fifo_errors = 0;
	dev_stats->tx_heartbeat_errors = 0;
	dev_stats->tx_window_errors = 0;
}

static void be_link_status_update(struct be_adapter *adapter)
{
	struct be_link_info *prev = &adapter->link;
	struct be_link_info now = { 0 };
	struct net_device *netdev = adapter->netdev;

	be_cmd_link_status_query(&adapter->ctrl, &now);

	/* If link came up or went down */
	if (now.speed != prev->speed && (now.speed == PHY_LINK_SPEED_ZERO ||
			prev->speed == PHY_LINK_SPEED_ZERO)) {
		if (now.speed == PHY_LINK_SPEED_ZERO) {
			netif_stop_queue(netdev);
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		} else {
			netif_start_queue(netdev);
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		}
	}
	*prev = now;
}

/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter)
{
	struct be_ctrl_info *ctrl = &adapter->ctrl;
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_drvr_stats *stats = &adapter->stats.drvr_stats;
	ulong now = jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	stats->be_rx_fps = (stats->be_rx_frags - stats->be_prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->be_prev_rx_frags = stats->be_rx_frags;
	eqd = stats->be_rx_fps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(ctrl, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}

static struct net_device_stats *be_get_stats(struct net_device *dev)
{
	struct be_adapter *adapter = netdev_priv(dev);
	return &adapter->stats.net_stats;
}

static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
	u64 rate = bytes;

	do_div(rate, ticks / HZ);
	rate <<= 3;			/* bytes/sec -> bits/sec */
	do_div(rate, 1000000ul);	/* bits/sec -> Mbits/sec */
	return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around? */
	if (time_before(now, stats->be_tx_jiffies)) {
		stats->be_tx_jiffies = now;
		return;
	}

	/* Update tx rate once in two seconds */
	if ((now - stats->be_tx_jiffies) > 2 * HZ) {
		stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
						  - stats->be_tx_bytes_prev,
						 now - stats->be_tx_jiffies);
		stats->be_tx_jiffies = now;
		stats->be_tx_bytes_prev = stats->be_tx_bytes;
	}
}

static void be_tx_stats_update(struct be_adapter *adapter,
			u32 wrb_cnt, u32 copied, bool stopped)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);

	stats->be_tx_reqs++;
	stats->be_tx_wrbs += wrb_cnt;
	stats->be_tx_bytes += copied;
	if (stopped)
		stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
{
	int cnt = 0;

	while (skb) {
		if (skb->len > skb->data_len)
			cnt++;
		cnt += skb_shinfo(skb)->nr_frags;
		skb = skb_shinfo(skb)->frag_list;
	}
	/* to account for hdr wrb */
	cnt++;
	if (cnt & 1) {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	} else
		*dummy = false;
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
		bool vlan, u32 wrb_cnt, u32 len)
{
	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_shinfo(skb)->gso_segs > 1 && skb_shinfo(skb)->gso_size) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag,
			hdr, vlan_tx_tag_get(skb));
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
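
/* DMA-map the skb head and every page fragment, filling one WRB per
 * mapping (plus an optional dummy WRB to keep the count even), then fill
 * the hdr WRB reserved at the head of the ring. Returns bytes queued. */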
static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	u64 busaddr;
	u32 i, copied = 0;
	struct pci_dev *pdev = adapter->pdev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;

	atomic_add(wrb_cnt, &txq->used);
	hdr = queue_head_node(txq);
	queue_head_inc(txq);

	while (skb) {
		if (skb->len > skb->data_len) {
			int len = skb->len - skb->data_len;
			busaddr = pci_map_single(pdev, skb->data, len,
					PCI_DMA_TODEVICE);
			wrb = queue_head_node(txq);
			wrb_fill(wrb, busaddr, len);
			be_dws_cpu_to_le(wrb, sizeof(*wrb));
			queue_head_inc(txq);
			copied += len;
		}

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			struct skb_frag_struct *frag =
				&skb_shinfo(skb)->frags[i];
			busaddr = pci_map_page(pdev, frag->page,
					frag->page_offset,
					frag->size, PCI_DMA_TODEVICE);
			wrb = queue_head_node(txq);
			wrb_fill(wrb, busaddr, frag->size);
			be_dws_cpu_to_le(wrb, sizeof(*wrb));
			queue_head_inc(txq);
			copied += frag->size;
		}
		skb = skb_shinfo(skb)->frag_list;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(hdr, first_skb, adapter->vlan_grp ? true : false,
		wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
}

static int be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);

	/* record the sent skb in the sent_skb table */
	BUG_ON(tx_obj->sent_skb_list[start]);
	tx_obj->sent_skb_list[start] = skb;

	/* Ensure that txq has space for the next skb; Else stop the queue
	 * *BEFORE* ringing the tx doorbell, so that we serialize the
	 * tx compls of the current transmit which'll wake up the queue
	 */
	if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >= txq->len) {
		netif_stop_queue(netdev);
		stopped = true;
	}

	be_txq_notify(&adapter->ctrl, txq->id, wrb_cnt);

	be_tx_stats_update(adapter, wrb_cnt, copied, stopped);
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (new_mtu < BE_MIN_MTU ||
			new_mtu > BE_MAX_JUMBO_FRAME_SIZE) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU, BE_MAX_JUMBO_FRAME_SIZE);
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * If there are BE_NUM_VLANS_SUPPORTED or fewer VLANs configured,
 * program them in BE. If more than BE_NUM_VLANS_SUPPORTED are configured,
 * set the BE in promiscuous VLAN mode.
 */
static void be_vid_config(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;

	if (adapter->num_vlans <= BE_NUM_VLANS_SUPPORTED) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		be_cmd_vlan_config(&adapter->ctrl, adapter->if_handle,
			vtag, ntags, 1, 0);
	} else {
		be_cmd_vlan_config(&adapter->ctrl, adapter->if_handle,
			NULL, 0, 1, 1);
	}
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_ctrl_info *ctrl = &adapter->ctrl;

	be_eq_notify(ctrl, rx_eq->q.id, false, false, 0);
	be_eq_notify(ctrl, tx_eq->q.id, false, false, 0);
	adapter->vlan_grp = grp;
	be_eq_notify(ctrl, rx_eq->q.id, true, false, 0);
	be_eq_notify(ctrl, tx_eq->q.id, true, false, 0);
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->num_vlans++;
	adapter->vlan_tag[vid] = 1;

	be_vid_config(netdev);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->num_vlans--;
	adapter->vlan_tag[vid] = 0;

	vlan_group_set_device(adapter->vlan_grp, vid, NULL);
	be_vid_config(netdev);
}
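
/* Program the multicast filter: in IFF_ALLMULTI mode put the interface in
 * multicast-promiscuous mode; otherwise push the mc_list addresses to the
 * hardware in batches of 32. */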
static void be_set_multicast_filter(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct dev_mc_list *mc_ptr;
	u8 mac_addr[32][ETH_ALEN];
	int i = 0;

	if (netdev->flags & IFF_ALLMULTI) {
		/* set BE in Multicast promiscuous */
		be_cmd_mcast_mac_set(&adapter->ctrl,
			adapter->if_handle, NULL, 0, true);
		return;
	}

	for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
		memcpy(&mac_addr[i][0], mc_ptr->dmi_addr, ETH_ALEN);
		if (++i >= 32) {
			be_cmd_mcast_mac_set(&adapter->ctrl,
				adapter->if_handle, &mac_addr[0][0], i, false);
			i = 0;
		}
	}

	if (i) {
		/* reset the promiscuous mode also. */
		be_cmd_mcast_mac_set(&adapter->ctrl,
			adapter->if_handle, &mac_addr[0][0], i, false);
	}
}

static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(&adapter->ctrl, adapter->port_num, 1);
	} else {
		be_cmd_promiscuous_config(&adapter->ctrl, adapter->port_num, 0);
		be_set_multicast_filter(netdev);
	}
}

static void be_rx_rate_update(struct be_adapter *adapter)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around */
	if (time_before(now, stats->be_rx_jiffies)) {
		stats->be_rx_jiffies = now;
		return;
	}

	/* Update the rate once in two seconds */
	if ((now - stats->be_rx_jiffies) < 2 * HZ)
		return;

	stats->be_rx_rate = be_calc_rate(stats->be_rx_bytes
					  - stats->be_rx_bytes_prev,
					 now - stats->be_rx_jiffies);
	stats->be_rx_jiffies = now;
	stats->be_rx_bytes_prev = stats->be_rx_bytes;
}

static void be_rx_stats_update(struct be_adapter *adapter,
		u32 pktsize, u16 numfrags)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);

	stats->be_rx_compl++;
	stats->be_rx_frags += numfrags;
	stats->be_rx_bytes += pktsize;
}
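
/* Returns true when the stack must verify the checksum itself
 * (CHECKSUM_NONE): rx checksum offload is off, or the frame is not a
 * TCP/UDP-over-IP frame whose L3 and L4 checksums the hw validated. */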
static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
{
	u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;

	l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
	ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
	ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
	if (ip_version) {
		tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
		udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
	}
	ipv6_chk = (ip_version && (tcpf || udpf));

	return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
}

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &adapter->rx_obj.q;

	rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user)
		pci_unmap_page(adapter->pdev, pci_unmap_addr(rx_page_info, bus),
			adapter->big_page_size, PCI_DMA_FROMDEVICE);

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_eth_rx_compl *rxcp)
{
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, num_rcvd;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter,
			struct sk_buff *skb, struct be_eth_rx_compl *rxcp)
{
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, num_rcvd;
	u32 pktsize, hdr_len, curr_frag_len;
	u8 *start;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	page_info = get_rx_page_info(adapter, rxq_idx);

	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(pktsize, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	memset(page_info, 0, sizeof(*page_info));

	if (pktsize <= rx_frag_size) {
		BUG_ON(num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	pktsize -= curr_frag_len; /* account for above copied frag */
	for (i = 1; i < num_rcvd; i++) {
		index_inc(&rxq_idx, rxq->len);
		page_info = get_rx_page_info(adapter, rxq_idx);

		curr_frag_len = min(pktsize, rx_frag_size);

		skb_shinfo(skb)->frags[i].page = page_info->page;
		skb_shinfo(skb)->frags[i].page_offset = page_info->page_offset;
		skb_shinfo(skb)->frags[i].size = curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb_shinfo(skb)->nr_frags++;

		pktsize -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}

	be_rx_stats_update(adapter, pktsize, num_rcvd);
	return;
}

/* Process the RX completion indicated by rxcp when LRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_eth_rx_compl *rxcp)
{
	struct sk_buff *skb;
	u32 vtp, vid;

	vtp = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);

	skb = netdev_alloc_skb(adapter->netdev, BE_HDR_LEN + NET_IP_ALIGN);
	if (!skb) {
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		be_rx_compl_discard(adapter, rxcp);
		return;
	}

	skb_reserve(skb, NET_IP_ALIGN);

	skb_fill_rx_data(adapter, skb, rxcp);

	if (do_pkt_csum(rxcp, adapter->rx_csum))
		skb->ip_summed = CHECKSUM_NONE;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, adapter->netdev);
	skb->dev = adapter->netdev;

	if (vtp) {
		if (!adapter->vlan_grp || adapter->num_vlans == 0) {
			kfree_skb(skb);
			return;
		}
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		vid = be16_to_cpu(vid);
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
	} else {
		netif_receive_skb(skb);
	}

	adapter->netdev->last_rx = jiffies;

	return;
}

/* Process the RX completion indicated by rxcp when LRO is enabled */
static void be_rx_compl_process_lro(struct be_adapter *adapter,
			struct be_eth_rx_compl *rxcp)
{
	struct be_rx_page_info *page_info;
	struct skb_frag_struct rx_frags[BE_MAX_FRAGS_PER_FRAME];
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
	u16 i, rxq_idx = 0, vid;

	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
	pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);

	remaining = pkt_size;
	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		rx_frags[i].page = page_info->page;
		rx_frags[i].page_offset = page_info->page_offset;
		rx_frags[i].size = curr_frag_len;
		remaining -= curr_frag_len;

		index_inc(&rxq_idx, rxq->len);

		memset(page_info, 0, sizeof(*page_info));
	}

	if (likely(!vlanf)) {
		lro_receive_frags(&adapter->rx_obj.lro_mgr, rx_frags, pkt_size,
			pkt_size, NULL, 0);
	} else {
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		vid = be16_to_cpu(vid);

		if (!adapter->vlan_grp || adapter->num_vlans == 0)
			return;

		lro_vlan_hwaccel_receive_frags(&adapter->rx_obj.lro_mgr,
			rx_frags, pkt_size, pkt_size, adapter->vlan_grp,
			vid, NULL, 0);
	}

	be_rx_stats_update(adapter, pkt_size, num_rcvd);
	return;
}

static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
{
	struct be_eth_rx_compl *rxcp = queue_tail_node(&adapter->rx_obj.cq);

	if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
		return NULL;

	be_dws_le_to_cpu(rxcp, sizeof(*rxcp));

	rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;

	queue_tail_inc(&adapter->rx_obj.cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size)
{
	gfp_t alloc_flags = GFP_ATOMIC;
	u32 order = get_order(size);

	if (order > 0)
		alloc_flags |= __GFP_COMP;
	return alloc_pages(alloc_flags, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_adapter *adapter)
{
	struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl;
	struct be_rx_page_info *page_info = NULL;
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size);
			if (unlikely(!pagep)) {
				drvr_stats(adapter)->be_ethrx_post_fail++;
				break;
			}
			page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
						adapter->big_page_size,
						PCI_DMA_FROMDEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		pci_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
		queue_head_inc(rxq);

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(&adapter->ctrl, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		adapter->rx_post_starved = true;
	}

	return;
}

static struct be_eth_tx_compl *
be_tx_compl_get(struct be_adapter *adapter)
{
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
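
/* Walk the tx ring from its tail up to last_index, unmapping every WRB
 * that carried a fragment, then free the completed skb. */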
static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	u64 busaddr;
	u16 cur_index, num_wrbs = 0;

	cur_index = txq->tail;
	sent_skb = sent_skbs[cur_index];
	BUG_ON(!sent_skb);
	sent_skbs[cur_index] = NULL;

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		be_dws_le_to_cpu(wrb, sizeof(*wrb));
		busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
		if (busaddr != 0) {
			pci_unmap_single(adapter->pdev, busaddr,
				wrb->frag_len, PCI_DMA_TODEVICE);
		}
		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	atomic_sub(num_wrbs, &txq->used);
	kfree_skb(sent_skb);
}

static void be_rx_q_clean(struct be_adapter *adapter)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
	struct be_eth_rx_compl *rxcp;
	u16 tail;

	/* First clean up pending rx completions */
	while ((rxcp = be_rx_compl_get(adapter)) != NULL) {
		be_rx_compl_discard(adapter, rxcp);
		be_cq_notify(&adapter->ctrl, rx_cq->id, true, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; tail != rxq->head; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
}

static void be_tx_q_clean(struct be_adapter *adapter)
{
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	u16 last_index;
	bool dummy_wrb;

	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		last_index = txq->tail;
		index_adv(&last_index,
			wrb_cnt_for_skb(sent_skb, &dummy_wrb) - 1, txq->len);
		be_tx_compl_process(adapter, last_index);
	}
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->tx_obj.q;
	if (q->created)
		be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_TXQ);
	be_queue_free(adapter, q);

	q = &adapter->tx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* No more tx completions can be rcvd now; clean up if there are
	 * any pending completions or pending tx requests */
	be_tx_q_clean(adapter);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;

	/* Alloc Tx Event queue */
	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
		return -1;

	/* Ask BE to create Tx Event queue */
	if (be_cmd_eq_create(&adapter->ctrl, eq, adapter->tx_eq.cur_eqd))
		goto tx_eq_free;

	/* Alloc TX eth compl queue */
	cq = &adapter->tx_obj.cq;
	if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
		goto tx_eq_destroy;

	/* Ask BE to create Tx eth compl queue */
	if (be_cmd_cq_create(&adapter->ctrl, cq, eq, false, false, 3))
		goto tx_cq_free;

	/* Alloc TX eth queue */
	q = &adapter->tx_obj.q;
	if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
		goto tx_cq_destroy;

	/* Ask BE to create Tx eth queue */
	if (be_cmd_txq_create(&adapter->ctrl, q, cq))
		goto tx_q_free;
	return 0;

tx_q_free:
	be_queue_free(adapter, q);
tx_cq_destroy:
	be_cmd_q_destroy(&adapter->ctrl, cq, QTYPE_CQ);
tx_cq_free:
	be_queue_free(adapter, cq);
tx_eq_destroy:
	be_cmd_q_destroy(&adapter->ctrl, eq, QTYPE_EQ);
tx_eq_free:
	be_queue_free(adapter, eq);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->rx_obj.q;
	if (q->created) {
		be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_RXQ);
		be_rx_q_clean(adapter);
	}
	be_queue_free(adapter, q);

	q = &adapter->rx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	q = &adapter->rx_eq.q;
	if (q->created)
		be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	int rc;

	adapter->max_rx_coal = BE_MAX_FRAGS_PER_FRAME;
	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	adapter->rx_eq.max_eqd = BE_MAX_EQD;
	adapter->rx_eq.min_eqd = 0;
	adapter->rx_eq.cur_eqd = 0;
	adapter->rx_eq.enable_aic = true;

	/* Alloc Rx Event queue */
	eq = &adapter->rx_eq.q;
	rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				sizeof(struct be_eq_entry));
	if (rc)
		return rc;

	/* Ask BE to create Rx Event queue */
	rc = be_cmd_eq_create(&adapter->ctrl, eq, adapter->rx_eq.cur_eqd);
	if (rc)
		goto rx_eq_free;

	/* Alloc RX eth compl queue */
	cq = &adapter->rx_obj.cq;
	rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
			sizeof(struct be_eth_rx_compl));
	if (rc)
		goto rx_eq_destroy;

	/* Ask BE to create Rx eth compl queue */
	rc = be_cmd_cq_create(&adapter->ctrl, cq, eq, false, false, 3);
	if (rc)
		goto rx_cq_free;

	/* Alloc RX eth queue */
	q = &adapter->rx_obj.q;
	rc = be_queue_alloc(adapter, q, RX_Q_LEN, sizeof(struct be_eth_rx_d));
	if (rc)
		goto rx_cq_destroy;

	/* Ask BE to create Rx eth queue */
	rc = be_cmd_rxq_create(&adapter->ctrl, q, cq->id, rx_frag_size,
		BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle, false);
	if (rc)
		goto rx_q_free;
	return 0;

rx_q_free:
	be_queue_free(adapter, q);
rx_cq_destroy:
	be_cmd_q_destroy(&adapter->ctrl, cq, QTYPE_CQ);
rx_cq_free:
	be_queue_free(adapter, cq);
rx_eq_destroy:
	be_cmd_q_destroy(&adapter->ctrl, eq, QTYPE_EQ);
rx_eq_free:
	be_queue_free(adapter, eq);
	return rc;
}
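
/* Pop one event from the event queue; returns false when the ring is
 * empty. The resource id of the queue that raised the event is returned
 * in *rid. */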
static bool event_get(struct be_eq_obj *eq_obj, u16 *rid)
{
	struct be_eq_entry *entry = queue_tail_node(&eq_obj->q);
	u32 evt = entry->evt;

	if (!evt)
		return false;

	evt = le32_to_cpu(evt);
	*rid = (evt >> EQ_ENTRY_RES_ID_SHIFT) & EQ_ENTRY_RES_ID_MASK;
	entry->evt = 0;
	queue_tail_inc(&eq_obj->q);
	return true;
}

static int event_handle(struct be_ctrl_info *ctrl,
			struct be_eq_obj *eq_obj)
{
	u16 rid = 0, num = 0;

	while (event_get(eq_obj, &rid))
		num++;

	/* We can see an interrupt and no event */
	be_eq_notify(ctrl, eq_obj->q.id, true, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_ctrl_info *ctrl = &adapter->ctrl;
	int rx, tx;

	tx = event_handle(ctrl, &adapter->tx_eq);
	rx = event_handle(ctrl, &adapter->rx_eq);

	if (rx || tx)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(&adapter->ctrl, &adapter->rx_eq);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(&adapter->ctrl, &adapter->tx_eq);

	return IRQ_HANDLED;
}
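
/* LRO is attempted only for error-free TCP frames, and only while rx
 * coalescing is enabled (max_rx_coal > 1). */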
static inline bool do_lro(struct be_adapter *adapter,
			struct be_eth_rx_compl *rxcp)
{
	int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
	int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);

	if (err)
		drvr_stats(adapter)->be_rxcp_err++;

	return (!tcp_frame || err || (adapter->max_rx_coal <= 1)) ?
		false : true;
}
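
/* NAPI poll handler for rx: process up to budget completions, flush LRO,
 * replenish the rx ring when it drains below the watermark, and re-arm
 * the CQ only once all pending work is done. */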
int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(rx_eq, struct be_adapter, rx_eq);
	struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
	struct be_eth_rx_compl *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(adapter);
		if (!rxcp)
			break;

		if (do_lro(adapter, rxcp))
			be_rx_compl_process_lro(adapter, rxcp);
		else
			be_rx_compl_process(adapter, rxcp);
	}

	lro_flush_all(&adapter->rx_obj.lro_mgr);

	/* Refill the queue */
	if (atomic_read(&adapter->rx_obj.q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(adapter);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(&adapter->ctrl, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(&adapter->ctrl, rx_cq->id, false, work_done);
	}
	return work_done;
}

/* For TX we don't honour budget; consume everything */
int be_poll_tx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *tx_cq = &tx_obj->cq;
	struct be_queue_info *txq = &tx_obj->q;
	struct be_eth_tx_compl *txcp;
	u32 num_cmpl = 0;
	u16 end_idx;

	while ((txcp = be_tx_compl_get(adapter))) {
		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
		be_tx_compl_process(adapter, end_idx);
		num_cmpl++;
	}

	/* As Tx wrbs have been freed up, wake up netdev queue if
	 * it was stopped due to lack of tx wrbs.
	 */
	if (netif_queue_stopped(adapter->netdev) &&
			atomic_read(&txq->used) < txq->len / 2) {
		netif_wake_queue(adapter->netdev);
	}

	napi_complete(napi);

	be_cq_notify(&adapter->ctrl, tx_cq->id, true, num_cmpl);

	drvr_stats(adapter)->be_tx_events++;
	drvr_stats(adapter)->be_tx_compl += num_cmpl;

	return 1;
}
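
/* Periodic (1 sec) housekeeping: refresh link state and stats, update the
 * adaptive EQ delay and tx/rx rate samples, and replenish the rx ring if
 * an earlier post failed for lack of memory. */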
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	int status;

	/* Check link */
	be_link_status_update(adapter);

	/* Get Stats */
	status = be_cmd_get_stats(&adapter->ctrl, &adapter->stats.cmd);
	if (!status)
		netdev_stats_update(adapter);

	/* Set EQ delay */
	be_rx_eqd_update(adapter);

	be_tx_rate_update(adapter);
	be_rx_rate_update(adapter);

	if (adapter->rx_post_starved) {
		adapter->rx_post_starved = false;
		be_post_rx_frags(adapter);
	}

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static void be_msix_enable(struct be_adapter *adapter)
{
	int i, status;

	for (i = 0; i < BE_NUM_MSIX_VECTORS; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
		BE_NUM_MSIX_VECTORS);
	if (status == 0)
		adapter->msix_enabled = true;
	return;
}
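
/* EQ ids are relative to the PCI function (8 per function); map an eq_id
 * back to its MSI-X vector. */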
static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
{
	return adapter->msix_entries[eq_id -
			8 * adapter->ctrl.pci_func].vector;
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	int status, vec;

	sprintf(tx_eq->desc, "%s-tx", netdev->name);
	vec = be_msix_vec_get(adapter, tx_eq->q.id);
	status = request_irq(vec, be_msix_tx, 0, tx_eq->desc, adapter);
	if (status)
		goto err;

	sprintf(rx_eq->desc, "%s-rx", netdev->name);
	vec = be_msix_vec_get(adapter, rx_eq->q.id);
	status = request_irq(vec, be_msix_rx, 0, rx_eq->desc, adapter);
	if (status) { /* Free TX IRQ */
		vec = be_msix_vec_get(adapter, tx_eq->q.id);
		free_irq(vec, adapter);
		goto err;
	}
	return 0;
err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	pci_disable_msix(adapter->pdev);
	adapter->msix_enabled = false;
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (adapter->msix_enabled) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!adapter->msix_enabled) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	vec = be_msix_vec_get(adapter, adapter->tx_eq.q.id);
	free_irq(vec, adapter);
	vec = be_msix_vec_get(adapter, adapter->rx_eq.q.id);
	free_irq(vec, adapter);
done:
	adapter->isr_registered = false;
	return;
}

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_ctrl_info *ctrl = &adapter->ctrl;
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	u32 if_flags;
	int status;

	if_flags = BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PROMISCUOUS |
		BE_IF_FLAGS_MCAST_PROMISCUOUS | BE_IF_FLAGS_UNTAGGED |
		BE_IF_FLAGS_PASS_L3L4_ERRORS;
	status = be_cmd_if_create(ctrl, if_flags, netdev->dev_addr,
			false/* pmac_invalid */, &adapter->if_handle,
			&adapter->pmac_id);
	if (status != 0)
		goto do_none;

	be_vid_config(netdev);

	status = be_cmd_set_flow_control(ctrl, true, true);
	if (status != 0)
		goto if_destroy;

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	/* First time posting */
	be_post_rx_frags(adapter);

	napi_enable(&rx_eq->napi);
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	be_intr_set(ctrl, true);

	/* The evt queues are created in the unarmed state; arm them */
	be_eq_notify(ctrl, rx_eq->q.id, true, false, 0);
	be_eq_notify(ctrl, tx_eq->q.id, true, false, 0);

	/* The compl queues are created in the unarmed state; arm them */
	be_cq_notify(ctrl, adapter->rx_obj.cq.id, true, 0);
	be_cq_notify(ctrl, adapter->tx_obj.cq.id, true, 0);

	be_link_status_update(adapter);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	be_cmd_if_destroy(ctrl, adapter->if_handle);
do_none:
	return status;
}

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_ctrl_info *ctrl = &adapter->ctrl;
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec;

	cancel_delayed_work(&adapter->work);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);
	adapter->link.speed = PHY_LINK_SPEED_ZERO;

	be_intr_set(ctrl, false);

	if (adapter->msix_enabled) {
		vec = be_msix_vec_get(adapter, tx_eq->q.id);
		synchronize_irq(vec);
		vec = be_msix_vec_get(adapter, rx_eq->q.id);
		synchronize_irq(vec);
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	napi_disable(&rx_eq->napi);
	napi_disable(&tx_eq->napi);

	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);

	be_cmd_if_destroy(ctrl, adapter->if_handle);
	return 0;
}
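
/* LRO callback: locate the mac/ip/tcp headers in the first fragment.
 * Only IPv4 TCP frames, optionally VLAN tagged, are eligible. */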
static int be_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr,
			void **ip_hdr, void **tcpudp_hdr,
			u64 *hdr_flags, void *priv)
{
	struct ethhdr *eh;
	struct vlan_ethhdr *veh;
	struct iphdr *iph;
	u8 *va = page_address(frag->page) + frag->page_offset;
	unsigned long ll_hlen;

	prefetch(va);
	eh = (struct ethhdr *)va;
	*mac_hdr = eh;
	ll_hlen = ETH_HLEN;
	if (eh->h_proto != htons(ETH_P_IP)) {
		if (eh->h_proto == htons(ETH_P_8021Q)) {
			veh = (struct vlan_ethhdr *)va;
			if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP))
				return -1;

			ll_hlen += VLAN_HLEN;
		} else {
			return -1;
		}
	}
	*hdr_flags = LRO_IPV4;

	iph = (struct iphdr *)(va + ll_hlen);
	*ip_hdr = iph;
	if (iph->protocol != IPPROTO_TCP)
		return -1;

	*hdr_flags |= LRO_TCP;
	*tcpudp_hdr = (u8 *) (*ip_hdr) + (iph->ihl << 2);

	return 0;
}
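
/* Set up the rx object's net_lro_mgr for NAPI-based LRO */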
static void be_lro_init(struct be_adapter *adapter, struct net_device *netdev)
{
	struct net_lro_mgr *lro_mgr;

	lro_mgr = &adapter->rx_obj.lro_mgr;
	lro_mgr->dev = netdev;
	lro_mgr->features = LRO_F_NAPI;
	lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
	lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
	lro_mgr->max_desc = BE_MAX_LRO_DESCRIPTORS;
	lro_mgr->lro_arr = adapter->rx_obj.lro_desc;
	lro_mgr->get_frag_header = be_get_frag_header;
	lro_mgr->max_aggr = BE_MAX_FRAGS_PER_FRAME;
}

static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_get_stats		= be_get_stats,
	.ndo_set_rx_mode	= be_set_multicast_list,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= be_vlan_register,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
};
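
/* One-time netdev init: advertise offload features, hook up the netdev,
 * ethtool, LRO and NAPI callbacks, and leave the queue stopped with
 * carrier off until be_open() runs.
 */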
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_IP_CSUM |
		NETIF_F_IPV6_CSUM;

	netdev->flags |= IFF_MULTICAST;

	adapter->rx_csum = true;

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	be_lro_init(adapter, netdev);

	netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx,
		BE_NAPI_WEIGHT);
	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx,
		BE_NAPI_WEIGHT);

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	struct be_ctrl_info *ctrl = &adapter->ctrl;

	if (ctrl->csr)
		iounmap(ctrl->csr);
	if (ctrl->db)
		iounmap(ctrl->db);
	if (ctrl->pcicfg)
		iounmap(ctrl->pcicfg);
}
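
/* Map the CSR (BAR 2), doorbell (BAR 4) and pcicfg (BAR 1) memory spaces */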
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;

	addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
			pci_resource_len(adapter->pdev, 2));
	if (addr == NULL)
		return -ENOMEM;
	adapter->ctrl.csr = addr;

	addr = ioremap_nocache(pci_resource_start(adapter->pdev, 4),
			128 * 1024);
	if (addr == NULL)
		goto pci_map_err;
	adapter->ctrl.db = addr;

	addr = ioremap_nocache(pci_resource_start(adapter->pdev, 1),
			pci_resource_len(adapter->pdev, 1));
	if (addr == NULL)
		goto pci_map_err;
	adapter->ctrl.pcicfg = addr;

	return 0;

pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->ctrl.mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);
}

/* Initialize the mbox required to send cmds to BE */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_ctrl_info *ctrl = &adapter->ctrl;
	struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
	int status;
	u32 val;

	status = be_map_pci_bars(adapter);
	if (status)
		return status;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
			mbox_mem_alloc->size, &mbox_mem_alloc->dma);
	if (!mbox_mem_alloc->va) {
		be_unmap_pci_bars(adapter);
		return -1;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
	spin_lock_init(&ctrl->cmd_lock);

	val = ioread32(ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
	ctrl->pci_func = (val >> MEMBAR_CTRL_INT_CTRL_PFUNC_SHIFT) &
		MEMBAR_CTRL_INT_CTRL_PFUNC_MASK;
	return 0;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_stats_obj *stats = &adapter->stats;
	struct be_dma_mem *cmd = &stats->cmd;

	if (cmd->va)
		pci_free_consistent(adapter->pdev, cmd->size,
			cmd->va, cmd->dma);
}
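
/* Allocate the DMA-coherent buffer used by the get-stats fw command */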
static int be_stats_init(struct be_adapter *adapter)
{
	struct be_stats_obj *stats = &adapter->stats;
	struct be_dma_mem *cmd = &stats->cmd;

	cmd->size = sizeof(struct be_cmd_req_get_stats);
	cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
	if (cmd->va == NULL)
		return -1;
	return 0;
}
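
/* Undo be_probe(): unregister the netdev and release all hw/sw resources */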
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	unregister_netdev(adapter->netdev);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	if (adapter->msix_enabled) {
		pci_disable_msix(adapter->pdev);
		adapter->msix_enabled = false;
	}

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
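
/* Check POST status, then query the fw version and the port configuration */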
static int be_hw_up(struct be_adapter *adapter)
{
	struct be_ctrl_info *ctrl = &adapter->ctrl;
	int status;

	status = be_cmd_POST(ctrl);
	if (status)
		return status;

	status = be_cmd_get_fw_ver(ctrl, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(ctrl, &adapter->port_num);
	return status;
}
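
/* PCI probe: enable the device, set up DMA masks, map the bars, bring the
 * hw up, read the permanent mac address and register the netdev.
 */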
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	struct be_ctrl_info *ctrl;
	u8 mac[ETH_ALEN];

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct be_adapter));
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;

	be_msix_enable(adapter);

	status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	ctrl = &adapter->ctrl;
	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_hw_up(adapter);
	if (status)
		goto stats_clean;

	status = be_cmd_mac_addr_query(ctrl, mac, MAC_ADDRESS_TYPE_NETWORK,
			true /* permanent */, 0);
	if (status)
		goto stats_clean;
	memcpy(netdev->dev_addr, mac, ETH_ALEN);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	be_netdev_init(netdev);
	SET_NETDEV_DEV(netdev, &adapter->pdev->dev);

	status = register_netdev(netdev);
	if (status != 0)
		goto stats_clean;

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
	return 0;

stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(adapter->netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
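
/* PM suspend: detach the netdev, close the interface if it is running,
 * then save PCI state and enter the requested low-power state.
 */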
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
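
/* PM resume: re-enable the device, restore PCI state and reopen the
 * interface if it was running at suspend time.
 */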
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);
	return 0;
}

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume
};
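
/* Module entry: validate the rx_frag_size param (2048/4096/8192 and
 * cache-line aligned) before registering the PCI driver.
 */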
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096
		&& rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}
	/* Ensure rx_frag_size is aligned to cache line */
	if (SKB_DATA_ALIGN(rx_frag_size) != rx_frag_size) {
		printk(KERN_WARNING DRV_NAME
			" : Bad module param rx_frag_size. Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);