/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/slab.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4_en.h"
#include "en_port.h"

static int mlx4_en_setup_tc(struct net_device *dev, u8 up)
{
	if (up != MLX4_EN_NUM_UP)
		return -EINVAL;

	return 0;
}

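/* ndo_vlan_rx_add_vid() handler: mark the VID active, push the updated
 * VLAN filter to the port (if the device and port are up) and register
 * the VID with the device's VLAN table. */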
static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	en_dbg(HW, priv, "adding VLAN:%d\n", vid);

	set_bit(vid, priv->active_vlans);

	/* Add VID to port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
		en_err(priv, "failed adding vlan %d\n", vid);
	mutex_unlock(&mdev->state_lock);

	return 0;
}

static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	en_dbg(HW, priv, "Killing VID:%d\n", vid);

	clear_bit(vid, priv->active_vlans);

	/* Remove VID from port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx))
		mlx4_unregister_vlan(mdev->dev, priv->port, idx);
	else
		en_err(priv, "could not find vid %d in cache\n", vid);

	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	mutex_unlock(&mdev->state_lock);

	return 0;
}

u64 mlx4_en_mac_to_u64(u8 *addr)
{
	u64 mac = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		mac <<= 8;
		mac |= addr[i];
	}
	return mac;
}

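/* ndo_set_mac_address() handler: validate and record the new address,
 * then defer the actual hardware update to the mac_task worker. */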
static int mlx4_en_set_mac(struct net_device *dev, void *addr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
	priv->mac = mlx4_en_mac_to_u64(dev->dev_addr);
	queue_work(mdev->workqueue, &priv->mac_task);
	return 0;
}

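/* Worker for mac_task: replaces the port's registered unicast MAC with
 * the one stored in priv->mac, but only while the port is up. */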
static void mlx4_en_do_set_mac(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 mac_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		/* Remove old MAC and insert the new one */
		err = mlx4_replace_mac(mdev->dev, priv->port,
				       priv->base_qpn, priv->mac);
		if (err)
			en_err(priv, "Failed changing HW MAC address\n");
	} else
		en_dbg(HW, priv, "Port is down while "
				 "registering mac, exiting...\n");

	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_clear_list(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	kfree(priv->mc_addrs);
	priv->mc_addrs = NULL;
	priv->mc_addrs_cnt = 0;
}

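/* Snapshot the netdev multicast list into a private flat array; the
 * caller holds netif_tx_lock, hence the GFP_ATOMIC allocation. The
 * cached copy lets the worker walk the list after the lock is dropped. */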
static void mlx4_en_cache_mclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	char *mc_addrs;
	int mc_addrs_cnt = netdev_mc_count(dev);
	int i;

	mc_addrs = kmalloc(mc_addrs_cnt * ETH_ALEN, GFP_ATOMIC);
	if (!mc_addrs) {
		en_err(priv, "failed to allocate multicast list\n");
		return;
	}
	i = 0;
	netdev_for_each_mc_addr(ha, dev)
		memcpy(mc_addrs + i++ * ETH_ALEN, ha->addr, ETH_ALEN);
	mlx4_en_clear_list(dev);
	priv->mc_addrs = mc_addrs;
	priv->mc_addrs_cnt = mc_addrs_cnt;
}

static void mlx4_en_set_multicast(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (!priv->port_up)
		return;

	queue_work(priv->mdev->workqueue, &priv->mcast_task);
}

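/* Worker for mcast_task: brings the port's RX filtering in sync with
 * dev->flags (IFF_PROMISC / IFF_ALLMULTI) and with the current multicast
 * list, all under mdev->state_lock. */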
static void mlx4_en_do_set_multicast(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 mcast_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;
	u64 mcast_addr = 0;
	u8 mc_list[16] = {0};
	int err;

	mutex_lock(&mdev->state_lock);
	if (!mdev->device_up) {
		en_dbg(HW, priv, "Card is not up, "
				 "ignoring multicast change.\n");
		goto out;
	}
	if (!priv->port_up) {
		en_dbg(HW, priv, "Port is down, "
				 "ignoring multicast change.\n");
		goto out;
	}

	if (!netif_carrier_ok(dev)) {
		if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
			if (priv->port_state.link_state) {
				priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
				netif_carrier_on(dev);
				en_dbg(LINK, priv, "Link Up\n");
			}
		}
	}

	/*
	 * Promiscuous mode: disable all filters
	 */
	if (dev->flags & IFF_PROMISC) {
		if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
			if (netif_msg_rx_status(priv))
				en_warn(priv, "Entering promiscuous mode\n");
			priv->flags |= MLX4_EN_FLAG_PROMISC;

			/* Enable promiscuous mode */
			if (!(mdev->dev->caps.flags &
						MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
				err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
							     priv->base_qpn, 1);
			else
				err = mlx4_unicast_promisc_add(mdev->dev, priv->base_qpn,
							       priv->port);
			if (err)
				en_err(priv, "Failed enabling "
					     "promiscuous mode\n");

			/* Disable port multicast filter (unconditionally) */
			err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
						  0, MLX4_MCAST_DISABLE);
			if (err)
				en_err(priv, "Failed disabling "
					     "multicast filter\n");

			/* Add the default qp number as multicast promisc */
			if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
				err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
								 priv->port);
				if (err)
					en_err(priv, "Failed entering multicast promisc mode\n");
				priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			}

			/* Disable port VLAN filter */
			err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
			if (err)
				en_err(priv, "Failed disabling VLAN filter\n");
		}
		goto out;
	}

	/*
	 * Not in promiscuous mode
	 */
	if (priv->flags & MLX4_EN_FLAG_PROMISC) {
		if (netif_msg_rx_status(priv))
			en_warn(priv, "Leaving promiscuous mode\n");
		priv->flags &= ~MLX4_EN_FLAG_PROMISC;

		/* Disable promiscuous mode */
		if (!(mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
			err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
						     priv->base_qpn, 0);
		else
			err = mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
							  priv->port);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");

		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
							    priv->port);
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}

		/* Enable port VLAN filter */
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed enabling VLAN filter\n");
	}

	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Add the default qp number as multicast promisc */
		if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
			err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
							 priv->port);
			if (err)
				en_err(priv, "Failed entering multicast promisc mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
		}
	} else {
		int i;

		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
							    priv->port);
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}

		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Detach our qp from all the multicast addresses */
		for (i = 0; i < priv->mc_addrs_cnt; i++) {
			memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
			mc_list[5] = priv->port;
			mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
					      mc_list, MLX4_PROT_ETH);
		}
		/* Flush mcast filter and init it with broadcast address */
		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
				    1, MLX4_MCAST_CONFIG);

		/* Update multicast list - we cache all addresses so they won't
		 * change while HW is updated holding the command semaphore */
		netif_tx_lock_bh(dev);
		mlx4_en_cache_mclist(dev);
		netif_tx_unlock_bh(dev);
		for (i = 0; i < priv->mc_addrs_cnt; i++) {
			mcast_addr =
			      mlx4_en_mac_to_u64(priv->mc_addrs + i * ETH_ALEN);
			memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
			mc_list[5] = priv->port;
			mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp,
					      mc_list, 0, MLX4_PROT_ETH);
			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
					    mcast_addr, 0, MLX4_MCAST_CONFIG);
		}
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_ENABLE);
		if (err)
			en_err(priv, "Failed enabling multicast filter\n");
	}
out:
	mutex_unlock(&mdev->state_lock);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mlx4_en_netpoll(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_cq *cq;
	unsigned long flags;
	int i;

	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = &priv->rx_cq[i];
		spin_lock_irqsave(&cq->lock, flags);
		napi_synchronize(&cq->napi);
		mlx4_en_process_rx_cq(dev, cq, 0);
		spin_unlock_irqrestore(&cq->lock, flags);
	}
}
#endif

static void mlx4_en_tx_timeout(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	if (netif_msg_timer(priv))
		en_warn(priv, "Tx timeout called on port:%d\n", priv->port);

	priv->port_stats.tx_timeout++;
	en_dbg(DRV, priv, "Scheduling watchdog\n");
	queue_work(mdev->workqueue, &priv->watchdog_task);
}

static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	spin_lock_bh(&priv->stats_lock);
	memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
	spin_unlock_bh(&priv->stats_lock);

	return &priv->ret_stats;
}

static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
	struct mlx4_en_cq *cq;
	int i;

	/* If we haven't received a specific coalescing setting
	 * (module param), we set the moderation parameters as follows:
	 * - moder_cnt is set to the number of mtu sized packets to
	 *   satisfy our coalescing target.
	 * - moder_time is set to a fixed value.
	 */
	priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
	priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
	priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
	priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
	en_dbg(INTR, priv, "Default coalescing params for mtu:%d - "
			   "rx_frames:%d rx_usecs:%d\n",
	       priv->dev->mtu, priv->rx_frames, priv->rx_usecs);

	/* Setup cq moderation params */
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = &priv->rx_cq[i];
		cq->moder_cnt = priv->rx_frames;
		cq->moder_time = priv->rx_usecs;
		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
		priv->last_moder_packets[i] = 0;
		priv->last_moder_bytes[i] = 0;
	}

	for (i = 0; i < priv->tx_ring_num; i++) {
		cq = &priv->tx_cq[i];
		cq->moder_cnt = priv->tx_frames;
		cq->moder_time = priv->tx_usecs;
	}

	/* Reset auto-moderation params */
	priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
	priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
	priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
	priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
	priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
	priv->adaptive_rx_coal = 1;
	priv->last_moder_jiffies = 0;
	priv->last_moder_tx_packets = 0;
}

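/* Periodically tune RX interrupt moderation per ring: pick a moderation
 * time between rx_usecs_low and rx_usecs_high by interpolating on the
 * observed packet rate, and program it only when it has changed. */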
static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
{
	unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
	struct mlx4_en_cq *cq;
	unsigned long packets;
	unsigned long rate;
	unsigned long avg_pkt_size;
	unsigned long rx_packets;
	unsigned long rx_bytes;
	unsigned long rx_pkt_diff;
	int moder_time;
	int ring, err;

	if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
		return;

	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		spin_lock_bh(&priv->stats_lock);
		rx_packets = priv->rx_ring[ring].packets;
		rx_bytes = priv->rx_ring[ring].bytes;
		spin_unlock_bh(&priv->stats_lock);

		rx_pkt_diff = ((unsigned long) (rx_packets -
				priv->last_moder_packets[ring]));
		packets = rx_pkt_diff;
		rate = packets * HZ / period;
		avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
				priv->last_moder_bytes[ring])) / packets : 0;

		/* Apply auto-moderation only when the packet rate is high
		 * enough for it to matter */
		if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
		    avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
			if (rate < priv->pkt_rate_low)
				moder_time = priv->rx_usecs_low;
			else if (rate > priv->pkt_rate_high)
				moder_time = priv->rx_usecs_high;
			else
				moder_time = (rate - priv->pkt_rate_low) *
					(priv->rx_usecs_high - priv->rx_usecs_low) /
					(priv->pkt_rate_high - priv->pkt_rate_low) +
					priv->rx_usecs_low;
		} else {
			moder_time = priv->rx_usecs_low;
		}

		if (moder_time != priv->last_moder_time[ring]) {
			priv->last_moder_time[ring] = moder_time;
			cq = &priv->rx_cq[ring];
			cq->moder_time = moder_time;
			err = mlx4_en_set_cq_moder(priv, cq);
			if (err)
				en_err(priv, "Failed modifying moderation "
					     "for cq:%d\n", ring);
		}
		priv->last_moder_packets[ring] = rx_packets;
		priv->last_moder_bytes[ring] = rx_bytes;
	}

	priv->last_moder_jiffies = jiffies;
}

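/* Delayed work that pulls HW counters, runs the auto-moderation pass and
 * re-arms itself every STATS_DELAY while the device is up. */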
static void mlx4_en_do_get_stats(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 stats_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
	if (err)
		en_dbg(HW, priv, "Could not update stats\n");

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		if (priv->port_up)
			mlx4_en_auto_moderation(priv);

		queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	}
	if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
		queue_work(mdev->workqueue, &priv->mac_task);
		mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
	}
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_linkstate(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 linkstate_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int linkstate = priv->link_state;

	mutex_lock(&mdev->state_lock);
	/* If observable port state changed set carrier state and
	 * report to system log */
	if (priv->last_link_state != linkstate) {
		if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
			en_info(priv, "Link Down\n");
			netif_carrier_off(priv->dev);
		} else {
			en_info(priv, "Link Up\n");
			netif_carrier_on(priv->dev);
		}
	}
	priv->last_link_state = linkstate;
	mutex_unlock(&mdev->state_lock);
}

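/* Bring the port up: activate RX rings and CQs, reserve the base QP and
 * RSS steering, activate TX rings and CQs, configure and INIT the port,
 * then attach to the broadcast group and kick the multicast task. */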
int mlx4_en_start_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_cq *cq;
	struct mlx4_en_tx_ring *tx_ring;
	int rx_index = 0;
	int tx_index = 0;
	int err = 0;
	int i;
	int j;
	u8 mc_list[16] = {0};

	if (priv->port_up) {
		en_dbg(DRV, priv, "start port called while port already up\n");
		return 0;
	}

	/* Calculate Rx buf size */
	dev->mtu = min(dev->mtu, priv->max_mtu);
	mlx4_en_calc_rx_buf(dev);
	en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);

	/* Configure rx cq's and rings */
	err = mlx4_en_activate_rx_rings(priv);
	if (err) {
		en_err(priv, "Failed to activate RX rings\n");
		return err;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = &priv->rx_cq[i];

		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed activating Rx CQ\n");
			goto cq_err;
		}
		for (j = 0; j < cq->size; j++)
			cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters");
			mlx4_en_deactivate_cq(priv, cq);
			goto cq_err;
		}
		mlx4_en_arm_cq(priv, cq);
		priv->rx_ring[i].cqn = cq->mcq.cqn;
		++rx_index;
	}

	/* Set qp number */
	en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
	err = mlx4_get_eth_qp(mdev->dev, priv->port,
			      priv->mac, &priv->base_qpn);
	if (err) {
		en_err(priv, "Failed getting eth qp\n");
		goto cq_err;
	}
	mdev->mac_removed[priv->port] = 0;

	err = mlx4_en_config_rss_steer(priv);
	if (err) {
		en_err(priv, "Failed configuring rss steering\n");
		goto mac_err;
	}

	/* Configure tx cq's and rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		/* Configure cq */
		cq = &priv->tx_cq[i];
		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed allocating Tx CQ\n");
			goto tx_err;
		}
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
		cq->buf->wqe_index = cpu_to_be16(0xffff);

		/* Configure ring */
		tx_ring = &priv->tx_ring[i];
		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
					       max(0, i - MLX4_EN_NUM_TX_RINGS));
		if (err) {
			en_err(priv, "Failed allocating Tx ring\n");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		/* Set initial ownership of all Tx TXBBs to SW (1) */
		for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
			*((u32 *) (tx_ring->buf + j)) = 0xffffffff;
		++tx_index;
	}

	/* Configure port */
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    priv->prof->tx_pause,
				    priv->prof->tx_ppp,
				    priv->prof->rx_pause,
				    priv->prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations "
			     "for port %d, with error %d\n", priv->port, err);
		goto tx_err;
	}
	/* Set default qp number */
	err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
	if (err) {
		en_err(priv, "Failed setting default qp numbers\n");
		goto tx_err;
	}

	/* Init port */
	en_dbg(HW, priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto tx_err;
	}

	/* Attach rx QP to broadcast address */
	memset(&mc_list[10], 0xff, ETH_ALEN);
	mc_list[5] = priv->port;
	if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
				  0, MLX4_PROT_ETH))
		mlx4_warn(mdev, "Failed Attaching Broadcast\n");

	/* Must redo promiscuous mode setup. */
	priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);

	/* Schedule multicast task to populate multicast list */
	queue_work(mdev->workqueue, &priv->mcast_task);

	mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);

	priv->port_up = true;
	netif_tx_start_all_queues(dev);
	return 0;

tx_err:
	while (tx_index--) {
		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
		mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
	}
	mlx4_en_release_rss_steer(priv);
mac_err:
	mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
cq_err:
	while (rx_index--)
		mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
	for (i = 0; i < priv->rx_ring_num; i++)
		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);

	return err; /* need to close devices */
}

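/* Tear the port down in reverse order of mlx4_en_start_port(): stop the
 * TX queues, detach multicast groups, free TX/RX rings and CQs, release
 * the RSS steering and the port QP, and finally CLOSE the port. */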
void mlx4_en_stop_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;
	u8 mc_list[16] = {0};

	if (!priv->port_up) {
		en_dbg(DRV, priv, "stop port called while port already down\n");
		return;
	}

	/* Synchronize with tx routine */
	netif_tx_lock_bh(dev);
	netif_tx_stop_all_queues(dev);
	netif_tx_unlock_bh(dev);

	/* Set port as not active */
	priv->port_up = false;

	/* Detach All multicasts */
	memset(&mc_list[10], 0xff, ETH_ALEN);
	mc_list[5] = priv->port;
	mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
			      MLX4_PROT_ETH);
	for (i = 0; i < priv->mc_addrs_cnt; i++) {
		memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
		mc_list[5] = priv->port;
		mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
				      mc_list, MLX4_PROT_ETH);
	}
	mlx4_en_clear_list(dev);

	/* Flush multicast filter */
	mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);

	/* Free TX Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
		mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]);
	}
	msleep(10);

	for (i = 0; i < priv->tx_ring_num; i++)
		mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]);

	/* Free RSS qps */
	mlx4_en_release_rss_steer(priv);

	/* Unregister MAC address for the port */
	mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
	mdev->mac_removed[priv->port] = 1;

	/* Free RX Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
		while (test_bit(NAPI_STATE_SCHED, &priv->rx_cq[i].napi.state))
			msleep(1);
		mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
	}

	/* Close port */
	mlx4_CLOSE_PORT(mdev->dev, priv->port);
}

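/* Watchdog worker: restart the port after a TX timeout, or when a config
 * change such as an MTU update failed to bring it back up. */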
static void mlx4_en_restart(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 watchdog_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		mlx4_en_stop_port(dev);
		if (mlx4_en_start_port(dev))
			en_err(priv, "Failed restarting port %d\n", priv->port);
	}
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_clear_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;

	if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
		en_dbg(HW, priv, "Failed dumping statistics\n");

	memset(&priv->stats, 0, sizeof(priv->stats));
	memset(&priv->pstats, 0, sizeof(priv->pstats));
	memset(&priv->pkstats, 0, sizeof(priv->pkstats));
	memset(&priv->port_stats, 0, sizeof(priv->port_stats));

	for (i = 0; i < priv->tx_ring_num; i++) {
		priv->tx_ring[i].bytes = 0;
		priv->tx_ring[i].packets = 0;
		priv->tx_ring[i].tx_csum = 0;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_ring[i].bytes = 0;
		priv->rx_ring[i].packets = 0;
		priv->rx_ring[i].csum_ok = 0;
		priv->rx_ring[i].csum_none = 0;
	}
}

static int mlx4_en_open(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	mutex_lock(&mdev->state_lock);

	if (!mdev->device_up) {
		en_err(priv, "Cannot open - device down/disabled\n");
		err = -EBUSY;
		goto out;
	}

	/* Reset HW statistics and SW counters */
	mlx4_en_clear_stats(dev);

	err = mlx4_en_start_port(dev);
	if (err)
		en_err(priv, "Failed starting port:%d\n", priv->port);

out:
	mutex_unlock(&mdev->state_lock);
	return err;
}

static int mlx4_en_close(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(IFDOWN, priv, "Close port called\n");

	mutex_lock(&mdev->state_lock);

	mlx4_en_stop_port(dev);
	netif_carrier_off(dev);

	mutex_unlock(&mdev->state_lock);
	return 0;
}

void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
	int i;

	for (i = 0; i < priv->tx_ring_num; i++) {
		if (priv->tx_ring[i].tx_info)
			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
		if (priv->tx_cq[i].buf)
			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		if (priv->rx_ring[i].rx_info)
			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
				priv->prof->rx_ring_size, priv->stride);
		if (priv->rx_cq[i].buf)
			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
	}
}

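/* Allocate all per-port SW resources: a QP range for the TX rings plus
 * the TX/RX rings and their completion queues. */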
int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
	struct mlx4_en_port_profile *prof = priv->prof;
	int i;
	int base_tx_qpn, err;

	err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &base_tx_qpn);
	if (err) {
		en_err(priv, "failed reserving range for TX rings\n");
		return err;
	}

	/* Create tx Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
				      prof->tx_ring_size, i, TX))
			goto err;

		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], base_tx_qpn + i,
					   prof->tx_ring_size, TXBB_SIZE))
			goto err;
	}

	/* Create rx Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
				      prof->rx_ring_size, i, RX))
			goto err;

		if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
					   prof->rx_ring_size, priv->stride))
			goto err;
	}

	return 0;

err:
	en_err(priv, "Failed to allocate NIC resources\n");
	mlx4_qp_release_range(priv->mdev->dev, base_tx_qpn, priv->tx_ring_num);
	return -ENOMEM;
}

void mlx4_en_destroy_netdev(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);

	/* Unregister device - this will close the port if it was up */
	if (priv->registered)
		unregister_netdev(dev);

	if (priv->allocated)
		mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);

	cancel_delayed_work(&priv->stats_task);
	/* flush any pending task for this netdev */
	flush_workqueue(mdev->workqueue);

	/* Detach the netdev so tasks would not attempt to access it */
	mutex_lock(&mdev->state_lock);
	mdev->pndev[priv->port] = NULL;
	mutex_unlock(&mdev->state_lock);

	mlx4_en_free_resources(priv);
	free_netdev(dev);
}

static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
	       dev->mtu, new_mtu);

	if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
		en_err(priv, "Bad MTU size:%d.\n", new_mtu);
		return -EPERM;
	}
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		mutex_lock(&mdev->state_lock);
		if (!mdev->device_up) {
			/* NIC is probably restarting - let watchdog task reset
			 * the port */
			en_dbg(DRV, priv, "Change MTU called with card down!?\n");
		} else {
			mlx4_en_stop_port(dev);
			err = mlx4_en_start_port(dev);
			if (err) {
				en_err(priv, "Failed restarting port:%d\n",
				       priv->port);
				queue_work(mdev->workqueue, &priv->watchdog_task);
			}
		}
		mutex_unlock(&mdev->state_lock);
	}
	return 0;
}

static int mlx4_en_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct mlx4_en_priv *priv = netdev_priv(netdev);

	if (features & NETIF_F_LOOPBACK)
		priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
	else
		priv->ctrl_flags &=
			cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK);

	return 0;
}

static const struct net_device_ops mlx4_netdev_ops = {
	.ndo_open		= mlx4_en_open,
	.ndo_stop		= mlx4_en_close,
	.ndo_start_xmit		= mlx4_en_xmit,
	.ndo_select_queue	= mlx4_en_select_queue,
	.ndo_get_stats		= mlx4_en_get_stats,
	.ndo_set_rx_mode	= mlx4_en_set_multicast,
	.ndo_set_mac_address	= mlx4_en_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= mlx4_en_change_mtu,
	.ndo_tx_timeout		= mlx4_en_tx_timeout,
	.ndo_vlan_rx_add_vid	= mlx4_en_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= mlx4_en_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mlx4_en_netpoll,
#endif
	.ndo_set_features	= mlx4_en_set_features,
	.ndo_setup_tc		= mlx4_en_setup_tc,
};

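/* Create and register the net_device for one physical port: allocate the
 * multiqueue netdev, initialize private data and work items, set up the
 * entry points and feature flags, then register and INIT the port. */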
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
			struct mlx4_en_port_profile *prof)
{
	struct net_device *dev;
	struct mlx4_en_priv *priv;
	int i;
	int err;

	dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
				 prof->tx_ring_num, prof->rx_ring_num);
	if (dev == NULL)
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
	dev->dev_id = port - 1;

	/*
	 * Initialize driver private data
	 */
	priv = netdev_priv(dev);
	memset(priv, 0, sizeof(struct mlx4_en_priv));
	priv->dev = dev;
	priv->mdev = mdev;
	priv->ddev = &mdev->pdev->dev;
	priv->prof = prof;
	priv->port = port;
	priv->port_up = false;
	priv->flags = prof->flags;
	priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
				       MLX4_WQE_CTRL_SOLICITED);
	priv->tx_ring_num = prof->tx_ring_num;
	priv->rx_ring_num = prof->rx_ring_num;
	priv->mac_index = -1;
	priv->msg_enable = MLX4_EN_MSG_LEVEL;
	spin_lock_init(&priv->stats_lock);
	INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast);
	INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
	INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
	INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
#ifdef CONFIG_MLX4_EN_DCB
	if (!mlx4_is_slave(priv->mdev->dev))
		dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
#endif

	/* Query for default mac and max mtu */
	priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
	priv->mac = mdev->dev->caps.def_mac[priv->port];
	if (ILLEGAL_MAC(priv->mac)) {
		en_err(priv, "Port: %d, invalid mac burned: 0x%llx, quitting\n",
		       priv->port, priv->mac);
		err = -EINVAL;
		goto out;
	}

	priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					  DS_SIZE * MLX4_EN_MAX_RX_FRAGS);

	err = mlx4_en_alloc_resources(priv);
	if (err)
		goto out;

	/* Allocate page for receive rings */
	err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
				 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
	if (err) {
		en_err(priv, "Failed to allocate page for rx qps\n");
		goto out;
	}
	priv->allocated = 1;

	/*
	 * Initialize netdev entry points
	 */
	dev->netdev_ops = &mlx4_netdev_ops;
	dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
	netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);

	netdev_set_num_tc(dev, MLX4_EN_NUM_UP);

	/* First 9 rings are for UP 0 */
	netdev_set_tc_queue(dev, 0, MLX4_EN_NUM_TX_RINGS + 1, 0);

	/* Partition Tx queues evenly amongst UP's 1-7 */
	for (i = 1; i < MLX4_EN_NUM_UP; i++)
		netdev_set_tc_queue(dev, i, 1, MLX4_EN_NUM_TX_RINGS + i);

	SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);

	/* Set default MAC */
	dev->addr_len = ETH_ALEN;
	for (i = 0; i < ETH_ALEN; i++) {
		dev->dev_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
		dev->perm_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
	}

	/*
	 * Set driver features
	 */
	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	if (mdev->LSO_support)
		dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
	dev->vlan_features = dev->hw_features;
	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
	dev->features = dev->hw_features | NETIF_F_HIGHDMA |
			NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
			NETIF_F_HW_VLAN_FILTER;
	dev->hw_features |= NETIF_F_LOOPBACK;

	mdev->pndev[port] = dev;

	netif_carrier_off(dev);
	err = register_netdev(dev);
	if (err) {
		en_err(priv, "Netdev registration failed for port %d\n", port);
		goto out;
	}
	priv->registered = 1;

	en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
	en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);

	/* Configure port */
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    MLX4_EN_MIN_MTU,
				    0, 0, 0, 0);
	if (err) {
		en_err(priv, "Failed setting port general configurations "
			     "for port %d, with error %d\n", priv->port, err);
		goto out;
	}

	/* Init port */
	en_warn(priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto out;
	}
	mlx4_en_set_default_moderation(priv);
	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	return 0;

out:
	mlx4_en_destroy_netdev(dev);
	return err;
}