/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/kernel.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/mlx4/driver.h>

#include "mlx4_en.h"
#include "en_port.h"

#define EN_ETHTOOL_QP_ATTACH (1ull << 63)
#define EN_ETHTOOL_SHORT_MASK cpu_to_be16(0xffff)
#define EN_ETHTOOL_WORD_MASK  cpu_to_be32(0xffffffff)
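
/* ethtool -i: report driver name and version, firmware version and PCI bus info */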
static void
mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")",
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		"%d.%d.%d",
		(u16) (mdev->dev->caps.fw_ver >> 32),
		(u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
		(u16) (mdev->dev->caps.fw_ver & 0xffff));
	strlcpy(drvinfo->bus_info, pci_name(mdev->dev->pdev),
		sizeof(drvinfo->bus_info));
	drvinfo->n_stats = 0;
	drvinfo->regdump_len = 0;
	drvinfo->eedump_len = 0;
}
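
/* Statistics names reported for ETH_SS_STATS. The ordering here must match
 * the order in which mlx4_en_get_ethtool_stats() fills the data array:
 * netdevice stats first, then port statistics, then per-priority packet
 * statistics; per-ring tx/rx counters are appended at runtime.
 */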
static const char main_strings[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",

	/* port statistics */
	"tso_packets",
	"queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed",
	"rx_csum_good", "rx_csum_none", "tx_chksum_offload",

	/* packet statistics */
	"broadcast", "rx_prio_0", "rx_prio_1", "rx_prio_2", "rx_prio_3",
	"rx_prio_4", "rx_prio_5", "rx_prio_6", "rx_prio_7", "tx_prio_0",
	"tx_prio_1", "tx_prio_2", "tx_prio_3", "tx_prio_4", "tx_prio_5",
	"tx_prio_6", "tx_prio_7",
};
#define NUM_MAIN_STATS	21
#define NUM_ALL_STATS	(NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + NUM_PERF_STATS)

static const char mlx4_en_test_names[][ETH_GSTRING_LEN] = {
	"Interrupt Test",
	"Link Test",
	"Speed Test",
	"Register Test",
	"Loopback Test",
};

static u32 mlx4_en_get_msglevel(struct net_device *dev)
{
	return ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable;
}

static void mlx4_en_set_msglevel(struct net_device *dev, u32 val)
{
	((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable = val;
}
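
/* Wake-on-LAN: only ports 1 and 2 are valid, only WAKE_MAGIC is supported,
 * and only when the device reports the per-port WoL capability flag.
 */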
static void mlx4_en_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
{
	struct mlx4_en_priv *priv = netdev_priv(netdev);
	int err = 0;
	u64 config = 0;
	u64 mask;

	if ((priv->port < 1) || (priv->port > 2)) {
		en_err(priv, "Failed to get WoL information\n");
		return;
	}

	mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
		MLX4_DEV_CAP_FLAG_WOL_PORT2;

	if (!(priv->mdev->dev->caps.flags & mask)) {
		wol->supported = 0;
		wol->wolopts = 0;
		return;
	}

	err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
	if (err) {
		en_err(priv, "Failed to get WoL information\n");
		return;
	}

	if (config & MLX4_EN_WOL_MAGIC)
		wol->supported = WAKE_MAGIC;
	else
		wol->supported = 0;

	if (config & MLX4_EN_WOL_ENABLED)
		wol->wolopts = WAKE_MAGIC;
	else
		wol->wolopts = 0;
}

static int mlx4_en_set_wol(struct net_device *netdev,
			   struct ethtool_wolinfo *wol)
{
	struct mlx4_en_priv *priv = netdev_priv(netdev);
	u64 config = 0;
	int err = 0;
	u64 mask;

	if ((priv->port < 1) || (priv->port > 2))
		return -EOPNOTSUPP;

	mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
		MLX4_DEV_CAP_FLAG_WOL_PORT2;

	if (!(priv->mdev->dev->caps.flags & mask))
		return -EOPNOTSUPP;

	if (wol->supported & ~WAKE_MAGIC)
		return -EINVAL;

	err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
	if (err) {
		en_err(priv, "Failed to get WoL info, unable to modify\n");
		return err;
	}

	if (wol->wolopts & WAKE_MAGIC) {
		config |= MLX4_EN_WOL_DO_MODIFY | MLX4_EN_WOL_ENABLED |
			MLX4_EN_WOL_MAGIC;
	} else {
		config &= ~(MLX4_EN_WOL_ENABLED | MLX4_EN_WOL_MAGIC);
		config |= MLX4_EN_WOL_DO_MODIFY;
	}

	err = mlx4_wol_write(priv->mdev->dev, config, priv->port);
	if (err)
		en_err(priv, "Failed to set WoL information\n");

	return err;
}
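
/* Number of strings/stats per string set. For ETH_SS_STATS the count depends
 * on whether a stats bitmap is in use, plus two counters (packets/bytes) per
 * TX and RX ring. For ETH_SS_TEST the two loopback-based tests are dropped
 * when the device lacks MLX4_DEV_CAP_FLAG_UC_LOOPBACK.
 */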
static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int bit_count = hweight64(priv->stats_bitmap);

	switch (sset) {
	case ETH_SS_STATS:
		return (priv->stats_bitmap ? bit_count : NUM_ALL_STATS) +
			(priv->tx_ring_num + priv->rx_ring_num) * 2;
	case ETH_SS_TEST:
		return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
					& MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
	default:
		return -EOPNOTSUPP;
	}
}

static void mlx4_en_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, uint64_t *data)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int index = 0;
	int i, j = 0;

	spin_lock_bh(&priv->stats_lock);

	if (!(priv->stats_bitmap)) {
		for (i = 0; i < NUM_MAIN_STATS; i++)
			data[index++] =
				((unsigned long *) &priv->stats)[i];
		for (i = 0; i < NUM_PORT_STATS; i++)
			data[index++] =
				((unsigned long *) &priv->port_stats)[i];
		for (i = 0; i < NUM_PKT_STATS; i++)
			data[index++] =
				((unsigned long *) &priv->pkstats)[i];
	} else {
		for (i = 0; i < NUM_MAIN_STATS; i++) {
			if ((priv->stats_bitmap >> j) & 1)
				data[index++] =
					((unsigned long *) &priv->stats)[i];
			j++;
		}
		for (i = 0; i < NUM_PORT_STATS; i++) {
			if ((priv->stats_bitmap >> j) & 1)
				data[index++] =
					((unsigned long *) &priv->port_stats)[i];
			j++;
		}
	}
	for (i = 0; i < priv->tx_ring_num; i++) {
		data[index++] = priv->tx_ring[i].packets;
		data[index++] = priv->tx_ring[i].bytes;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		data[index++] = priv->rx_ring[i].packets;
		data[index++] = priv->rx_ring[i].bytes;
	}
	spin_unlock_bh(&priv->stats_lock);
}

static void mlx4_en_self_test(struct net_device *dev,
			      struct ethtool_test *etest, u64 *buf)
{
	mlx4_en_ex_selftest(dev, &etest->flags, buf);
}
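
/* String names for the self-test and statistics sets; the ETH_SS_STATS
 * ordering mirrors mlx4_en_get_ethtool_stats() above.
 */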
static void mlx4_en_get_strings(struct net_device *dev,
				uint32_t stringset, uint8_t *data)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int index = 0;
	int i;

	switch (stringset) {
	case ETH_SS_TEST:
		for (i = 0; i < MLX4_EN_NUM_SELF_TEST - 2; i++)
			strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
		if (priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK)
			for (; i < MLX4_EN_NUM_SELF_TEST; i++)
				strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
		break;

	case ETH_SS_STATS:
		/* Add main counters */
		if (!priv->stats_bitmap) {
			for (i = 0; i < NUM_MAIN_STATS; i++)
				strcpy(data + (index++) * ETH_GSTRING_LEN,
					main_strings[i]);
			for (i = 0; i < NUM_PORT_STATS; i++)
				strcpy(data + (index++) * ETH_GSTRING_LEN,
					main_strings[i + NUM_MAIN_STATS]);
			for (i = 0; i < NUM_PKT_STATS; i++)
				strcpy(data + (index++) * ETH_GSTRING_LEN,
					main_strings[i + NUM_MAIN_STATS +
						     NUM_PORT_STATS]);
		} else
			for (i = 0; i < NUM_MAIN_STATS + NUM_PORT_STATS; i++) {
				if ((priv->stats_bitmap >> i) & 1) {
					strcpy(data +
					       (index++) * ETH_GSTRING_LEN,
					       main_strings[i]);
				}
				if (!(priv->stats_bitmap >> i))
					break;
			}
		for (i = 0; i < priv->tx_ring_num; i++) {
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"tx%d_packets", i);
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"tx%d_bytes", i);
		}
		for (i = 0; i < priv->rx_ring_num; i++) {
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"rx%d_packets", i);
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"rx%d_bytes", i);
		}
		break;
	}
}
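
/* Link settings: the port always runs 10G full duplex without autonegotiation;
 * the reported connector type is derived from the transceiver type returned
 * by QUERY_PORT.
 */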
static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int trans_type;

	cmd->autoneg = AUTONEG_DISABLE;
	cmd->supported = SUPPORTED_10000baseT_Full;
	cmd->advertising = ADVERTISED_10000baseT_Full;

	if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
		return -ENOMEM;

	trans_type = priv->port_state.transciver;
	if (netif_carrier_ok(dev)) {
		ethtool_cmd_speed_set(cmd, priv->port_state.link_speed);
		cmd->duplex = DUPLEX_FULL;
	} else {
		ethtool_cmd_speed_set(cmd, -1);
		cmd->duplex = -1;
	}

	if (trans_type > 0 && trans_type <= 0xC) {
		cmd->port = PORT_FIBRE;
		cmd->transceiver = XCVR_EXTERNAL;
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->advertising |= ADVERTISED_FIBRE;
	} else if (trans_type == 0x80 || trans_type == 0) {
		cmd->port = PORT_TP;
		cmd->transceiver = XCVR_INTERNAL;
		cmd->supported |= SUPPORTED_TP;
		cmd->advertising |= ADVERTISED_TP;
	} else {
		cmd->port = -1;
		cmd->transceiver = -1;
	}
	return 0;
}

static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	if ((cmd->autoneg == AUTONEG_ENABLE) ||
	    (ethtool_cmd_speed(cmd) != SPEED_10000) ||
	    (cmd->duplex != DUPLEX_FULL))
		return -EINVAL;

	/* Nothing to change */
	return 0;
}
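
/* Interrupt coalescing (moderation) parameters, kept in priv and applied
 * per completion queue.
 */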
static int mlx4_en_get_coalesce(struct net_device *dev,
				struct ethtool_coalesce *coal)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	coal->tx_coalesce_usecs = priv->tx_usecs;
	coal->tx_max_coalesced_frames = priv->tx_frames;
	coal->rx_coalesce_usecs = priv->rx_usecs;
	coal->rx_max_coalesced_frames = priv->rx_frames;

	coal->pkt_rate_low = priv->pkt_rate_low;
	coal->rx_coalesce_usecs_low = priv->rx_usecs_low;
	coal->pkt_rate_high = priv->pkt_rate_high;
	coal->rx_coalesce_usecs_high = priv->rx_usecs_high;
	coal->rate_sample_interval = priv->sample_interval;
	coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal;
	return 0;
}

static int mlx4_en_set_coalesce(struct net_device *dev,
				struct ethtool_coalesce *coal)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int err, i;

	priv->rx_frames = (coal->rx_max_coalesced_frames ==
			   MLX4_EN_AUTO_CONF) ?
				MLX4_EN_RX_COAL_TARGET :
				coal->rx_max_coalesced_frames;
	priv->rx_usecs = (coal->rx_coalesce_usecs ==
			  MLX4_EN_AUTO_CONF) ?
				MLX4_EN_RX_COAL_TIME :
				coal->rx_coalesce_usecs;

	/* Setting TX coalescing parameters */
	if (coal->tx_coalesce_usecs != priv->tx_usecs ||
	    coal->tx_max_coalesced_frames != priv->tx_frames) {
		priv->tx_usecs = coal->tx_coalesce_usecs;
		priv->tx_frames = coal->tx_max_coalesced_frames;
		for (i = 0; i < priv->tx_ring_num; i++) {
			priv->tx_cq[i].moder_cnt = priv->tx_frames;
			priv->tx_cq[i].moder_time = priv->tx_usecs;
			if (mlx4_en_set_cq_moder(priv, &priv->tx_cq[i])) {
				en_warn(priv, "Failed changing moderation "
					      "for TX cq %d\n", i);
			}
		}
	}

	/* Set adaptive coalescing params */
	priv->pkt_rate_low = coal->pkt_rate_low;
	priv->rx_usecs_low = coal->rx_coalesce_usecs_low;
	priv->pkt_rate_high = coal->pkt_rate_high;
	priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
	priv->sample_interval = coal->rate_sample_interval;
	priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
	if (priv->adaptive_rx_coal)
		return 0;

	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_cq[i].moder_cnt = priv->rx_frames;
		priv->rx_cq[i].moder_time = priv->rx_usecs;
		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
		err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
		if (err)
			return err;
	}
	return 0;
}
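
/* Flow control: store the requested pause settings in the port profile and
 * push them to the device via SET_PORT.
 */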
static int mlx4_en_set_pauseparam(struct net_device *dev,
				  struct ethtool_pauseparam *pause)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	priv->prof->tx_pause = pause->tx_pause != 0;
	priv->prof->rx_pause = pause->rx_pause != 0;
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    priv->prof->tx_pause,
				    priv->prof->tx_ppp,
				    priv->prof->rx_pause,
				    priv->prof->rx_ppp);
	if (err)
		en_err(priv, "Failed setting pause params\n");

	return err;
}

static void mlx4_en_get_pauseparam(struct net_device *dev,
				   struct ethtool_pauseparam *pause)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	pause->tx_pause = priv->prof->tx_pause;
	pause->rx_pause = priv->prof->rx_pause;
}
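
/* Resize TX/RX rings: sizes are rounded up to a power of two and clamped to
 * the driver limits; the port is stopped, resources are reallocated, and the
 * port is restarted if it was up.
 */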
static int mlx4_en_set_ringparam(struct net_device *dev,
				 struct ethtool_ringparam *param)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	u32 rx_size, tx_size;
	int port_up = 0;
	int err = 0;
	int i;

	if (param->rx_jumbo_pending || param->rx_mini_pending)
		return -EINVAL;

	rx_size = roundup_pow_of_two(param->rx_pending);
	rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
	rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE);
	tx_size = roundup_pow_of_two(param->tx_pending);
	tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
	tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);

	if (rx_size == (priv->port_up ? priv->rx_ring[0].actual_size :
					priv->rx_ring[0].size) &&
	    tx_size == priv->tx_ring[0].size)
		return 0;

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev);
	}

	mlx4_en_free_resources(priv);

	priv->prof->tx_ring_size = tx_size;
	priv->prof->rx_ring_size = rx_size;

	err = mlx4_en_alloc_resources(priv);
	if (err) {
		en_err(priv, "Failed reallocating port resources\n");
		goto out;
	}
	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err)
			en_err(priv, "Failed starting port\n");
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_cq[i].moder_cnt = priv->rx_frames;
		priv->rx_cq[i].moder_time = priv->rx_usecs;
		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
		err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
		if (err)
			goto out;
	}

out:
	mutex_unlock(&mdev->state_lock);
	return err;
}

static void mlx4_en_get_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *param)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	memset(param, 0, sizeof(*param));
	param->rx_max_pending = MLX4_EN_MAX_RX_SIZE;
	param->tx_max_pending = MLX4_EN_MAX_TX_SIZE;
	param->rx_pending = priv->port_up ?
		priv->rx_ring[0].actual_size : priv->rx_ring[0].size;
	param->tx_pending = priv->tx_ring[0].size;
}
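
/* RSS indirection table handling */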
static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	return priv->rx_ring_num;
}

static int mlx4_en_get_rxfh_indir(struct net_device *dev, u32 *ring_index)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	int rss_rings;
	size_t n = priv->rx_ring_num;
	int err = 0;

	rss_rings = priv->prof->rss_rings ?: priv->rx_ring_num;

	while (n--) {
		ring_index[n] = rss_map->qps[n % rss_rings].qpn -
			rss_map->base_qpn;
	}

	return err;
}

static int mlx4_en_set_rxfh_indir(struct net_device *dev,
		const u32 *ring_index)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int port_up = 0;
	int err = 0;
	int i;
	int rss_rings = 0;

	/* Calculate RSS table size and make sure flows are spread evenly
	 * between rings
	 */
	for (i = 0; i < priv->rx_ring_num; i++) {
		if (i > 0 && !ring_index[i] && !rss_rings)
			rss_rings = i;

		if (ring_index[i] != (i % (rss_rings ?: priv->rx_ring_num)))
			return -EINVAL;
	}

	if (!rss_rings)
		rss_rings = priv->rx_ring_num;

	/* RSS table size must be a power of 2 */
	if (!is_power_of_2(rss_rings))
		return -EINVAL;

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev);
	}

	priv->prof->rss_rings = rss_rings;

	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err)
			en_err(priv, "Failed starting port\n");
	}

	mutex_unlock(&mdev->state_lock);
	return err;
}
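
/* Flow steering (ethtool -N/-U): validate ethtool_rx_flow_spec rules and
 * translate them into mlx4 net_trans_rule specifications.
 */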
#define all_zeros_or_all_ones(field)	\
	((field) == 0 || (field) == (__force typeof(field))-1)

static int mlx4_en_validate_flow(struct net_device *dev,
		struct ethtool_rxnfc *cmd)
{
	struct ethtool_usrip4_spec *l3_mask;
	struct ethtool_tcpip4_spec *l4_mask;
	struct ethhdr *eth_mask;
	u64 full_mac = ~0ull;
	u64 zero_mac = 0;

	if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
		return -EINVAL;

	switch (cmd->fs.flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		if (cmd->fs.m_u.tcp_ip4_spec.tos)
			return -EINVAL;
		l4_mask = &cmd->fs.m_u.tcp_ip4_spec;
		/* don't allow mask which isn't all 0 or 1 */
		if (!all_zeros_or_all_ones(l4_mask->ip4src) ||
		    !all_zeros_or_all_ones(l4_mask->ip4dst) ||
		    !all_zeros_or_all_ones(l4_mask->psrc) ||
		    !all_zeros_or_all_ones(l4_mask->pdst))
			return -EINVAL;
		break;
	case IP_USER_FLOW:
		l3_mask = &cmd->fs.m_u.usr_ip4_spec;
		if (l3_mask->l4_4_bytes || l3_mask->tos || l3_mask->proto ||
		    cmd->fs.h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4 ||
		    (!l3_mask->ip4src && !l3_mask->ip4dst) ||
		    !all_zeros_or_all_ones(l3_mask->ip4src) ||
		    !all_zeros_or_all_ones(l3_mask->ip4dst))
			return -EINVAL;
		break;
	case ETHER_FLOW:
		eth_mask = &cmd->fs.m_u.ether_spec;
		/* source mac mask must not be set */
		if (memcmp(eth_mask->h_source, &zero_mac, ETH_ALEN))
			return -EINVAL;

		/* dest mac mask must be ff:ff:ff:ff:ff:ff */
		if (memcmp(eth_mask->h_dest, &full_mac, ETH_ALEN))
			return -EINVAL;

		if (!all_zeros_or_all_ones(eth_mask->h_proto))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if ((cmd->fs.flow_type & FLOW_EXT)) {
		if (cmd->fs.m_ext.vlan_etype ||
		    !(cmd->fs.m_ext.vlan_tci == 0 ||
		      cmd->fs.m_ext.vlan_tci == cpu_to_be16(0xfff)))
			return -EINVAL;
	}

	return 0;
}
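
/* Build the L3 (and L4) spec list entries for IP and TCP/UDP classification
 * rules; only fields with a non-zero ethtool mask get a hardware mask.
 */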
static int add_ip_rule(struct mlx4_en_priv *priv,
			struct ethtool_rxnfc *cmd,
			struct list_head *list_h)
{
	struct mlx4_spec_list *spec_l3;
	struct ethtool_usrip4_spec *l3_mask = &cmd->fs.m_u.usr_ip4_spec;

	spec_l3 = kzalloc(sizeof *spec_l3, GFP_KERNEL);
	if (!spec_l3) {
		en_err(priv, "Fail to alloc ethtool rule.\n");
		return -ENOMEM;
	}

	spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
	spec_l3->ipv4.src_ip = cmd->fs.h_u.usr_ip4_spec.ip4src;
	if (l3_mask->ip4src)
		spec_l3->ipv4.src_ip_msk = EN_ETHTOOL_WORD_MASK;
	spec_l3->ipv4.dst_ip = cmd->fs.h_u.usr_ip4_spec.ip4dst;
	if (l3_mask->ip4dst)
		spec_l3->ipv4.dst_ip_msk = EN_ETHTOOL_WORD_MASK;
	list_add_tail(&spec_l3->list, list_h);

	return 0;
}

static int add_tcp_udp_rule(struct mlx4_en_priv *priv,
			     struct ethtool_rxnfc *cmd,
			     struct list_head *list_h, int proto)
{
	struct mlx4_spec_list *spec_l3;
	struct mlx4_spec_list *spec_l4;
	struct ethtool_tcpip4_spec *l4_mask = &cmd->fs.m_u.tcp_ip4_spec;

	spec_l3 = kzalloc(sizeof *spec_l3, GFP_KERNEL);
	spec_l4 = kzalloc(sizeof *spec_l4, GFP_KERNEL);
	if (!spec_l4 || !spec_l3) {
		en_err(priv, "Fail to alloc ethtool rule.\n");
		kfree(spec_l3);
		kfree(spec_l4);
		return -ENOMEM;
	}

	spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;

	if (proto == TCP_V4_FLOW) {
		spec_l4->id = MLX4_NET_TRANS_RULE_ID_TCP;
		spec_l3->ipv4.src_ip = cmd->fs.h_u.tcp_ip4_spec.ip4src;
		spec_l3->ipv4.dst_ip = cmd->fs.h_u.tcp_ip4_spec.ip4dst;
		spec_l4->tcp_udp.src_port = cmd->fs.h_u.tcp_ip4_spec.psrc;
		spec_l4->tcp_udp.dst_port = cmd->fs.h_u.tcp_ip4_spec.pdst;
	} else {
		spec_l4->id = MLX4_NET_TRANS_RULE_ID_UDP;
		spec_l3->ipv4.src_ip = cmd->fs.h_u.udp_ip4_spec.ip4src;
		spec_l3->ipv4.dst_ip = cmd->fs.h_u.udp_ip4_spec.ip4dst;
		spec_l4->tcp_udp.src_port = cmd->fs.h_u.udp_ip4_spec.psrc;
		spec_l4->tcp_udp.dst_port = cmd->fs.h_u.udp_ip4_spec.pdst;
	}

	if (l4_mask->ip4src)
		spec_l3->ipv4.src_ip_msk = EN_ETHTOOL_WORD_MASK;
	if (l4_mask->ip4dst)
		spec_l3->ipv4.dst_ip_msk = EN_ETHTOOL_WORD_MASK;

	if (l4_mask->psrc)
		spec_l4->tcp_udp.src_port_msk = EN_ETHTOOL_SHORT_MASK;
	if (l4_mask->pdst)
		spec_l4->tcp_udp.dst_port_msk = EN_ETHTOOL_SHORT_MASK;

	list_add_tail(&spec_l3->list, list_h);
	list_add_tail(&spec_l4->list, list_h);

	return 0;
}

static int mlx4_en_ethtool_to_net_trans_rule(struct net_device *dev,
		struct ethtool_rxnfc *cmd,
		struct list_head *rule_list_h)
{
	int err;
	u64 mac;
	__be64 be_mac;
	struct ethhdr *eth_spec;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_spec_list *spec_l2;
	__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);

	err = mlx4_en_validate_flow(dev, cmd);
	if (err)
		return err;

	spec_l2 = kzalloc(sizeof *spec_l2, GFP_KERNEL);
	if (!spec_l2)
		return -ENOMEM;

	mac = priv->mac & MLX4_MAC_MASK;
	be_mac = cpu_to_be64(mac << 16);

	spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH;
	memcpy(spec_l2->eth.dst_mac_msk, &mac_msk, ETH_ALEN);
	if ((cmd->fs.flow_type & ~FLOW_EXT) != ETHER_FLOW)
		memcpy(spec_l2->eth.dst_mac, &be_mac, ETH_ALEN);

	if ((cmd->fs.flow_type & FLOW_EXT) && cmd->fs.m_ext.vlan_tci) {
		spec_l2->eth.vlan_id = cmd->fs.h_ext.vlan_tci;
		spec_l2->eth.vlan_id_msk = cpu_to_be16(0xfff);
	}

	list_add_tail(&spec_l2->list, rule_list_h);

	switch (cmd->fs.flow_type & ~FLOW_EXT) {
	case ETHER_FLOW:
		eth_spec = &cmd->fs.h_u.ether_spec;
		memcpy(&spec_l2->eth.dst_mac, eth_spec->h_dest, ETH_ALEN);
		spec_l2->eth.ether_type = eth_spec->h_proto;
		if (eth_spec->h_proto)
			spec_l2->eth.ether_type_enable = 1;
		break;
	case IP_USER_FLOW:
		err = add_ip_rule(priv, cmd, rule_list_h);
		break;
	case TCP_V4_FLOW:
		err = add_tcp_udp_rule(priv, cmd, rule_list_h, TCP_V4_FLOW);
		break;
	case UDP_V4_FLOW:
		err = add_tcp_udp_rule(priv, cmd, rule_list_h, UDP_V4_FLOW);
		break;
	}

	return err;
}
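
/* Insert or replace the steering rule at cmd->fs.location: resolve the
 * destination QP from the ring cookie, detach any existing rule at that
 * location, then attach the new rule and remember its registration id.
 */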
static int mlx4_en_flow_replace(struct net_device *dev,
		struct ethtool_rxnfc *cmd)
{
	int err;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct ethtool_flow_id *loc_rule;
	struct mlx4_spec_list *spec, *tmp_spec;
	u32 qpn;
	u64 reg_id;

	struct mlx4_net_trans_rule rule = {
		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
		.exclusive = 0,
		.allow_loopback = 1,
		.promisc_mode = MLX4_FS_PROMISC_NONE,
	};

	rule.port = priv->port;
	rule.priority = MLX4_DOMAIN_ETHTOOL | cmd->fs.location;
	INIT_LIST_HEAD(&rule.list);

	/* Allow direct QP attaches if the EN_ETHTOOL_QP_ATTACH flag is set */
	if (cmd->fs.ring_cookie == RX_CLS_FLOW_DISC)
		qpn = priv->drop_qp.qpn;
	else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) {
		qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1);
	} else {
		if (cmd->fs.ring_cookie >= priv->rx_ring_num) {
			en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist.\n",
				cmd->fs.ring_cookie);
			return -EINVAL;
		}
		qpn = priv->rss_map.qps[cmd->fs.ring_cookie].qpn;
		if (!qpn) {
			en_warn(priv, "rxnfc: RX ring (%llu) is inactive.\n",
				cmd->fs.ring_cookie);
			return -EINVAL;
		}
	}
	rule.qpn = qpn;
	err = mlx4_en_ethtool_to_net_trans_rule(dev, cmd, &rule.list);
	if (err)
		goto out_free_list;

	loc_rule = &priv->ethtool_rules[cmd->fs.location];
	if (loc_rule->id) {
		err = mlx4_flow_detach(priv->mdev->dev, loc_rule->id);
		if (err) {
			en_err(priv, "Fail to detach network rule at location %d. registration id = %llx\n",
			       cmd->fs.location, loc_rule->id);
			goto out_free_list;
		}
		loc_rule->id = 0;
		memset(&loc_rule->flow_spec, 0,
		       sizeof(struct ethtool_rx_flow_spec));
	}
	err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id);
	if (err) {
		en_err(priv, "Fail to attach network rule at location %d.\n",
		       cmd->fs.location);
		goto out_free_list;
	}
	loc_rule->id = reg_id;
	memcpy(&loc_rule->flow_spec, &cmd->fs,
	       sizeof(struct ethtool_rx_flow_spec));

out_free_list:
	list_for_each_entry_safe(spec, tmp_spec, &rule.list, list) {
		list_del(&spec->list);
		kfree(spec);
	}
	return err;
}

static int mlx4_en_flow_detach(struct net_device *dev,
		struct ethtool_rxnfc *cmd)
{
	int err = 0;
	struct ethtool_flow_id *rule;
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
		return -EINVAL;

	rule = &priv->ethtool_rules[cmd->fs.location];
	if (!rule->id) {
		err = -ENOENT;
		goto out;
	}

	err = mlx4_flow_detach(priv->mdev->dev, rule->id);
	if (err) {
		en_err(priv, "Fail to detach network rule at location %d. registration id = 0x%llx\n",
		       cmd->fs.location, rule->id);
		goto out;
	}
	rule->id = 0;
	memset(&rule->flow_spec, 0, sizeof(struct ethtool_rx_flow_spec));
out:
	return err;
}

static int mlx4_en_get_flow(struct net_device *dev, struct ethtool_rxnfc *cmd,
			    int loc)
{
	int err = 0;
	struct ethtool_flow_id *rule;
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES)
		return -EINVAL;

	rule = &priv->ethtool_rules[loc];
	if (rule->id)
		memcpy(&cmd->fs, &rule->flow_spec,
		       sizeof(struct ethtool_rx_flow_spec));
	else
		err = -ENOENT;

	return err;
}

static int mlx4_en_get_num_flows(struct mlx4_en_priv *priv)
{
	int i, res = 0;

	for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
		if (priv->ethtool_rules[i].id)
			res++;
	}
	return res;
}

static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			     u32 *rule_locs)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;
	int i = 0, priority = 0;

	if ((cmd->cmd == ETHTOOL_GRXCLSRLCNT ||
	     cmd->cmd == ETHTOOL_GRXCLSRULE ||
	     cmd->cmd == ETHTOOL_GRXCLSRLALL) &&
	    mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED)
		return -EINVAL;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = priv->rx_ring_num;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = mlx4_en_get_num_flows(priv);
		break;
	case ETHTOOL_GRXCLSRULE:
		err = mlx4_en_get_flow(dev, cmd, cmd->fs.location);
		break;
	case ETHTOOL_GRXCLSRLALL:
		while ((!err || err == -ENOENT) && priority < cmd->rule_cnt) {
			err = mlx4_en_get_flow(dev, cmd, i);
			if (!err)
				rule_locs[priority++] = i;
			i++;
		}
		err = 0;
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int mlx4_en_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	int err = 0;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED)
		return -EINVAL;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		err = mlx4_en_flow_replace(dev, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		err = mlx4_en_flow_detach(dev, cmd);
		break;
	default:
		en_warn(priv, "Unsupported ethtool command. (%d)\n", cmd->cmd);
		return -EINVAL;
	}

	return err;
}
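
/* ethtool callback table registered by the mlx4_en netdevice */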
const struct ethtool_ops mlx4_en_ethtool_ops = {
	.get_drvinfo = mlx4_en_get_drvinfo,
	.get_settings = mlx4_en_get_settings,
	.set_settings = mlx4_en_set_settings,
	.get_link = ethtool_op_get_link,
	.get_strings = mlx4_en_get_strings,
	.get_sset_count = mlx4_en_get_sset_count,
	.get_ethtool_stats = mlx4_en_get_ethtool_stats,
	.self_test = mlx4_en_self_test,
	.get_wol = mlx4_en_get_wol,
	.set_wol = mlx4_en_set_wol,
	.get_msglevel = mlx4_en_get_msglevel,
	.set_msglevel = mlx4_en_set_msglevel,
	.get_coalesce = mlx4_en_get_coalesce,
	.set_coalesce = mlx4_en_set_coalesce,
	.get_pauseparam = mlx4_en_get_pauseparam,
	.set_pauseparam = mlx4_en_set_pauseparam,
	.get_ringparam = mlx4_en_get_ringparam,
	.set_ringparam = mlx4_en_set_ringparam,
	.get_rxnfc = mlx4_en_get_rxnfc,
	.set_rxnfc = mlx4_en_set_rxnfc,
	.get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size,
	.get_rxfh_indir = mlx4_en_get_rxfh_indir,
	.set_rxfh_indir = mlx4_en_set_rxfh_indir,
};