be_ethtool.c

/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include "be.h"
#include "be_cmds.h"
#include <linux/ethtool.h>

struct be_ethtool_stat {
        char desc[ETH_GSTRING_LEN];
        int type;
        int size;
        int offset;
};

enum {DRVSTAT_TX, DRVSTAT_RX, DRVSTAT};

#define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \
                                        offsetof(_struct, field)
#define DRVSTAT_TX_INFO(field) #field, DRVSTAT_TX,\
                                        FIELDINFO(struct be_tx_stats, field)
#define DRVSTAT_RX_INFO(field) #field, DRVSTAT_RX,\
                                        FIELDINFO(struct be_rx_stats, field)
#define DRVSTAT_INFO(field) #field, DRVSTAT,\
                                        FIELDINFO(struct be_drv_stats, field)

static const struct be_ethtool_stat et_stats[] = {
        {DRVSTAT_INFO(rx_crc_errors)},
        {DRVSTAT_INFO(rx_alignment_symbol_errors)},
        {DRVSTAT_INFO(rx_pause_frames)},
        {DRVSTAT_INFO(rx_control_frames)},
        /* Received packets dropped when the Ethernet length field
         * is not equal to the actual Ethernet data length.
         */
        {DRVSTAT_INFO(rx_in_range_errors)},
        /* Received packets dropped when their length field is >= 1501 bytes
         * and <= 1535 bytes.
         */
        {DRVSTAT_INFO(rx_out_range_errors)},
        /* Received packets dropped when they are longer than 9216 bytes */
        {DRVSTAT_INFO(rx_frame_too_long)},
        /* Received packets dropped when they don't pass the unicast or
         * multicast address filtering.
         */
        {DRVSTAT_INFO(rx_address_mismatch_drops)},
        /* Received packets dropped when IP packet length field is less than
         * the IP header length field.
         */
        {DRVSTAT_INFO(rx_dropped_too_small)},
        /* Received packets dropped when IP length field is greater than
         * the actual packet length.
         */
        {DRVSTAT_INFO(rx_dropped_too_short)},
        /* Received packets dropped when the IP header length field is less
         * than 5.
         */
        {DRVSTAT_INFO(rx_dropped_header_too_small)},
        /* Received packets dropped when the TCP header length field is less
         * than 5 or the TCP header length + IP header length is more
         * than IP packet length.
         */
        {DRVSTAT_INFO(rx_dropped_tcp_length)},
        {DRVSTAT_INFO(rx_dropped_runt)},
        /* Number of received packets dropped when a fifo for descriptors going
         * into the packet demux block overflows. In normal operation, this
         * fifo must never overflow.
         */
        {DRVSTAT_INFO(rxpp_fifo_overflow_drop)},
        {DRVSTAT_INFO(rx_input_fifo_overflow_drop)},
        {DRVSTAT_INFO(rx_ip_checksum_errs)},
        {DRVSTAT_INFO(rx_tcp_checksum_errs)},
        {DRVSTAT_INFO(rx_udp_checksum_errs)},
        {DRVSTAT_INFO(tx_pauseframes)},
        {DRVSTAT_INFO(tx_controlframes)},
        {DRVSTAT_INFO(rx_priority_pause_frames)},
        /* Received packets dropped when an internal fifo going into
         * main packet buffer tank (PMEM) overflows.
         */
        {DRVSTAT_INFO(pmem_fifo_overflow_drop)},
        {DRVSTAT_INFO(jabber_events)},
        /* Received packets dropped due to lack of available HW packet buffers
         * used to temporarily hold the received packets.
         */
        {DRVSTAT_INFO(rx_drops_no_pbuf)},
        /* Received packets dropped due to input receive buffer
         * descriptor fifo overflowing.
         */
        {DRVSTAT_INFO(rx_drops_no_erx_descr)},
        /* Packets dropped because the internal FIFO to the offloaded TCP
         * receive processing block is full. This could happen only for
         * offloaded iSCSI or FCoE traffic.
         */
        {DRVSTAT_INFO(rx_drops_no_tpre_descr)},
        /* Received packets dropped when they need more than 8
         * receive buffers. This cannot happen as the driver configures
         * 2048 byte receive buffers.
         */
        {DRVSTAT_INFO(rx_drops_too_many_frags)},
        {DRVSTAT_INFO(forwarded_packets)},
        /* Received packets dropped when the frame length
         * is more than 9018 bytes
         */
        {DRVSTAT_INFO(rx_drops_mtu)},
        /* Number of packets dropped due to random early drop function */
        {DRVSTAT_INFO(eth_red_drops)},
        {DRVSTAT_INFO(be_on_die_temperature)}
};
#define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)

/* Stats related to multi RX queues: get_stats routine assumes bytes, pkts
 * are first and second members respectively.
 */
static const struct be_ethtool_stat et_rx_stats[] = {
        {DRVSTAT_RX_INFO(rx_bytes)},/* If moving this member see above note */
        {DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */
        {DRVSTAT_RX_INFO(rx_compl)},
        {DRVSTAT_RX_INFO(rx_mcast_pkts)},
        /* Number of page allocation failures while posting receive buffers
         * to HW.
         */
        {DRVSTAT_RX_INFO(rx_post_fail)},
        /* Received packets dropped due to skb allocation failure */
        {DRVSTAT_RX_INFO(rx_drops_no_skbs)},
        /* Received packets dropped due to lack of available fetched buffers
         * posted by the driver.
         */
        {DRVSTAT_RX_INFO(rx_drops_no_frags)}
};
#define ETHTOOL_RXSTATS_NUM (ARRAY_SIZE(et_rx_stats))

/* Stats related to multi TX queues: get_stats routine assumes compl is the
 * first member
 */
static const struct be_ethtool_stat et_tx_stats[] = {
        {DRVSTAT_TX_INFO(tx_compl)}, /* If moving this member see above note */
        {DRVSTAT_TX_INFO(tx_bytes)},
        {DRVSTAT_TX_INFO(tx_pkts)},
        /* Number of skbs queued for transmission by the driver */
        {DRVSTAT_TX_INFO(tx_reqs)},
        /* Number of TX work request blocks DMAed to HW */
        {DRVSTAT_TX_INFO(tx_wrbs)},
        /* Number of times the TX queue was stopped due to lack
         * of space in the TXQ.
         */
        {DRVSTAT_TX_INFO(tx_stops)}
};
#define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats))

static const char et_self_tests[][ETH_GSTRING_LEN] = {
        "MAC Loopback test",
        "PHY Loopback test",
        "External Loopback test",
        "DDR DMA test",
        "Link test"
};
#define ETHTOOL_TESTS_NUM ARRAY_SIZE(et_self_tests)
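
/* Usage sketch (illustrative, not part of the original driver): the strings
 * above are reported to user space for the ETH_SS_TEST string set, so they
 * are the row labels one would expect from a self-test invocation such as
 *
 *      ethtool -t eth0 offline
 *
 * where "eth0" is a hypothetical interface name and "offline" requests the
 * loopback and DDR DMA tests guarded by ETH_TEST_FL_OFFLINE in
 * be_self_test() below.
 */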

#define BE_MAC_LOOPBACK 0x0
#define BE_PHY_LOOPBACK 0x1
#define BE_ONE_PORT_EXT_LOOPBACK 0x2
#define BE_NO_LOOPBACK 0xff

static void be_get_drvinfo(struct net_device *netdev,
                           struct ethtool_drvinfo *drvinfo)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        char fw_on_flash[FW_VER_LEN];

        memset(fw_on_flash, 0, sizeof(fw_on_flash));
        be_cmd_get_fw_ver(adapter, adapter->fw_ver, fw_on_flash);

        strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, DRV_VER, sizeof(drvinfo->version));
        strncpy(drvinfo->fw_version, adapter->fw_ver, FW_VER_LEN);
        if (memcmp(adapter->fw_ver, fw_on_flash, FW_VER_LEN) != 0) {
                strcat(drvinfo->fw_version, " [");
                strcat(drvinfo->fw_version, fw_on_flash);
                strcat(drvinfo->fw_version, "]");
        }

        strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
                sizeof(drvinfo->bus_info));
        drvinfo->testinfo_len = 0;
        drvinfo->regdump_len = 0;
        drvinfo->eedump_len = 0;
}
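
/* Usage sketch (illustrative only): the fields filled in above are what
 * "ethtool -i eth0" (hypothetical interface name) reports as driver, version,
 * firmware-version and bus-info; when the firmware on flash differs from the
 * firmware that is running, the flash version is appended in brackets.
 */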

static u32
lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
{
        u32 data_read = 0, eof;
        u8 addn_status;
        struct be_dma_mem data_len_cmd;
        int status;

        memset(&data_len_cmd, 0, sizeof(data_len_cmd));
        /* data_offset and data_size should be 0 to get reg len */
        status = lancer_cmd_read_object(adapter, &data_len_cmd, 0, 0,
                                        file_name, &data_read, &eof,
                                        &addn_status);

        return data_read;
}

static int
lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
                     u32 buf_len, void *buf)
{
        struct be_dma_mem read_cmd;
        u32 read_len = 0, total_read_len = 0, chunk_size;
        u32 eof = 0;
        u8 addn_status;
        int status = 0;

        read_cmd.size = LANCER_READ_FILE_CHUNK;
        read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size,
                                           &read_cmd.dma);
        if (!read_cmd.va) {
                dev_err(&adapter->pdev->dev,
                        "Memory allocation failure while reading dump\n");
                return -ENOMEM;
        }

        while ((total_read_len < buf_len) && !eof) {
                chunk_size = min_t(u32, (buf_len - total_read_len),
                                   LANCER_READ_FILE_CHUNK);
                chunk_size = ALIGN(chunk_size, 4);
                status = lancer_cmd_read_object(adapter, &read_cmd, chunk_size,
                                                total_read_len, file_name,
                                                &read_len, &eof, &addn_status);
                if (!status) {
                        memcpy(buf + total_read_len, read_cmd.va, read_len);
                        total_read_len += read_len;
                        eof &= LANCER_READ_FILE_EOF_MASK;
                } else {
                        status = -EIO;
                        break;
                }
        }
        pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va,
                            read_cmd.dma);

        return status;
}

static int
be_get_reg_len(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        u32 log_size = 0;

        if (be_physfn(adapter)) {
                if (lancer_chip(adapter))
                        log_size = lancer_cmd_get_file_len(adapter,
                                                           LANCER_FW_DUMP_FILE);
                else
                        be_cmd_get_reg_len(adapter, &log_size);
        }
        return log_size;
}

static void
be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (be_physfn(adapter)) {
                memset(buf, 0, regs->len);
                if (lancer_chip(adapter))
                        lancer_cmd_read_file(adapter, LANCER_FW_DUMP_FILE,
                                             regs->len, buf);
                else
                        be_cmd_get_regs(adapter, regs->len, buf);
        }
}

static int be_get_coalesce(struct net_device *netdev,
                           struct ethtool_coalesce *et)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_eq_obj *eqo = &adapter->eq_obj[0];

        et->rx_coalesce_usecs = eqo->cur_eqd;
        et->rx_coalesce_usecs_high = eqo->max_eqd;
        et->rx_coalesce_usecs_low = eqo->min_eqd;

        et->tx_coalesce_usecs = eqo->cur_eqd;
        et->tx_coalesce_usecs_high = eqo->max_eqd;
        et->tx_coalesce_usecs_low = eqo->min_eqd;

        et->use_adaptive_rx_coalesce = eqo->enable_aic;
        et->use_adaptive_tx_coalesce = eqo->enable_aic;

        return 0;
}

/* TX attributes are ignored. Only RX attributes are considered.
 * The eqd cmd is issued in the worker thread.
 */
static int be_set_coalesce(struct net_device *netdev,
                           struct ethtool_coalesce *et)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_eq_obj *eqo;
        int i;

        for_all_evt_queues(adapter, eqo, i) {
                eqo->enable_aic = et->use_adaptive_rx_coalesce;
                eqo->max_eqd = min(et->rx_coalesce_usecs_high, BE_MAX_EQD);
                eqo->min_eqd = min(et->rx_coalesce_usecs_low, eqo->max_eqd);
                eqo->eqd = et->rx_coalesce_usecs;
        }

        return 0;
}
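
/* Usage sketch (illustrative only): the fields consumed above are the
 * standard ethtool coalescing parameters, so a command along the lines of
 *
 *      ethtool -C eth0 adaptive-rx on rx-usecs 96 rx-usecs-low 0 \
 *              rx-usecs-high 120
 *
 * (interface name and values are hypothetical) lands here and updates
 * enable_aic, eqd, min_eqd and max_eqd for every event queue; as noted in
 * the comment above, the TX parameters are accepted but ignored.
 */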

static void
be_get_ethtool_stats(struct net_device *netdev,
                     struct ethtool_stats *stats, uint64_t *data)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        void *p;
        unsigned int i, j, base = 0, start;

        for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
                p = (u8 *)&adapter->drv_stats + et_stats[i].offset;
                data[i] = *(u32 *)p;
        }
        base += ETHTOOL_STATS_NUM;

        for_all_rx_queues(adapter, rxo, j) {
                struct be_rx_stats *stats = rx_stats(rxo);

                do {
                        start = u64_stats_fetch_begin_bh(&stats->sync);
                        data[base] = stats->rx_bytes;
                        data[base + 1] = stats->rx_pkts;
                } while (u64_stats_fetch_retry_bh(&stats->sync, start));

                for (i = 2; i < ETHTOOL_RXSTATS_NUM; i++) {
                        p = (u8 *)stats + et_rx_stats[i].offset;
                        data[base + i] = *(u32 *)p;
                }
                base += ETHTOOL_RXSTATS_NUM;
        }

        for_all_tx_queues(adapter, txo, j) {
                struct be_tx_stats *stats = tx_stats(txo);

                do {
                        start = u64_stats_fetch_begin_bh(&stats->sync_compl);
                        data[base] = stats->tx_compl;
                } while (u64_stats_fetch_retry_bh(&stats->sync_compl, start));

                do {
                        start = u64_stats_fetch_begin_bh(&stats->sync);
                        for (i = 1; i < ETHTOOL_TXSTATS_NUM; i++) {
                                p = (u8 *)stats + et_tx_stats[i].offset;
                                data[base + i] =
                                        (et_tx_stats[i].size == sizeof(u64)) ?
                                                *(u64 *)p : *(u32 *)p;
                        }
                } while (u64_stats_fetch_retry_bh(&stats->sync, start));
                base += ETHTOOL_TXSTATS_NUM;
        }
}

static void
be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
                    uint8_t *data)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int i, j;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
                        memcpy(data, et_stats[i].desc, ETH_GSTRING_LEN);
                        data += ETH_GSTRING_LEN;
                }
                for (i = 0; i < adapter->num_rx_qs; i++) {
                        for (j = 0; j < ETHTOOL_RXSTATS_NUM; j++) {
                                sprintf(data, "rxq%d: %s", i,
                                        et_rx_stats[j].desc);
                                data += ETH_GSTRING_LEN;
                        }
                }
                for (i = 0; i < adapter->num_tx_qs; i++) {
                        for (j = 0; j < ETHTOOL_TXSTATS_NUM; j++) {
                                sprintf(data, "txq%d: %s", i,
                                        et_tx_stats[j].desc);
                                data += ETH_GSTRING_LEN;
                        }
                }
                break;
        case ETH_SS_TEST:
                for (i = 0; i < ETHTOOL_TESTS_NUM; i++) {
                        memcpy(data, et_self_tests[i], ETH_GSTRING_LEN);
                        data += ETH_GSTRING_LEN;
                }
                break;
        }
}

static int be_get_sset_count(struct net_device *netdev, int stringset)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        switch (stringset) {
        case ETH_SS_TEST:
                return ETHTOOL_TESTS_NUM;
        case ETH_SS_STATS:
                return ETHTOOL_STATS_NUM +
                        adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM +
                        adapter->num_tx_qs * ETHTOOL_TXSTATS_NUM;
        default:
                return -EINVAL;
        }
}
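
/* Usage sketch (illustrative only): be_get_sset_count(), be_get_stat_strings()
 * and be_get_ethtool_stats() together back the statistics dump, i.e.
 *
 *      ethtool -S eth0
 *
 * (hypothetical interface name). User space asks for the count first, then
 * the strings, then the values, so the three routines must walk et_stats,
 * et_rx_stats and et_tx_stats in exactly the same order.
 */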

static u32 be_get_port_type(u32 phy_type, u32 dac_cable_len)
{
        u32 port;

        switch (phy_type) {
        case PHY_TYPE_BASET_1GB:
        case PHY_TYPE_BASEX_1GB:
        case PHY_TYPE_SGMII:
                port = PORT_TP;
                break;
        case PHY_TYPE_SFP_PLUS_10GB:
                port = dac_cable_len ? PORT_DA : PORT_FIBRE;
                break;
        case PHY_TYPE_XFP_10GB:
        case PHY_TYPE_SFP_1GB:
                port = PORT_FIBRE;
                break;
        case PHY_TYPE_BASET_10GB:
                port = PORT_TP;
                break;
        default:
                port = PORT_OTHER;
        }

        return port;
}

static u32 convert_to_et_setting(u32 if_type, u32 if_speeds)
{
        u32 val = 0;

        switch (if_type) {
        case PHY_TYPE_BASET_1GB:
        case PHY_TYPE_BASEX_1GB:
        case PHY_TYPE_SGMII:
                val |= SUPPORTED_TP;
                if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
                        val |= SUPPORTED_1000baseT_Full;
                if (if_speeds & BE_SUPPORTED_SPEED_100MBPS)
                        val |= SUPPORTED_100baseT_Full;
                if (if_speeds & BE_SUPPORTED_SPEED_10MBPS)
                        val |= SUPPORTED_10baseT_Full;
                break;
        case PHY_TYPE_KX4_10GB:
                val |= SUPPORTED_Backplane;
                if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
                        val |= SUPPORTED_1000baseKX_Full;
                if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
                        val |= SUPPORTED_10000baseKX4_Full;
                break;
        case PHY_TYPE_KR_10GB:
                val |= SUPPORTED_Backplane |
                       SUPPORTED_10000baseKR_Full;
                break;
        case PHY_TYPE_SFP_PLUS_10GB:
        case PHY_TYPE_XFP_10GB:
        case PHY_TYPE_SFP_1GB:
                val |= SUPPORTED_FIBRE;
                if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
                        val |= SUPPORTED_10000baseT_Full;
                if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
                        val |= SUPPORTED_1000baseT_Full;
                break;
        case PHY_TYPE_BASET_10GB:
                val |= SUPPORTED_TP;
                if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
                        val |= SUPPORTED_10000baseT_Full;
                if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
                        val |= SUPPORTED_1000baseT_Full;
                if (if_speeds & BE_SUPPORTED_SPEED_100MBPS)
                        val |= SUPPORTED_100baseT_Full;
                break;
        default:
                val |= SUPPORTED_TP;
        }

        return val;
}

static int convert_to_et_speed(u32 be_speed)
{
        int et_speed = SPEED_10000;

        switch (be_speed) {
        case PHY_LINK_SPEED_10MBPS:
                et_speed = SPEED_10;
                break;
        case PHY_LINK_SPEED_100MBPS:
                et_speed = SPEED_100;
                break;
        case PHY_LINK_SPEED_1GBPS:
                et_speed = SPEED_1000;
                break;
        case PHY_LINK_SPEED_10GBPS:
                et_speed = SPEED_10000;
                break;
        }

        return et_speed;
}

bool be_pause_supported(struct be_adapter *adapter)
{
        return (adapter->phy.interface_type == PHY_TYPE_SFP_PLUS_10GB ||
                adapter->phy.interface_type == PHY_TYPE_XFP_10GB) ?
                        false : true;
}

static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        u8 port_speed = 0;
        u16 link_speed = 0;
        u8 link_status;
        u32 et_speed = 0;
        int status;

        if (adapter->phy.link_speed < 0 || !(netdev->flags & IFF_UP)) {
                if (adapter->phy.forced_port_speed < 0) {
                        status = be_cmd_link_status_query(adapter, &port_speed,
                                                          &link_speed,
                                                          &link_status, 0);
                        if (!status)
                                be_link_status_update(adapter, link_status);
                        if (link_speed)
                                et_speed = link_speed * 10;
                        else if (link_status)
                                et_speed = convert_to_et_speed(port_speed);
                } else {
                        et_speed = adapter->phy.forced_port_speed;
                }

                ethtool_cmd_speed_set(ecmd, et_speed);

                status = be_cmd_get_phy_info(adapter);
                if (status)
                        return status;

                ecmd->supported =
                        convert_to_et_setting(adapter->phy.interface_type,
                                        adapter->phy.auto_speeds_supported |
                                        adapter->phy.fixed_speeds_supported);
                ecmd->advertising =
                        convert_to_et_setting(adapter->phy.interface_type,
                                        adapter->phy.auto_speeds_supported);

                ecmd->port = be_get_port_type(adapter->phy.interface_type,
                                              adapter->phy.dac_cable_len);

                if (adapter->phy.auto_speeds_supported) {
                        ecmd->supported |= SUPPORTED_Autoneg;
                        ecmd->autoneg = AUTONEG_ENABLE;
                        ecmd->advertising |= ADVERTISED_Autoneg;
                }

                if (be_pause_supported(adapter)) {
                        ecmd->supported |= SUPPORTED_Pause;
                        ecmd->advertising |= ADVERTISED_Pause;
                }

                switch (adapter->phy.interface_type) {
                case PHY_TYPE_KR_10GB:
                case PHY_TYPE_KX4_10GB:
                        ecmd->transceiver = XCVR_INTERNAL;
                        break;
                default:
                        ecmd->transceiver = XCVR_EXTERNAL;
                        break;
                }

                /* Save for future use */
                adapter->phy.link_speed = ethtool_cmd_speed(ecmd);
                adapter->phy.port_type = ecmd->port;
                adapter->phy.transceiver = ecmd->transceiver;
                adapter->phy.autoneg = ecmd->autoneg;
                adapter->phy.advertising = ecmd->advertising;
                adapter->phy.supported = ecmd->supported;
        } else {
                ethtool_cmd_speed_set(ecmd, adapter->phy.link_speed);
                ecmd->port = adapter->phy.port_type;
                ecmd->transceiver = adapter->phy.transceiver;
                ecmd->autoneg = adapter->phy.autoneg;
                ecmd->advertising = adapter->phy.advertising;
                ecmd->supported = adapter->phy.supported;
        }

        ecmd->duplex = netif_carrier_ok(netdev) ? DUPLEX_FULL : DUPLEX_UNKNOWN;
        ecmd->phy_address = adapter->port_num;

        return 0;
}

static void be_get_ringparam(struct net_device *netdev,
                             struct ethtool_ringparam *ring)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        ring->rx_max_pending = ring->rx_pending = adapter->rx_obj[0].q.len;
        ring->tx_max_pending = ring->tx_pending = adapter->tx_obj[0].q.len;
}

static void
be_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        be_cmd_get_flow_control(adapter, &ecmd->tx_pause, &ecmd->rx_pause);
        ecmd->autoneg = adapter->phy.fc_autoneg;
}

static int
be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (ecmd->autoneg != adapter->phy.fc_autoneg)
                return -EINVAL;
        adapter->tx_fc = ecmd->tx_pause;
        adapter->rx_fc = ecmd->rx_pause;

        status = be_cmd_set_flow_control(adapter,
                                         adapter->tx_fc, adapter->rx_fc);
        if (status)
                dev_warn(&adapter->pdev->dev, "Pause param set failed.\n");

        return status;
}

static int
be_set_phys_id(struct net_device *netdev,
               enum ethtool_phys_id_state state)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        switch (state) {
        case ETHTOOL_ID_ACTIVE:
                be_cmd_get_beacon_state(adapter, adapter->hba_port_num,
                                        &adapter->beacon_state);
                return 1;       /* cycle on/off once per second */
        case ETHTOOL_ID_ON:
                be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
                                        BEACON_STATE_ENABLED);
                break;
        case ETHTOOL_ID_OFF:
                be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
                                        BEACON_STATE_DISABLED);
                break;
        case ETHTOOL_ID_INACTIVE:
                be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
                                        adapter->beacon_state);
        }

        return 0;
}
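
/* Usage sketch (illustrative only): be_set_phys_id() implements port
 * identification, i.e. blinking the port beacon from a command such as
 *
 *      ethtool -p eth0 5
 *
 * (hypothetical interface name, 5 seconds). The ethtool core calls
 * ETHTOOL_ID_ACTIVE once, alternates ETHTOOL_ID_ON/ETHTOOL_ID_OFF roughly
 * once per second because this handler returns 1, and finally restores the
 * saved beacon state via ETHTOOL_ID_INACTIVE.
 */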

static void
be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (be_is_wol_supported(adapter)) {
                wol->supported |= WAKE_MAGIC;
                wol->wolopts |= WAKE_MAGIC;
        } else
                wol->wolopts = 0;
        memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int
be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (wol->wolopts & ~WAKE_MAGIC)
                return -EOPNOTSUPP;

        if (!be_is_wol_supported(adapter)) {
                dev_warn(&adapter->pdev->dev, "WOL not supported\n");
                return -EOPNOTSUPP;
        }

        if (wol->wolopts & WAKE_MAGIC)
                adapter->wol = true;
        else
                adapter->wol = false;

        return 0;
}

static int
be_test_ddr_dma(struct be_adapter *adapter)
{
        int ret, i;
        struct be_dma_mem ddrdma_cmd;
        static const u64 pattern[2] = {
                0x5a5a5a5a5a5a5a5aULL, 0xa5a5a5a5a5a5a5a5ULL
        };

        ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
        ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
                                           ddrdma_cmd.size, &ddrdma_cmd.dma,
                                           GFP_KERNEL);
        if (!ddrdma_cmd.va) {
                dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
                return -ENOMEM;
        }

        for (i = 0; i < 2; i++) {
                ret = be_cmd_ddr_dma_test(adapter, pattern[i],
                                          4096, &ddrdma_cmd);
                if (ret != 0)
                        goto err;
        }

err:
        dma_free_coherent(&adapter->pdev->dev, ddrdma_cmd.size, ddrdma_cmd.va,
                          ddrdma_cmd.dma);
        return ret;
}

static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
                            u64 *status)
{
        be_cmd_set_loopback(adapter, adapter->hba_port_num,
                            loopback_type, 1);
        *status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
                                       loopback_type, 1500,
                                       2, 0xabc);
        be_cmd_set_loopback(adapter, adapter->hba_port_num,
                            BE_NO_LOOPBACK, 1);
        return *status;
}

static void
be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        u8 mac_speed = 0;
        u16 qos_link_speed = 0;

        memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);

        if (test->flags & ETH_TEST_FL_OFFLINE) {
                if (be_loopback_test(adapter, BE_MAC_LOOPBACK,
                                     &data[0]) != 0) {
                        test->flags |= ETH_TEST_FL_FAILED;
                }
                if (be_loopback_test(adapter, BE_PHY_LOOPBACK,
                                     &data[1]) != 0) {
                        test->flags |= ETH_TEST_FL_FAILED;
                }
                if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK,
                                     &data[2]) != 0) {
                        test->flags |= ETH_TEST_FL_FAILED;
                }
        }

        if (!lancer_chip(adapter) && be_test_ddr_dma(adapter) != 0) {
                data[3] = 1;
                test->flags |= ETH_TEST_FL_FAILED;
        }

        if (be_cmd_link_status_query(adapter, &mac_speed,
                                     &qos_link_speed, NULL, 0) != 0) {
                test->flags |= ETH_TEST_FL_FAILED;
                data[4] = -1;
        } else if (!mac_speed) {
                test->flags |= ETH_TEST_FL_FAILED;
                data[4] = 1;
        }
}

static int
be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        return be_load_fw(adapter, efl->data);
}

static int
be_get_eeprom_len(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (lancer_chip(adapter)) {
                if (be_physfn(adapter))
                        return lancer_cmd_get_file_len(adapter,
                                                       LANCER_VPD_PF_FILE);
                else
                        return lancer_cmd_get_file_len(adapter,
                                                       LANCER_VPD_VF_FILE);
        } else {
                return BE_READ_SEEPROM_LEN;
        }
}

static int
be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
               uint8_t *data)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_dma_mem eeprom_cmd;
        struct be_cmd_resp_seeprom_read *resp;
        int status;

        if (!eeprom->len)
                return -EINVAL;

        if (lancer_chip(adapter)) {
                if (be_physfn(adapter))
                        return lancer_cmd_read_file(adapter,
                                                    LANCER_VPD_PF_FILE,
                                                    eeprom->len, data);
                else
                        return lancer_cmd_read_file(adapter,
                                                    LANCER_VPD_VF_FILE,
                                                    eeprom->len, data);
        }

        eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device << 16);

        memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
        eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
        eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
                                           eeprom_cmd.size, &eeprom_cmd.dma,
                                           GFP_KERNEL);
        if (!eeprom_cmd.va) {
                dev_err(&adapter->pdev->dev,
                        "Memory allocation failure. Could not read eeprom\n");
                return -ENOMEM;
        }

        status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd);
        if (!status) {
                resp = eeprom_cmd.va;
                memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len);
        }
        dma_free_coherent(&adapter->pdev->dev, eeprom_cmd.size, eeprom_cmd.va,
                          eeprom_cmd.dma);

        return status;
}

static u32 be_get_msg_level(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (lancer_chip(adapter)) {
                dev_err(&adapter->pdev->dev, "Operation not supported\n");
                return -EOPNOTSUPP;
        }

        return adapter->msg_enable;
}

static void be_set_fw_log_level(struct be_adapter *adapter, u32 level)
{
        struct be_dma_mem extfat_cmd;
        struct be_fat_conf_params *cfgs;
        int status;
        int i, j;

        memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
        extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
        extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
                                             &extfat_cmd.dma);
        if (!extfat_cmd.va) {
                dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
                        __func__);
                goto err;
        }
        status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
        if (!status) {
                cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
                                        sizeof(struct be_cmd_resp_hdr));
                for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) {
                        u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes);
                        for (j = 0; j < num_modes; j++) {
                                if (cfgs->module[i].trace_lvl[j].mode ==
                                                                MODE_UART)
                                        cfgs->module[i].trace_lvl[j].dbg_lvl =
                                                        cpu_to_le32(level);
                        }
                }
                status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd,
                                                        cfgs);
                if (status)
                        dev_err(&adapter->pdev->dev,
                                "Message level set failed\n");
        } else {
                dev_err(&adapter->pdev->dev, "Message level get failed\n");
        }

        pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
                            extfat_cmd.dma);
err:
        return;
}

static void be_set_msg_level(struct net_device *netdev, u32 level)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (lancer_chip(adapter)) {
                dev_err(&adapter->pdev->dev, "Operation not supported\n");
                return;
        }

        if (adapter->msg_enable == level)
                return;

        if ((level & NETIF_MSG_HW) != (adapter->msg_enable & NETIF_MSG_HW))
                be_set_fw_log_level(adapter, level & NETIF_MSG_HW ?
                                    FW_LOG_LEVEL_DEFAULT : FW_LOG_LEVEL_FATAL);
        adapter->msg_enable = level;

        return;
}

const struct ethtool_ops be_ethtool_ops = {
        .get_settings = be_get_settings,
        .get_drvinfo = be_get_drvinfo,
        .get_wol = be_get_wol,
        .set_wol = be_set_wol,
        .get_link = ethtool_op_get_link,
        .get_eeprom_len = be_get_eeprom_len,
        .get_eeprom = be_read_eeprom,
        .get_coalesce = be_get_coalesce,
        .set_coalesce = be_set_coalesce,
        .get_ringparam = be_get_ringparam,
        .get_pauseparam = be_get_pauseparam,
        .set_pauseparam = be_set_pauseparam,
        .get_strings = be_get_stat_strings,
        .set_phys_id = be_set_phys_id,
        .get_msglevel = be_get_msg_level,
        .set_msglevel = be_set_msg_level,
        .get_sset_count = be_get_sset_count,
        .get_ethtool_stats = be_get_ethtool_stats,
        .get_regs_len = be_get_reg_len,
        .get_regs = be_get_regs,
        .flash_device = be_do_flash,
        .self_test = be_self_test,
};
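
/* Registration sketch (illustrative; the real hookup lives in be_main.c, not
 * in this file): the ops table above only takes effect once the driver
 * attaches it to the net_device during setup, e.g. something along the
 * lines of
 *
 *      static void be_netdev_setup(struct net_device *netdev)
 *      {
 *              SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
 *      }
 *
 * The helper name above is hypothetical; kernels of this vintage provide the
 * SET_ETHTOOL_OPS() macro, which simply assigns netdev->ethtool_ops.
 */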