e1000_ethtool.c 58 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975
  1. /*******************************************************************************
  2. Intel PRO/1000 Linux driver
  3. Copyright(c) 1999 - 2006 Intel Corporation.
  4. This program is free software; you can redistribute it and/or modify it
  5. under the terms and conditions of the GNU General Public License,
  6. version 2, as published by the Free Software Foundation.
  7. This program is distributed in the hope it will be useful, but WITHOUT
  8. ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  9. FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  10. more details.
  11. You should have received a copy of the GNU General Public License along with
  12. this program; if not, write to the Free Software Foundation, Inc.,
  13. 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  14. The full GNU General Public License is included in this distribution in
  15. the file called "COPYING".
  16. Contact Information:
  17. Linux NICS <linux.nics@intel.com>
  18. e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  19. Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  20. *******************************************************************************/
  21. /* ethtool support for e1000 */
  22. #include "e1000.h"
  23. #include <asm/uaccess.h>
  24. extern char e1000_driver_name[];
  25. extern char e1000_driver_version[];
  26. extern int e1000_up(struct e1000_adapter *adapter);
  27. extern void e1000_down(struct e1000_adapter *adapter);
  28. extern void e1000_reinit_locked(struct e1000_adapter *adapter);
  29. extern void e1000_reset(struct e1000_adapter *adapter);
  30. extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx);
  31. extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
  32. extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
  33. extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
  34. extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
  35. extern void e1000_update_stats(struct e1000_adapter *adapter);
/* One entry of the ethtool statistics table: the name reported to
 * userspace ("ethtool -S") plus the size and byte offset of the backing
 * counter inside struct e1000_adapter. */
struct e1000_stats {
	char stat_string[ETH_GSTRING_LEN];	/* name shown to userspace */
	int sizeof_stat;			/* size in bytes of the counter */
	int stat_offset;			/* offsetof() within e1000_adapter */
};

/* Expands to the {size, offset} initializer pair for member 'm' of
 * struct e1000_adapter (classic offsetof-style null-pointer sizeof trick). */
#define E1000_STAT(m) sizeof(((struct e1000_adapter *)0)->m), \
		offsetof(struct e1000_adapter, m)
/* Table mapping ethtool statistic names to adapter counters.  Most entries
 * come from the hardware statistics block (adapter->stats, register-named
 * counters); a few come from net_stats or software counters kept by the
 * driver.  Note some hardware counters are deliberately reported under
 * more than one name (e.g. mprc, latecol, gorcl below). */
static const struct e1000_stats e1000_gstrings_stats[] = {
	{ "rx_packets", E1000_STAT(stats.gprc) },
	{ "tx_packets", E1000_STAT(stats.gptc) },
	{ "rx_bytes", E1000_STAT(stats.gorcl) },
	{ "tx_bytes", E1000_STAT(stats.gotcl) },
	{ "rx_broadcast", E1000_STAT(stats.bprc) },
	{ "tx_broadcast", E1000_STAT(stats.bptc) },
	{ "rx_multicast", E1000_STAT(stats.mprc) },
	{ "tx_multicast", E1000_STAT(stats.mptc) },
	{ "rx_errors", E1000_STAT(stats.rxerrc) },
	{ "tx_errors", E1000_STAT(stats.txerrc) },
	{ "tx_dropped", E1000_STAT(net_stats.tx_dropped) },	/* software counter */
	{ "multicast", E1000_STAT(stats.mprc) },	/* same counter as rx_multicast */
	{ "collisions", E1000_STAT(stats.colc) },
	{ "rx_length_errors", E1000_STAT(stats.rlerrc) },
	{ "rx_over_errors", E1000_STAT(net_stats.rx_over_errors) },
	{ "rx_crc_errors", E1000_STAT(stats.crcerrs) },
	{ "rx_frame_errors", E1000_STAT(net_stats.rx_frame_errors) },
	{ "rx_no_buffer_count", E1000_STAT(stats.rnbc) },
	{ "rx_missed_errors", E1000_STAT(stats.mpc) },
	{ "tx_aborted_errors", E1000_STAT(stats.ecol) },
	{ "tx_carrier_errors", E1000_STAT(stats.tncrs) },
	{ "tx_fifo_errors", E1000_STAT(net_stats.tx_fifo_errors) },
	{ "tx_heartbeat_errors", E1000_STAT(net_stats.tx_heartbeat_errors) },
	{ "tx_window_errors", E1000_STAT(stats.latecol) },
	{ "tx_abort_late_coll", E1000_STAT(stats.latecol) },	/* same counter as tx_window_errors */
	{ "tx_deferred_ok", E1000_STAT(stats.dc) },
	{ "tx_single_coll_ok", E1000_STAT(stats.scc) },
	{ "tx_multi_coll_ok", E1000_STAT(stats.mcc) },
	{ "tx_timeout_count", E1000_STAT(tx_timeout_count) },	/* software counter */
	{ "rx_long_length_errors", E1000_STAT(stats.roc) },
	{ "rx_short_length_errors", E1000_STAT(stats.ruc) },
	{ "rx_align_errors", E1000_STAT(stats.algnerrc) },
	{ "tx_tcp_seg_good", E1000_STAT(stats.tsctc) },
	{ "tx_tcp_seg_failed", E1000_STAT(stats.tsctfc) },
	{ "rx_flow_control_xon", E1000_STAT(stats.xonrxc) },
	{ "rx_flow_control_xoff", E1000_STAT(stats.xoffrxc) },
	{ "tx_flow_control_xon", E1000_STAT(stats.xontxc) },
	{ "tx_flow_control_xoff", E1000_STAT(stats.xofftxc) },
	{ "rx_long_byte_count", E1000_STAT(stats.gorcl) },	/* same counter as rx_bytes */
	{ "rx_csum_offload_good", E1000_STAT(hw_csum_good) },	/* software counter */
	{ "rx_csum_offload_errors", E1000_STAT(hw_csum_err) },	/* software counter */
	{ "rx_header_split", E1000_STAT(rx_hdr_split) },	/* software counter */
	{ "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) },	/* software counter */
};
  88. #define E1000_QUEUE_STATS_LEN 0
  89. #define E1000_GLOBAL_STATS_LEN \
  90. sizeof(e1000_gstrings_stats) / sizeof(struct e1000_stats)
  91. #define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN)
  92. static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
  93. "Register test (offline)", "Eeprom test (offline)",
  94. "Interrupt test (offline)", "Loopback test (offline)",
  95. "Link test (on/offline)"
  96. };
  97. #define E1000_TEST_LEN sizeof(e1000_gstrings_test) / ETH_GSTRING_LEN
  98. static int
  99. e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
  100. {
  101. struct e1000_adapter *adapter = netdev_priv(netdev);
  102. struct e1000_hw *hw = &adapter->hw;
  103. if (hw->media_type == e1000_media_type_copper) {
  104. ecmd->supported = (SUPPORTED_10baseT_Half |
  105. SUPPORTED_10baseT_Full |
  106. SUPPORTED_100baseT_Half |
  107. SUPPORTED_100baseT_Full |
  108. SUPPORTED_1000baseT_Full|
  109. SUPPORTED_Autoneg |
  110. SUPPORTED_TP);
  111. if (hw->phy_type == e1000_phy_ife)
  112. ecmd->supported &= ~SUPPORTED_1000baseT_Full;
  113. ecmd->advertising = ADVERTISED_TP;
  114. if (hw->autoneg == 1) {
  115. ecmd->advertising |= ADVERTISED_Autoneg;
  116. /* the e1000 autoneg seems to match ethtool nicely */
  117. ecmd->advertising |= hw->autoneg_advertised;
  118. }
  119. ecmd->port = PORT_TP;
  120. ecmd->phy_address = hw->phy_addr;
  121. if (hw->mac_type == e1000_82543)
  122. ecmd->transceiver = XCVR_EXTERNAL;
  123. else
  124. ecmd->transceiver = XCVR_INTERNAL;
  125. } else {
  126. ecmd->supported = (SUPPORTED_1000baseT_Full |
  127. SUPPORTED_FIBRE |
  128. SUPPORTED_Autoneg);
  129. ecmd->advertising = (ADVERTISED_1000baseT_Full |
  130. ADVERTISED_FIBRE |
  131. ADVERTISED_Autoneg);
  132. ecmd->port = PORT_FIBRE;
  133. if (hw->mac_type >= e1000_82545)
  134. ecmd->transceiver = XCVR_INTERNAL;
  135. else
  136. ecmd->transceiver = XCVR_EXTERNAL;
  137. }
  138. if (netif_carrier_ok(adapter->netdev)) {
  139. e1000_get_speed_and_duplex(hw, &adapter->link_speed,
  140. &adapter->link_duplex);
  141. ecmd->speed = adapter->link_speed;
  142. /* unfortunatly FULL_DUPLEX != DUPLEX_FULL
  143. * and HALF_DUPLEX != DUPLEX_HALF */
  144. if (adapter->link_duplex == FULL_DUPLEX)
  145. ecmd->duplex = DUPLEX_FULL;
  146. else
  147. ecmd->duplex = DUPLEX_HALF;
  148. } else {
  149. ecmd->speed = -1;
  150. ecmd->duplex = -1;
  151. }
  152. ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) ||
  153. hw->autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
  154. return 0;
  155. }
/*
 * e1000_set_settings - apply link settings requested via ethtool
 *
 * Either enables autonegotiation with the requested advertisement mask,
 * or forces a fixed speed/duplex, then resets the link so the new
 * configuration takes effect.
 *
 * Returns 0 on success, -EINVAL when SoL/IDER is active or the forced
 * speed/duplex combination is rejected.
 */
static int
e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* When SoL/IDER sessions are active, autoneg/speed/duplex
	 * cannot be changed */
	if (e1000_check_phy_reset_block(hw)) {
		DPRINTK(DRV, ERR, "Cannot change link characteristics "
			"when SoL/IDER is active.\n");
		return -EINVAL;
	}

	/* serialize with any reset already in progress */
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);

	if (ecmd->autoneg == AUTONEG_ENABLE) {
		hw->autoneg = 1;
		if (hw->media_type == e1000_media_type_fiber)
			/* fiber only ever advertises 1000/full */
			hw->autoneg_advertised = ADVERTISED_1000baseT_Full |
						 ADVERTISED_FIBRE |
						 ADVERTISED_Autoneg;
		else
			hw->autoneg_advertised = ecmd->advertising |
						 ADVERTISED_TP |
						 ADVERTISED_Autoneg;
		/* report back what will actually be advertised */
		ecmd->advertising = hw->autoneg_advertised;
	} else
		/* forced mode: speed + duplex is the legacy combined
		 * encoding that e1000_set_spd_dplx() decodes */
		if (e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) {
			clear_bit(__E1000_RESETTING, &adapter->flags);
			return -EINVAL;
		}

	/* reset the link */
	if (netif_running(adapter->netdev)) {
		e1000_down(adapter);
		e1000_up(adapter);
	} else
		e1000_reset(adapter);

	clear_bit(__E1000_RESETTING, &adapter->flags);
	return 0;
}
  195. static void
  196. e1000_get_pauseparam(struct net_device *netdev,
  197. struct ethtool_pauseparam *pause)
  198. {
  199. struct e1000_adapter *adapter = netdev_priv(netdev);
  200. struct e1000_hw *hw = &adapter->hw;
  201. pause->autoneg =
  202. (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
  203. if (hw->fc == E1000_FC_RX_PAUSE)
  204. pause->rx_pause = 1;
  205. else if (hw->fc == E1000_FC_TX_PAUSE)
  206. pause->tx_pause = 1;
  207. else if (hw->fc == E1000_FC_FULL) {
  208. pause->rx_pause = 1;
  209. pause->tx_pause = 1;
  210. }
  211. }
/*
 * e1000_set_pauseparam - configure flow control from ethtool
 *
 * Records the requested rx/tx pause combination in hw->fc, then either
 * renegotiates (pause autoneg enabled: full down/up or reset) or forces
 * the setting directly onto the MAC.
 *
 * Returns 0 on success or the error from e1000_setup_link() /
 * e1000_force_mac_fc().
 */
static int
e1000_set_pauseparam(struct net_device *netdev,
		     struct ethtool_pauseparam *pause)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int retval = 0;

	adapter->fc_autoneg = pause->autoneg;

	/* serialize with any reset already in progress */
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);

	/* map the rx/tx flag pair onto the four flow-control modes */
	if (pause->rx_pause && pause->tx_pause)
		hw->fc = E1000_FC_FULL;
	else if (pause->rx_pause && !pause->tx_pause)
		hw->fc = E1000_FC_RX_PAUSE;
	else if (!pause->rx_pause && pause->tx_pause)
		hw->fc = E1000_FC_TX_PAUSE;
	else if (!pause->rx_pause && !pause->tx_pause)
		hw->fc = E1000_FC_NONE;

	hw->original_fc = hw->fc;

	if (adapter->fc_autoneg == AUTONEG_ENABLE) {
		if (netif_running(adapter->netdev)) {
			e1000_down(adapter);
			e1000_up(adapter);
		} else
			e1000_reset(adapter);
	} else
		/* autoneg disabled: push the setting straight to the MAC */
		retval = ((hw->media_type == e1000_media_type_fiber) ?
			  e1000_setup_link(hw) : e1000_force_mac_fc(hw));

	clear_bit(__E1000_RESETTING, &adapter->flags);
	return retval;
}
  243. static uint32_t
  244. e1000_get_rx_csum(struct net_device *netdev)
  245. {
  246. struct e1000_adapter *adapter = netdev_priv(netdev);
  247. return adapter->rx_csum;
  248. }
  249. static int
  250. e1000_set_rx_csum(struct net_device *netdev, uint32_t data)
  251. {
  252. struct e1000_adapter *adapter = netdev_priv(netdev);
  253. adapter->rx_csum = data;
  254. if (netif_running(netdev))
  255. e1000_reinit_locked(adapter);
  256. else
  257. e1000_reset(adapter);
  258. return 0;
  259. }
  260. static uint32_t
  261. e1000_get_tx_csum(struct net_device *netdev)
  262. {
  263. return (netdev->features & NETIF_F_HW_CSUM) != 0;
  264. }
  265. static int
  266. e1000_set_tx_csum(struct net_device *netdev, uint32_t data)
  267. {
  268. struct e1000_adapter *adapter = netdev_priv(netdev);
  269. if (adapter->hw.mac_type < e1000_82543) {
  270. if (!data)
  271. return -EINVAL;
  272. return 0;
  273. }
  274. if (data)
  275. netdev->features |= NETIF_F_HW_CSUM;
  276. else
  277. netdev->features &= ~NETIF_F_HW_CSUM;
  278. return 0;
  279. }
  280. #ifdef NETIF_F_TSO
  281. static int
  282. e1000_set_tso(struct net_device *netdev, uint32_t data)
  283. {
  284. struct e1000_adapter *adapter = netdev_priv(netdev);
  285. if ((adapter->hw.mac_type < e1000_82544) ||
  286. (adapter->hw.mac_type == e1000_82547))
  287. return data ? -EINVAL : 0;
  288. if (data)
  289. netdev->features |= NETIF_F_TSO;
  290. else
  291. netdev->features &= ~NETIF_F_TSO;
  292. DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled");
  293. adapter->tso_force = TRUE;
  294. return 0;
  295. }
  296. #endif /* NETIF_F_TSO */
  297. static uint32_t
  298. e1000_get_msglevel(struct net_device *netdev)
  299. {
  300. struct e1000_adapter *adapter = netdev_priv(netdev);
  301. return adapter->msg_enable;
  302. }
  303. static void
  304. e1000_set_msglevel(struct net_device *netdev, uint32_t data)
  305. {
  306. struct e1000_adapter *adapter = netdev_priv(netdev);
  307. adapter->msg_enable = data;
  308. }
/* Size in bytes of the register dump produced by e1000_get_regs(). */
static int
e1000_get_regs_len(struct net_device *netdev)
{
/* number of 32-bit words in the dump; also used by e1000_get_regs() */
#define E1000_REGS_LEN 32
	return E1000_REGS_LEN * sizeof(uint32_t);
}
/*
 * e1000_get_regs - dump a fixed set of MAC and PHY registers for ethtool
 *
 * Fills @p (E1000_REGS_LEN 32-bit words, zeroed first) with selected MAC
 * registers followed by decoded PHY data.  The PHY portion of the layout
 * differs between IGP and non-IGP (M88) PHYs; dummy entries keep the two
 * layouts index-aligned.  The PHY reads go through a page-select register
 * on IGP, so the write/read pairs below must stay in order.
 */
static void
e1000_get_regs(struct net_device *netdev,
	       struct ethtool_regs *regs, void *p)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	uint32_t *regs_buff = p;
	uint16_t phy_data;

	memset(p, 0, E1000_REGS_LEN * sizeof(uint32_t));

	/* dump format tag (1) plus chip revision and device id */
	regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;

	/* MAC registers, fixed positions 0..11 */
	regs_buff[0] = E1000_READ_REG(hw, CTRL);
	regs_buff[1] = E1000_READ_REG(hw, STATUS);
	regs_buff[2] = E1000_READ_REG(hw, RCTL);
	regs_buff[3] = E1000_READ_REG(hw, RDLEN);
	regs_buff[4] = E1000_READ_REG(hw, RDH);
	regs_buff[5] = E1000_READ_REG(hw, RDT);
	regs_buff[6] = E1000_READ_REG(hw, RDTR);
	regs_buff[7] = E1000_READ_REG(hw, TCTL);
	regs_buff[8] = E1000_READ_REG(hw, TDLEN);
	regs_buff[9] = E1000_READ_REG(hw, TDH);
	regs_buff[10] = E1000_READ_REG(hw, TDT);
	regs_buff[11] = E1000_READ_REG(hw, TIDV);

	regs_buff[12] = adapter->hw.phy_type; /* PHY type (IGP=1, M88=0) */

	if (hw->phy_type == e1000_phy_igp) {
		/* each AGC read needs the page selected first */
		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
				    IGP01E1000_PHY_AGC_A);
		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A &
				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
		regs_buff[13] = (uint32_t)phy_data; /* cable length */
		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
				    IGP01E1000_PHY_AGC_B);
		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_B &
				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
		regs_buff[14] = (uint32_t)phy_data; /* cable length */
		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
				    IGP01E1000_PHY_AGC_C);
		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_C &
				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
		regs_buff[15] = (uint32_t)phy_data; /* cable length */
		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
				    IGP01E1000_PHY_AGC_D);
		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_D &
				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
		regs_buff[16] = (uint32_t)phy_data; /* cable length */
		regs_buff[17] = 0; /* extended 10bt distance (not needed) */
		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0);
		e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS &
				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
		regs_buff[18] = (uint32_t)phy_data; /* cable polarity */
		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
				    IGP01E1000_PHY_PCS_INIT_REG);
		e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG &
				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
		regs_buff[19] = (uint32_t)phy_data; /* cable polarity */
		regs_buff[20] = 0; /* polarity correction enabled (always) */
		regs_buff[22] = 0; /* phy receive errors (unavailable) */
		regs_buff[23] = regs_buff[18]; /* mdix mode */
		/* leave the page select back at page 0 */
		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0);
	} else {
		/* M88 PHY: flat register space, no page select needed */
		e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
		regs_buff[13] = (uint32_t)phy_data; /* cable length */
		regs_buff[14] = 0; /* Dummy (to align w/ IGP phy reg dump) */
		regs_buff[15] = 0; /* Dummy (to align w/ IGP phy reg dump) */
		regs_buff[16] = 0; /* Dummy (to align w/ IGP phy reg dump) */
		e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
		regs_buff[17] = (uint32_t)phy_data; /* extended 10bt distance */
		regs_buff[18] = regs_buff[13]; /* cable polarity */
		regs_buff[19] = 0; /* Dummy (to align w/ IGP phy reg dump) */
		regs_buff[20] = regs_buff[17]; /* polarity correction */
		/* phy receive errors */
		regs_buff[22] = adapter->phy_stats.receive_errors;
		regs_buff[23] = regs_buff[13]; /* mdix mode */
	}

	regs_buff[21] = adapter->phy_stats.idle_errors; /* phy idle errors */
	e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
	regs_buff[24] = (uint32_t)phy_data; /* phy local receiver status */
	regs_buff[25] = regs_buff[24]; /* phy remote receiver status */

	/* MANC exists only on 82540+ copper parts */
	if (hw->mac_type >= e1000_82540 &&
	    hw->media_type == e1000_media_type_copper) {
		regs_buff[26] = E1000_READ_REG(hw, MANC);
	}
}
  397. static int
  398. e1000_get_eeprom_len(struct net_device *netdev)
  399. {
  400. struct e1000_adapter *adapter = netdev_priv(netdev);
  401. return adapter->hw.eeprom.word_size * 2;
  402. }
/*
 * e1000_get_eeprom - read a byte range of the EEPROM for ethtool
 *
 * The EEPROM is 16-bit word addressable, so whole words covering
 * [offset, offset + len) are read, byte-swapped to CPU order, and the
 * requested bytes copied out.  The magic value lets userspace verify
 * the device before a later write.
 *
 * Returns 0 on success, -EINVAL for zero length, -ENOMEM on allocation
 * failure, or the e1000_read_eeprom() error code.
 */
static int
e1000_get_eeprom(struct net_device *netdev,
		 struct ethtool_eeprom *eeprom, uint8_t *bytes)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	uint16_t *eeprom_buff;
	int first_word, last_word;
	int ret_val = 0;
	uint16_t i;

	if (eeprom->len == 0)
		return -EINVAL;

	/* identifies the device for a subsequent set_eeprom */
	eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;

	eeprom_buff = kmalloc(sizeof(uint16_t) *
			      (last_word - first_word + 1), GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	/* SPI parts can read the whole span in one call; others go
	 * word by word, stopping at the first failure */
	if (hw->eeprom.type == e1000_eeprom_spi)
		ret_val = e1000_read_eeprom(hw, first_word,
					    last_word - first_word + 1,
					    eeprom_buff);
	else {
		for (i = 0; i < last_word - first_word + 1; i++)
			if ((ret_val = e1000_read_eeprom(hw, first_word + i, 1,
							 &eeprom_buff[i])))
				break;
	}

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < last_word - first_word + 1; i++)
		le16_to_cpus(&eeprom_buff[i]);

	/* skip the low byte of the first word for odd starting offsets */
	memcpy(bytes, (uint8_t *)eeprom_buff + (eeprom->offset & 1),
	       eeprom->len);
	kfree(eeprom_buff);
	return ret_val;
}
/*
 * e1000_set_eeprom - write a byte range of the EEPROM from ethtool
 *
 * The EEPROM is 16-bit word addressable, so an odd start offset or odd
 * end requires a read-modify-write of the first/last word.  The affected
 * word span is assembled in a scratch buffer, byte order fixed up,
 * written back, and the EEPROM checksum refreshed when the checksummed
 * region was touched.
 *
 * Returns 0 on success, -EOPNOTSUPP for zero length, -EFAULT on a
 * magic mismatch, -ENOMEM on allocation failure, or a read/write
 * error code.
 */
static int
e1000_set_eeprom(struct net_device *netdev,
		 struct ethtool_eeprom *eeprom, uint8_t *bytes)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	uint16_t *eeprom_buff;
	void *ptr;
	int max_len, first_word, last_word, ret_val = 0;
	uint16_t i;

	if (eeprom->len == 0)
		return -EOPNOTSUPP;

	/* magic must match what e1000_get_eeprom() reported */
	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
		return -EFAULT;

	max_len = hw->eeprom.word_size * 2;
	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ptr = (void *)eeprom_buff;

	if (eeprom->offset & 1) {
		/* need read/modify/write of first changed EEPROM word */
		/* only the second byte of the word is being modified */
		ret_val = e1000_read_eeprom(hw, first_word, 1,
					    &eeprom_buff[0]);
		/* NOTE: void* arithmetic is a GCC extension (byte-sized);
		 * this skips the preserved low byte of the first word */
		ptr++;
	}
	if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
		/* need read/modify/write of last changed EEPROM word */
		/* only the first byte of the word is being modified */
		ret_val = e1000_read_eeprom(hw, last_word, 1,
					    &eeprom_buff[last_word - first_word]);
	}

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < last_word - first_word + 1; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(ptr, bytes, eeprom->len);

	for (i = 0; i < last_word - first_word + 1; i++)
		eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);

	ret_val = e1000_write_eeprom(hw, first_word,
				     last_word - first_word + 1, eeprom_buff);

	/* Update the checksum over the first part of the EEPROM if needed
	 * and flush shadow RAM for 82573 controllers */
	if ((ret_val == 0) && ((first_word <= EEPROM_CHECKSUM_REG) ||
			       (hw->mac_type == e1000_82573)))
		e1000_update_eeprom_checksum(hw);

	kfree(eeprom_buff);
	return ret_val;
}
  490. static void
  491. e1000_get_drvinfo(struct net_device *netdev,
  492. struct ethtool_drvinfo *drvinfo)
  493. {
  494. struct e1000_adapter *adapter = netdev_priv(netdev);
  495. char firmware_version[32];
  496. uint16_t eeprom_data;
  497. strncpy(drvinfo->driver, e1000_driver_name, 32);
  498. strncpy(drvinfo->version, e1000_driver_version, 32);
  499. /* EEPROM image version # is reported as firmware version # for
  500. * 8257{1|2|3} controllers */
  501. e1000_read_eeprom(&adapter->hw, 5, 1, &eeprom_data);
  502. switch (adapter->hw.mac_type) {
  503. case e1000_82571:
  504. case e1000_82572:
  505. case e1000_82573:
  506. case e1000_80003es2lan:
  507. case e1000_ich8lan:
  508. sprintf(firmware_version, "%d.%d-%d",
  509. (eeprom_data & 0xF000) >> 12,
  510. (eeprom_data & 0x0FF0) >> 4,
  511. eeprom_data & 0x000F);
  512. break;
  513. default:
  514. sprintf(firmware_version, "N/A");
  515. }
  516. strncpy(drvinfo->fw_version, firmware_version, 32);
  517. strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
  518. drvinfo->n_stats = E1000_STATS_LEN;
  519. drvinfo->testinfo_len = E1000_TEST_LEN;
  520. drvinfo->regdump_len = e1000_get_regs_len(netdev);
  521. drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
  522. }
  523. static void
  524. e1000_get_ringparam(struct net_device *netdev,
  525. struct ethtool_ringparam *ring)
  526. {
  527. struct e1000_adapter *adapter = netdev_priv(netdev);
  528. e1000_mac_type mac_type = adapter->hw.mac_type;
  529. struct e1000_tx_ring *txdr = adapter->tx_ring;
  530. struct e1000_rx_ring *rxdr = adapter->rx_ring;
  531. ring->rx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_RXD :
  532. E1000_MAX_82544_RXD;
  533. ring->tx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_TXD :
  534. E1000_MAX_82544_TXD;
  535. ring->rx_mini_max_pending = 0;
  536. ring->rx_jumbo_max_pending = 0;
  537. ring->rx_pending = rxdr->count;
  538. ring->tx_pending = txdr->count;
  539. ring->rx_mini_pending = 0;
  540. ring->rx_jumbo_pending = 0;
  541. }
  542. static int
  543. e1000_set_ringparam(struct net_device *netdev,
  544. struct ethtool_ringparam *ring)
  545. {
  546. struct e1000_adapter *adapter = netdev_priv(netdev);
  547. e1000_mac_type mac_type = adapter->hw.mac_type;
  548. struct e1000_tx_ring *txdr, *tx_old;
  549. struct e1000_rx_ring *rxdr, *rx_old;
  550. int i, err, tx_ring_size, rx_ring_size;
  551. if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
  552. return -EINVAL;
  553. tx_ring_size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues;
  554. rx_ring_size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues;
  555. while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
  556. msleep(1);
  557. if (netif_running(adapter->netdev))
  558. e1000_down(adapter);
  559. tx_old = adapter->tx_ring;
  560. rx_old = adapter->rx_ring;
  561. err = -ENOMEM;
  562. txdr = kzalloc(tx_ring_size, GFP_KERNEL);
  563. if (!txdr)
  564. goto err_alloc_tx;
  565. rxdr = kzalloc(rx_ring_size, GFP_KERNEL);
  566. if (!rxdr)
  567. goto err_alloc_rx;
  568. adapter->tx_ring = txdr;
  569. adapter->rx_ring = rxdr;
  570. rxdr->count = max(ring->rx_pending,(uint32_t)E1000_MIN_RXD);
  571. rxdr->count = min(rxdr->count,(uint32_t)(mac_type < e1000_82544 ?
  572. E1000_MAX_RXD : E1000_MAX_82544_RXD));
  573. E1000_ROUNDUP(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE);
  574. txdr->count = max(ring->tx_pending,(uint32_t)E1000_MIN_TXD);
  575. txdr->count = min(txdr->count,(uint32_t)(mac_type < e1000_82544 ?
  576. E1000_MAX_TXD : E1000_MAX_82544_TXD));
  577. E1000_ROUNDUP(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);
  578. for (i = 0; i < adapter->num_tx_queues; i++)
  579. txdr[i].count = txdr->count;
  580. for (i = 0; i < adapter->num_rx_queues; i++)
  581. rxdr[i].count = rxdr->count;
  582. if (netif_running(adapter->netdev)) {
  583. /* Try to get new resources before deleting old */
  584. if ((err = e1000_setup_all_rx_resources(adapter)))
  585. goto err_setup_rx;
  586. if ((err = e1000_setup_all_tx_resources(adapter)))
  587. goto err_setup_tx;
  588. /* save the new, restore the old in order to free it,
  589. * then restore the new back again */
  590. adapter->rx_ring = rx_old;
  591. adapter->tx_ring = tx_old;
  592. e1000_free_all_rx_resources(adapter);
  593. e1000_free_all_tx_resources(adapter);
  594. kfree(tx_old);
  595. kfree(rx_old);
  596. adapter->rx_ring = rxdr;
  597. adapter->tx_ring = txdr;
  598. if ((err = e1000_up(adapter)))
  599. goto err_setup;
  600. }
  601. clear_bit(__E1000_RESETTING, &adapter->flags);
  602. return 0;
  603. err_setup_tx:
  604. e1000_free_all_rx_resources(adapter);
  605. err_setup_rx:
  606. adapter->rx_ring = rx_old;
  607. adapter->tx_ring = tx_old;
  608. kfree(rxdr);
  609. err_alloc_rx:
  610. kfree(txdr);
  611. err_alloc_tx:
  612. e1000_up(adapter);
  613. err_setup:
  614. clear_bit(__E1000_RESETTING, &adapter->flags);
  615. return err;
  616. }
/*
 * Write each test pattern (masked by the writable-bit mask W) to register R,
 * read it back, and verify that the bits which are both writable (W) and
 * readable (M) survived.  On the first mismatch, log the failure, store the
 * failing register's offset in *data and return 1 from the ENCLOSING
 * function -- note the hidden control flow: this macro can exit the caller.
 * Expects `adapter` and `data` to be in scope at the expansion site.
 */
#define REG_PATTERN_TEST(R, M, W)                                          \
{                                                                          \
	uint32_t pat, value;                                               \
	uint32_t test[] =                                                  \
		{0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};          \
	for (pat = 0; pat < sizeof(test)/sizeof(test[0]); pat++) {         \
		E1000_WRITE_REG(&adapter->hw, R, (test[pat] & W));         \
		value = E1000_READ_REG(&adapter->hw, R);                   \
		if (value != (test[pat] & W & M)) {                        \
			DPRINTK(DRV, ERR, "pattern test reg %04X failed: got " \
			        "0x%08X expected 0x%08X\n",                \
			        E1000_##R, value, (test[pat] & W & M));    \
			*data = (adapter->hw.mac_type < e1000_82543) ?     \
			        E1000_82542_##R : E1000_##R;               \
			return 1;                                          \
		}                                                          \
	}                                                                  \
}
/*
 * Write value W (masked by M) to register R once and read it back; fail if
 * the masked read-back differs.  On failure, log, store the failing
 * register's offset in *data and return 1 from the ENCLOSING function --
 * same hidden-control-flow caveat as REG_PATTERN_TEST.
 */
#define REG_SET_AND_CHECK(R, M, W)                                         \
{                                                                          \
	uint32_t value;                                                    \
	E1000_WRITE_REG(&adapter->hw, R, W & M);                           \
	value = E1000_READ_REG(&adapter->hw, R);                           \
	if ((W & M) != (value & M)) {                                      \
		DPRINTK(DRV, ERR, "set/check reg %04X test failed: got 0x%08X "\
		        "expected 0x%08X\n", E1000_##R, (value & M), (W & M)); \
		*data = (adapter->hw.mac_type < e1000_82543) ?             \
		        E1000_82542_##R : E1000_##R;                       \
		return 1;                                                  \
	}                                                                  \
}
/*
 * Offline register self-test: verify that MAC registers hold the values
 * written to them.  Returns 0 on success; on the first failure *data is set
 * to the failing register's offset (or 1 for the STATUS test) and 1 is
 * returned -- possibly from inside the REG_* macros, which hide a `return`.
 */
static int
e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
{
	uint32_t value, before, after;
	uint32_t i, toggle;

	/* The status register is Read Only, so a write should fail.
	 * Some bits that get toggled are ignored.
	 */
	switch (adapter->hw.mac_type) {
	/* there are several bits on newer hardware that are r/w */
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
		toggle = 0x7FFFF3FF;
		break;
	case e1000_82573:
	case e1000_ich8lan:
		toggle = 0x7FFFF033;
		break;
	default:
		toggle = 0xFFFFF833;
		break;
	}

	/* Writing the toggle mask into read-only STATUS must leave the
	 * masked bits unchanged; a change means the write "took". */
	before = E1000_READ_REG(&adapter->hw, STATUS);
	value = (E1000_READ_REG(&adapter->hw, STATUS) & toggle);
	E1000_WRITE_REG(&adapter->hw, STATUS, toggle);
	after = E1000_READ_REG(&adapter->hw, STATUS) & toggle;
	if (value != after) {
		DPRINTK(DRV, ERR, "failed STATUS register test got: "
		        "0x%08X expected: 0x%08X\n", after, value);
		*data = 1;
		return 1;
	}
	/* restore previous status */
	E1000_WRITE_REG(&adapter->hw, STATUS, before);

	/* ICH8 does not implement the flow-control address/type registers */
	if (adapter->hw.mac_type != e1000_ich8lan) {
		REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF);
		REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF);
		REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF);
		REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF);
	}

	REG_PATTERN_TEST(RDTR, 0x0000FFFF, 0xFFFFFFFF);
	REG_PATTERN_TEST(RDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
	REG_PATTERN_TEST(RDLEN, 0x000FFF80, 0x000FFFFF);
	REG_PATTERN_TEST(RDH, 0x0000FFFF, 0x0000FFFF);
	REG_PATTERN_TEST(RDT, 0x0000FFFF, 0x0000FFFF);
	REG_PATTERN_TEST(FCRTH, 0x0000FFF8, 0x0000FFF8);
	REG_PATTERN_TEST(FCTTV, 0x0000FFFF, 0x0000FFFF);
	REG_PATTERN_TEST(TIPG, 0x3FFFFFFF, 0x3FFFFFFF);
	REG_PATTERN_TEST(TDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
	REG_PATTERN_TEST(TDLEN, 0x000FFF80, 0x000FFFFF);

	REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000);
	/* ICH8 has fewer writable RCTL bits */
	before = (adapter->hw.mac_type == e1000_ich8lan ?
	          0x06C3B33E : 0x06DFB3FE);
	REG_SET_AND_CHECK(RCTL, before, 0x003FFFFB);
	REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000);

	if (adapter->hw.mac_type >= e1000_82543) {
		REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF);
		REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
		if (adapter->hw.mac_type != e1000_ich8lan)
			REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF);
		REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
		REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF);
		/* test every receive-address register pair's high word */
		value = (adapter->hw.mac_type == e1000_ich8lan ?
		         E1000_RAR_ENTRIES_ICH8LAN : E1000_RAR_ENTRIES);
		for (i = 0; i < value; i++) {
			REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF,
			                 0xFFFFFFFF);
		}
	} else {
		/* pre-82543 parts have narrower writable masks */
		REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x01FFFFFF);
		REG_PATTERN_TEST(RDBAL, 0xFFFFF000, 0xFFFFFFFF);
		REG_PATTERN_TEST(TXCW, 0x0000FFFF, 0x0000FFFF);
		REG_PATTERN_TEST(TDBAL, 0xFFFFF000, 0xFFFFFFFF);
	}

	/* exercise the whole multicast table array */
	value = (adapter->hw.mac_type == e1000_ich8lan ?
	         E1000_MC_TBL_SIZE_ICH8LAN : E1000_MC_TBL_SIZE);
	for (i = 0; i < value; i++)
		REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF);

	*data = 0;
	return 0;
}
  730. static int
  731. e1000_eeprom_test(struct e1000_adapter *adapter, uint64_t *data)
  732. {
  733. uint16_t temp;
  734. uint16_t checksum = 0;
  735. uint16_t i;
  736. *data = 0;
  737. /* Read and add up the contents of the EEPROM */
  738. for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
  739. if ((e1000_read_eeprom(&adapter->hw, i, 1, &temp)) < 0) {
  740. *data = 1;
  741. break;
  742. }
  743. checksum += temp;
  744. }
  745. /* If Checksum is not Correct return error else test passed */
  746. if ((checksum != (uint16_t) EEPROM_SUM) && !(*data))
  747. *data = 2;
  748. return *data;
  749. }
  750. static irqreturn_t
  751. e1000_test_intr(int irq,
  752. void *data)
  753. {
  754. struct net_device *netdev = (struct net_device *) data;
  755. struct e1000_adapter *adapter = netdev_priv(netdev);
  756. adapter->test_icr |= E1000_READ_REG(&adapter->hw, ICR);
  757. return IRQ_HANDLED;
  758. }
/*
 * Offline interrupt self-test.  Temporarily replaces the driver's IRQ
 * handler with e1000_test_intr, then for each of the first 10 cause bits
 * forces the interrupt via ICS and checks (through adapter->test_icr)
 * that masked causes do NOT reach the handler and unmasked causes DO.
 * *data: 0 = pass, 1 = could not hook the IRQ, 3/4/5 = which check failed.
 * Returns *data.
 */
static int
e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
{
	struct net_device *netdev = adapter->netdev;
	uint32_t mask, i=0, shared_int = TRUE;
	uint32_t irq = adapter->pdev->irq;

	*data = 0;

	/* NOTE: we don't test MSI interrupts here, yet */
	/* Hook up test interrupt handler just for this test.  First try to
	 * grab the line exclusively (IRQF_PROBE_SHARED); if that works the
	 * line is ours alone and the stricter masked-cause checks can run. */
	if (!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED,
	                 netdev->name, netdev))
		shared_int = FALSE;
	else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED,
	         netdev->name, netdev)) {
		*data = 1;
		return -1;
	}
	DPRINTK(HW, INFO, "testing %s interrupt\n",
	        (shared_int ? "shared" : "unshared"));

	/* Disable all the interrupts */
	E1000_WRITE_REG(&adapter->hw, IMC, 0xFFFFFFFF);
	msleep(10);

	/* Test each interrupt */
	for (; i < 10; i++) {
		/* ICH8 has no cause bit 8 */
		if (adapter->hw.mac_type == e1000_ich8lan && i == 8)
			continue;

		/* Interrupt to test */
		mask = 1 << i;

		if (!shared_int) {
			/* Disable the interrupt to be reported in
			 * the cause register and then force the same
			 * interrupt and see if one gets posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			E1000_WRITE_REG(&adapter->hw, IMC, mask);
			E1000_WRITE_REG(&adapter->hw, ICS, mask);
			msleep(10);

			if (adapter->test_icr & mask) {
				*data = 3;
				break;
			}
		}

		/* Enable the interrupt to be reported in
		 * the cause register and then force the same
		 * interrupt and see if one gets posted.  If
		 * an interrupt was not posted to the bus, the
		 * test failed.
		 */
		adapter->test_icr = 0;
		E1000_WRITE_REG(&adapter->hw, IMS, mask);
		E1000_WRITE_REG(&adapter->hw, ICS, mask);
		msleep(10);

		if (!(adapter->test_icr & mask)) {
			*data = 4;
			break;
		}

		if (!shared_int) {
			/* Disable the other interrupts to be reported in
			 * the cause register and then force the other
			 * interrupts and see if any get posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			E1000_WRITE_REG(&adapter->hw, IMC, ~mask & 0x00007FFF);
			E1000_WRITE_REG(&adapter->hw, ICS, ~mask & 0x00007FFF);
			msleep(10);

			if (adapter->test_icr) {
				*data = 5;
				break;
			}
		}
	}

	/* Disable all the interrupts */
	E1000_WRITE_REG(&adapter->hw, IMC, 0xFFFFFFFF);
	msleep(10);

	/* Unhook test interrupt handler */
	free_irq(irq, netdev);

	return *data;
}
  841. static void
  842. e1000_free_desc_rings(struct e1000_adapter *adapter)
  843. {
  844. struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
  845. struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
  846. struct pci_dev *pdev = adapter->pdev;
  847. int i;
  848. if (txdr->desc && txdr->buffer_info) {
  849. for (i = 0; i < txdr->count; i++) {
  850. if (txdr->buffer_info[i].dma)
  851. pci_unmap_single(pdev, txdr->buffer_info[i].dma,
  852. txdr->buffer_info[i].length,
  853. PCI_DMA_TODEVICE);
  854. if (txdr->buffer_info[i].skb)
  855. dev_kfree_skb(txdr->buffer_info[i].skb);
  856. }
  857. }
  858. if (rxdr->desc && rxdr->buffer_info) {
  859. for (i = 0; i < rxdr->count; i++) {
  860. if (rxdr->buffer_info[i].dma)
  861. pci_unmap_single(pdev, rxdr->buffer_info[i].dma,
  862. rxdr->buffer_info[i].length,
  863. PCI_DMA_FROMDEVICE);
  864. if (rxdr->buffer_info[i].skb)
  865. dev_kfree_skb(rxdr->buffer_info[i].skb);
  866. }
  867. }
  868. if (txdr->desc) {
  869. pci_free_consistent(pdev, txdr->size, txdr->desc, txdr->dma);
  870. txdr->desc = NULL;
  871. }
  872. if (rxdr->desc) {
  873. pci_free_consistent(pdev, rxdr->size, rxdr->desc, rxdr->dma);
  874. rxdr->desc = NULL;
  875. }
  876. kfree(txdr->buffer_info);
  877. txdr->buffer_info = NULL;
  878. kfree(rxdr->buffer_info);
  879. rxdr->buffer_info = NULL;
  880. return;
  881. }
/*
 * Build minimal Tx/Rx descriptor rings for the loopback self-test and
 * program the hardware to use them: allocate buffer_info arrays and
 * coherent descriptor memory, pre-fill every Tx descriptor with a mapped
 * 1KB skb, and every Rx descriptor with a mapped 2KB receive skb.
 * Returns 0 on success or a distinct error code 1-6 identifying which
 * allocation failed; on failure everything already allocated is released
 * via e1000_free_desc_rings().
 */
static int
e1000_setup_desc_rings(struct e1000_adapter *adapter)
{
	struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
	struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
	struct pci_dev *pdev = adapter->pdev;
	uint32_t rctl;
	int size, i, ret_val;

	/* Setup Tx descriptor ring and Tx buffers */

	if (!txdr->count)
		txdr->count = E1000_DEFAULT_TXD;

	size = txdr->count * sizeof(struct e1000_buffer);
	if (!(txdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
		ret_val = 1;
		goto err_nomem;
	}
	memset(txdr->buffer_info, 0, size);

	/* descriptor ring must be page-multiple sized for the hardware */
	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
	E1000_ROUNDUP(txdr->size, 4096);
	if (!(txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma))) {
		ret_val = 2;
		goto err_nomem;
	}
	memset(txdr->desc, 0, txdr->size);
	txdr->next_to_use = txdr->next_to_clean = 0;

	/* point the hardware at the test Tx ring and enable transmit */
	E1000_WRITE_REG(&adapter->hw, TDBAL,
			((uint64_t) txdr->dma & 0x00000000FFFFFFFF));
	E1000_WRITE_REG(&adapter->hw, TDBAH, ((uint64_t) txdr->dma >> 32));
	E1000_WRITE_REG(&adapter->hw, TDLEN,
			txdr->count * sizeof(struct e1000_tx_desc));
	E1000_WRITE_REG(&adapter->hw, TDH, 0);
	E1000_WRITE_REG(&adapter->hw, TDT, 0);
	E1000_WRITE_REG(&adapter->hw, TCTL,
			E1000_TCTL_PSP | E1000_TCTL_EN |
			E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
			E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT);

	/* pre-fill every Tx slot with a mapped 1KB skb and a ready-to-send
	 * descriptor (EOP + IFCS + RPS) */
	for (i = 0; i < txdr->count; i++) {
		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*txdr, i);
		struct sk_buff *skb;
		unsigned int size = 1024;

		if (!(skb = alloc_skb(size, GFP_KERNEL))) {
			ret_val = 3;
			goto err_nomem;
		}
		skb_put(skb, size);
		txdr->buffer_info[i].skb = skb;
		txdr->buffer_info[i].length = skb->len;
		txdr->buffer_info[i].dma =
			pci_map_single(pdev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma);
		tx_desc->lower.data = cpu_to_le32(skb->len);
		tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
						   E1000_TXD_CMD_IFCS |
						   E1000_TXD_CMD_RPS);
		tx_desc->upper.data = 0;
	}

	/* Setup Rx descriptor ring and Rx buffers */

	if (!rxdr->count)
		rxdr->count = E1000_DEFAULT_RXD;

	size = rxdr->count * sizeof(struct e1000_buffer);
	if (!(rxdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
		ret_val = 4;
		goto err_nomem;
	}
	memset(rxdr->buffer_info, 0, size);

	rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
	if (!(rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma))) {
		ret_val = 5;
		goto err_nomem;
	}
	memset(rxdr->desc, 0, rxdr->size);
	rxdr->next_to_use = rxdr->next_to_clean = 0;

	/* disable receive while re-pointing the hardware at the test ring */
	rctl = E1000_READ_REG(&adapter->hw, RCTL);
	E1000_WRITE_REG(&adapter->hw, RCTL, rctl & ~E1000_RCTL_EN);
	E1000_WRITE_REG(&adapter->hw, RDBAL,
			((uint64_t) rxdr->dma & 0xFFFFFFFF));
	E1000_WRITE_REG(&adapter->hw, RDBAH, ((uint64_t) rxdr->dma >> 32));
	E1000_WRITE_REG(&adapter->hw, RDLEN, rxdr->size);
	E1000_WRITE_REG(&adapter->hw, RDH, 0);
	E1000_WRITE_REG(&adapter->hw, RDT, 0);
	/* re-enable receive: 2KB buffers, accept broadcast, no loopback */
	rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
		E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
		(adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
	E1000_WRITE_REG(&adapter->hw, RCTL, rctl);

	/* pre-fill every Rx slot with a mapped, zeroed 2KB receive skb */
	for (i = 0; i < rxdr->count; i++) {
		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i);
		struct sk_buff *skb;

		if (!(skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN,
				GFP_KERNEL))) {
			ret_val = 6;
			goto err_nomem;
		}
		skb_reserve(skb, NET_IP_ALIGN);
		rxdr->buffer_info[i].skb = skb;
		rxdr->buffer_info[i].length = E1000_RXBUFFER_2048;
		rxdr->buffer_info[i].dma =
			pci_map_single(pdev, skb->data, E1000_RXBUFFER_2048,
				       PCI_DMA_FROMDEVICE);
		rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma);
		memset(skb->data, 0x00, skb->len);
	}

	return 0;

err_nomem:
	e1000_free_desc_rings(adapter);
	return ret_val;
}
  989. static void
  990. e1000_phy_disable_receiver(struct e1000_adapter *adapter)
  991. {
  992. /* Write out to PHY registers 29 and 30 to disable the Receiver. */
  993. e1000_write_phy_reg(&adapter->hw, 29, 0x001F);
  994. e1000_write_phy_reg(&adapter->hw, 30, 0x8FFC);
  995. e1000_write_phy_reg(&adapter->hw, 29, 0x001A);
  996. e1000_write_phy_reg(&adapter->hw, 30, 0x8FF0);
  997. }
  998. static void
  999. e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter)
  1000. {
  1001. uint16_t phy_reg;
  1002. /* Because we reset the PHY above, we need to re-force TX_CLK in the
  1003. * Extended PHY Specific Control Register to 25MHz clock. This
  1004. * value defaults back to a 2.5MHz clock when the PHY is reset.
  1005. */
  1006. e1000_read_phy_reg(&adapter->hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg);
  1007. phy_reg |= M88E1000_EPSCR_TX_CLK_25;
  1008. e1000_write_phy_reg(&adapter->hw,
  1009. M88E1000_EXT_PHY_SPEC_CTRL, phy_reg);
  1010. /* In addition, because of the s/w reset above, we need to enable
  1011. * CRS on TX. This must be set for both full and half duplex
  1012. * operation.
  1013. */
  1014. e1000_read_phy_reg(&adapter->hw, M88E1000_PHY_SPEC_CTRL, &phy_reg);
  1015. phy_reg |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
  1016. e1000_write_phy_reg(&adapter->hw,
  1017. M88E1000_PHY_SPEC_CTRL, phy_reg);
  1018. }
/*
 * Put a non-integrated (external M88) PHY into loopback for the self-test.
 * Forces the MAC to 1000/full, resets and reconfigures the PHY, disables
 * its receiver and sets the MII loopback bit, then reads back three
 * registers to confirm the configuration stuck.
 * Returns 0 on success, or 9/10/11 identifying which read-back failed.
 * The exact sequence (reset -> clk/crs -> loopback -> clk/crs) matters;
 * do not reorder.
 */
static int
e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter)
{
	uint32_t ctrl_reg;
	uint16_t phy_reg;

	/* Setup the Device Control Register for PHY loopback test. */

	ctrl_reg = E1000_READ_REG(&adapter->hw, CTRL);
	ctrl_reg |= (E1000_CTRL_ILOS |		/* Invert Loss-Of-Signal */
		     E1000_CTRL_FRCSPD |	/* Set the Force Speed Bit */
		     E1000_CTRL_FRCDPX |	/* Set the Force Duplex Bit */
		     E1000_CTRL_SPD_1000 |	/* Force Speed to 1000 */
		     E1000_CTRL_FD);		/* Force Duplex to FULL */

	E1000_WRITE_REG(&adapter->hw, CTRL, ctrl_reg);

	/* Read the PHY Specific Control Register (0x10) */
	e1000_read_phy_reg(&adapter->hw, M88E1000_PHY_SPEC_CTRL, &phy_reg);

	/* Clear Auto-Crossover bits in PHY Specific Control Register
	 * (bits 6:5).
	 */
	phy_reg &= ~M88E1000_PSCR_AUTO_X_MODE;
	e1000_write_phy_reg(&adapter->hw, M88E1000_PHY_SPEC_CTRL, phy_reg);

	/* Perform software reset on the PHY */
	e1000_phy_reset(&adapter->hw);

	/* Have to setup TX_CLK and TX_CRS after software reset */
	e1000_phy_reset_clk_and_crs(adapter);

	/* write 0x8100 = another software reset via the MII control reg */
	e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x8100);

	/* Wait for reset to complete. */
	udelay(500);

	/* Have to setup TX_CLK and TX_CRS after software reset */
	e1000_phy_reset_clk_and_crs(adapter);

	/* Write out to PHY registers 29 and 30 to disable the Receiver. */
	e1000_phy_disable_receiver(adapter);

	/* Set the loopback bit in the PHY control register. */
	e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_reg);
	phy_reg |= MII_CR_LOOPBACK;
	e1000_write_phy_reg(&adapter->hw, PHY_CTRL, phy_reg);

	/* Setup TX_CLK and TX_CRS one more time. */
	e1000_phy_reset_clk_and_crs(adapter);

	/* Check Phy Configuration: each read-back must match the value the
	 * sequence above should have produced */
	e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_reg);
	if (phy_reg != 0x4100)
		return 9;

	e1000_read_phy_reg(&adapter->hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg);
	if (phy_reg != 0x0070)
		return 10;

	e1000_read_phy_reg(&adapter->hw, 29, &phy_reg);
	if (phy_reg != 0x001A)
		return 11;

	return 0;
}
/*
 * Put an integrated PHY into loopback for the self-test: disable autoneg,
 * force the PHY into loopback at 100 (IFE PHY) or 1000 (everything else),
 * and force the MAC to the matching speed/duplex.  Always returns 0.
 */
static int
e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
{
	uint32_t ctrl_reg = 0;
	uint32_t stat_reg = 0;

	adapter->hw.autoneg = FALSE;

	if (adapter->hw.phy_type == e1000_phy_m88) {
		/* Auto-MDI/MDIX Off */
		e1000_write_phy_reg(&adapter->hw,
				    M88E1000_PHY_SPEC_CTRL, 0x0808);
		/* reset to update Auto-MDI/MDIX */
		e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x9140);
		/* autoneg off */
		e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x8140);
	} else if (adapter->hw.phy_type == e1000_phy_gg82563)
		/* 0x1CC: KMRN mode setting for loopback on this PHY --
		 * presumably per the GG82563 datasheet; undone in
		 * e1000_loopback_cleanup() with 0x180 */
		e1000_write_phy_reg(&adapter->hw,
		                    GG82563_PHY_KMRN_MODE_CTRL,
		                    0x1CC);

	ctrl_reg = E1000_READ_REG(&adapter->hw, CTRL);

	if (adapter->hw.phy_type == e1000_phy_ife) {
		/* force 100, set loopback */
		e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x6100);

		/* Now set up the MAC to the same speed/duplex as the PHY. */
		ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
		ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
			     E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
			     E1000_CTRL_SPD_100 |/* Force Speed to 100 */
			     E1000_CTRL_FD);	 /* Force Duplex to FULL */
	} else {
		/* force 1000, set loopback */
		e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x4140);

		/* Now set up the MAC to the same speed/duplex as the PHY. */
		/* NOTE(review): CTRL was already read just above and not
		 * modified since -- this re-read looks redundant; confirm
		 * before removing. */
		ctrl_reg = E1000_READ_REG(&adapter->hw, CTRL);
		ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
		ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
			     E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
			     E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
			     E1000_CTRL_FD);	 /* Force Duplex to FULL */
	}

	if (adapter->hw.media_type == e1000_media_type_copper &&
	    adapter->hw.phy_type == e1000_phy_m88)
		ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
	else {
		/* Set the ILOS bit on the fiber Nic is half
		 * duplex link is detected. */
		stat_reg = E1000_READ_REG(&adapter->hw, STATUS);
		if ((stat_reg & E1000_STATUS_FD) == 0)
			ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
	}

	E1000_WRITE_REG(&adapter->hw, CTRL, ctrl_reg);

	/* Disable the receiver on the PHY so when a cable is plugged in, the
	 * PHY does not begin to autoneg when a cable is reconnected to the NIC.
	 */
	if (adapter->hw.phy_type == e1000_phy_m88)
		e1000_phy_disable_receiver(adapter);

	udelay(500);

	return 0;
}
  1126. static int
  1127. e1000_set_phy_loopback(struct e1000_adapter *adapter)
  1128. {
  1129. uint16_t phy_reg = 0;
  1130. uint16_t count = 0;
  1131. switch (adapter->hw.mac_type) {
  1132. case e1000_82543:
  1133. if (adapter->hw.media_type == e1000_media_type_copper) {
  1134. /* Attempt to setup Loopback mode on Non-integrated PHY.
  1135. * Some PHY registers get corrupted at random, so
  1136. * attempt this 10 times.
  1137. */
  1138. while (e1000_nonintegrated_phy_loopback(adapter) &&
  1139. count++ < 10);
  1140. if (count < 11)
  1141. return 0;
  1142. }
  1143. break;
  1144. case e1000_82544:
  1145. case e1000_82540:
  1146. case e1000_82545:
  1147. case e1000_82545_rev_3:
  1148. case e1000_82546:
  1149. case e1000_82546_rev_3:
  1150. case e1000_82541:
  1151. case e1000_82541_rev_2:
  1152. case e1000_82547:
  1153. case e1000_82547_rev_2:
  1154. case e1000_82571:
  1155. case e1000_82572:
  1156. case e1000_82573:
  1157. case e1000_80003es2lan:
  1158. case e1000_ich8lan:
  1159. return e1000_integrated_phy_loopback(adapter);
  1160. break;
  1161. default:
  1162. /* Default PHY loopback work is to read the MII
  1163. * control register and assert bit 14 (loopback mode).
  1164. */
  1165. e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_reg);
  1166. phy_reg |= MII_CR_LOOPBACK;
  1167. e1000_write_phy_reg(&adapter->hw, PHY_CTRL, phy_reg);
  1168. return 0;
  1169. break;
  1170. }
  1171. return 8;
  1172. }
  1173. static int
  1174. e1000_setup_loopback_test(struct e1000_adapter *adapter)
  1175. {
  1176. struct e1000_hw *hw = &adapter->hw;
  1177. uint32_t rctl;
  1178. if (hw->media_type == e1000_media_type_fiber ||
  1179. hw->media_type == e1000_media_type_internal_serdes) {
  1180. switch (hw->mac_type) {
  1181. case e1000_82545:
  1182. case e1000_82546:
  1183. case e1000_82545_rev_3:
  1184. case e1000_82546_rev_3:
  1185. return e1000_set_phy_loopback(adapter);
  1186. break;
  1187. case e1000_82571:
  1188. case e1000_82572:
  1189. #define E1000_SERDES_LB_ON 0x410
  1190. e1000_set_phy_loopback(adapter);
  1191. E1000_WRITE_REG(hw, SCTL, E1000_SERDES_LB_ON);
  1192. msleep(10);
  1193. return 0;
  1194. break;
  1195. default:
  1196. rctl = E1000_READ_REG(hw, RCTL);
  1197. rctl |= E1000_RCTL_LBM_TCVR;
  1198. E1000_WRITE_REG(hw, RCTL, rctl);
  1199. return 0;
  1200. }
  1201. } else if (hw->media_type == e1000_media_type_copper)
  1202. return e1000_set_phy_loopback(adapter);
  1203. return 7;
  1204. }
/*
 * Undo whatever e1000_setup_loopback_test() configured: clear the MAC
 * loopback bits in RCTL, turn serdes loopback off on 82571/82572
 * fiber/serdes parts, restore the GG82563 KMRN mode, re-enable autoneg
 * and take the PHY out of loopback (with a reset) if it was in it.
 */
static void
e1000_loopback_cleanup(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	uint32_t rctl;
	uint16_t phy_reg;

	/* clear any MAC-level loopback first */
	rctl = E1000_READ_REG(hw, RCTL);
	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
	E1000_WRITE_REG(hw, RCTL, rctl);

	switch (hw->mac_type) {
	case e1000_82571:
	case e1000_82572:
		if (hw->media_type == e1000_media_type_fiber ||
		    hw->media_type == e1000_media_type_internal_serdes) {
#define E1000_SERDES_LB_OFF 0x400
			E1000_WRITE_REG(hw, SCTL, E1000_SERDES_LB_OFF);
			msleep(10);
			break;
		}
		/* copper 82571/82572 falls through to the PHY cleanup */
		/* Fall Through */
	case e1000_82545:
	case e1000_82546:
	case e1000_82545_rev_3:
	case e1000_82546_rev_3:
	default:
		hw->autoneg = TRUE;
		if (hw->phy_type == e1000_phy_gg82563)
			/* 0x180: restore the KMRN mode that setup changed
			 * to 0x1CC */
			e1000_write_phy_reg(hw,
					    GG82563_PHY_KMRN_MODE_CTRL,
					    0x180);
		e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg);
		if (phy_reg & MII_CR_LOOPBACK) {
			phy_reg &= ~MII_CR_LOOPBACK;
			e1000_write_phy_reg(hw, PHY_CTRL, phy_reg);
			/* reset so the PHY renegotiates cleanly */
			e1000_phy_reset(hw);
		}
		break;
	}
}
  1244. static void
  1245. e1000_create_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
  1246. {
  1247. memset(skb->data, 0xFF, frame_size);
  1248. frame_size &= ~1;
  1249. memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
  1250. memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
  1251. memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
  1252. }
  1253. static int
  1254. e1000_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
  1255. {
  1256. frame_size &= ~1;
  1257. if (*(skb->data + 3) == 0xFF) {
  1258. if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
  1259. (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
  1260. return 0;
  1261. }
  1262. }
  1263. return 13;
  1264. }
  1265. static int
  1266. e1000_run_loopback_test(struct e1000_adapter *adapter)
  1267. {
  1268. struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
  1269. struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
  1270. struct pci_dev *pdev = adapter->pdev;
  1271. int i, j, k, l, lc, good_cnt, ret_val=0;
  1272. unsigned long time;
  1273. E1000_WRITE_REG(&adapter->hw, RDT, rxdr->count - 1);
  1274. /* Calculate the loop count based on the largest descriptor ring
  1275. * The idea is to wrap the largest ring a number of times using 64
  1276. * send/receive pairs during each loop
  1277. */
  1278. if (rxdr->count <= txdr->count)
  1279. lc = ((txdr->count / 64) * 2) + 1;
  1280. else
  1281. lc = ((rxdr->count / 64) * 2) + 1;
  1282. k = l = 0;
  1283. for (j = 0; j <= lc; j++) { /* loop count loop */
  1284. for (i = 0; i < 64; i++) { /* send the packets */
  1285. e1000_create_lbtest_frame(txdr->buffer_info[i].skb,
  1286. 1024);
  1287. pci_dma_sync_single_for_device(pdev,
  1288. txdr->buffer_info[k].dma,
  1289. txdr->buffer_info[k].length,
  1290. PCI_DMA_TODEVICE);
  1291. if (unlikely(++k == txdr->count)) k = 0;
  1292. }
  1293. E1000_WRITE_REG(&adapter->hw, TDT, k);
  1294. msleep(200);
  1295. time = jiffies; /* set the start time for the receive */
  1296. good_cnt = 0;
  1297. do { /* receive the sent packets */
  1298. pci_dma_sync_single_for_cpu(pdev,
  1299. rxdr->buffer_info[l].dma,
  1300. rxdr->buffer_info[l].length,
  1301. PCI_DMA_FROMDEVICE);
  1302. ret_val = e1000_check_lbtest_frame(
  1303. rxdr->buffer_info[l].skb,
  1304. 1024);
  1305. if (!ret_val)
  1306. good_cnt++;
  1307. if (unlikely(++l == rxdr->count)) l = 0;
  1308. /* time + 20 msecs (200 msecs on 2.4) is more than
  1309. * enough time to complete the receives, if it's
  1310. * exceeded, break and error off
  1311. */
  1312. } while (good_cnt < 64 && jiffies < (time + 20));
  1313. if (good_cnt != 64) {
  1314. ret_val = 13; /* ret_val is the same as mis-compare */
  1315. break;
  1316. }
  1317. if (jiffies >= (time + 2)) {
  1318. ret_val = 14; /* error code for time out error */
  1319. break;
  1320. }
  1321. } /* end loop count loop */
  1322. return ret_val;
  1323. }
  1324. static int
  1325. e1000_loopback_test(struct e1000_adapter *adapter, uint64_t *data)
  1326. {
  1327. /* PHY loopback cannot be performed if SoL/IDER
  1328. * sessions are active */
  1329. if (e1000_check_phy_reset_block(&adapter->hw)) {
  1330. DPRINTK(DRV, ERR, "Cannot do PHY loopback test "
  1331. "when SoL/IDER is active.\n");
  1332. *data = 0;
  1333. goto out;
  1334. }
  1335. if ((*data = e1000_setup_desc_rings(adapter)))
  1336. goto out;
  1337. if ((*data = e1000_setup_loopback_test(adapter)))
  1338. goto err_loopback;
  1339. *data = e1000_run_loopback_test(adapter);
  1340. e1000_loopback_cleanup(adapter);
  1341. err_loopback:
  1342. e1000_free_desc_rings(adapter);
  1343. out:
  1344. return *data;
  1345. }
  1346. static int
  1347. e1000_link_test(struct e1000_adapter *adapter, uint64_t *data)
  1348. {
  1349. *data = 0;
  1350. if (adapter->hw.media_type == e1000_media_type_internal_serdes) {
  1351. int i = 0;
  1352. adapter->hw.serdes_link_down = TRUE;
  1353. /* On some blade server designs, link establishment
  1354. * could take as long as 2-3 minutes */
  1355. do {
  1356. e1000_check_for_link(&adapter->hw);
  1357. if (adapter->hw.serdes_link_down == FALSE)
  1358. return *data;
  1359. msleep(20);
  1360. } while (i++ < 3750);
  1361. *data = 1;
  1362. } else {
  1363. e1000_check_for_link(&adapter->hw);
  1364. if (adapter->hw.autoneg) /* if auto_neg is set wait for it */
  1365. msleep(4000);
  1366. if (!(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
  1367. *data = 1;
  1368. }
  1369. }
  1370. return *data;
  1371. }
/* ethtool hook: number of entries in the self-test results array */
static int
e1000_diag_test_count(struct net_device *netdev)
{
	return E1000_TEST_LEN;
}
  1377. extern void e1000_power_up_phy(struct e1000_adapter *);
/*
 * ethtool self-test entry point.  Fills data[0..4] with the results of
 * the register, EEPROM, interrupt, loopback and link tests (0 = pass)
 * and sets ETH_TEST_FL_FAILED on any failure.  Offline mode closes the
 * interface, resets the hardware between tests, and reopens it afterwards;
 * online mode runs only the link test.
 */
static void
e1000_diag_test(struct net_device *netdev,
		struct ethtool_test *eth_test, uint64_t *data)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	boolean_t if_running = netif_running(netdev);

	set_bit(__E1000_TESTING, &adapter->flags);
	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline tests */

		/* save speed, duplex, autoneg settings */
		uint16_t autoneg_advertised = adapter->hw.autoneg_advertised;
		uint8_t forced_speed_duplex = adapter->hw.forced_speed_duplex;
		uint8_t autoneg = adapter->hw.autoneg;

		DPRINTK(HW, INFO, "offline testing starting\n");

		/* Link test performed before hardware reset so autoneg doesn't
		 * interfere with test result */
		if (e1000_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		if (if_running)
			/* indicate we're in test mode */
			dev_close(netdev);
		else
			e1000_reset(adapter);

		if (e1000_reg_test(adapter, &data[0]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* reset between tests so one test's register state does not
		 * leak into the next */
		e1000_reset(adapter);
		if (e1000_eeprom_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		e1000_reset(adapter);
		if (e1000_intr_test(adapter, &data[2]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		e1000_reset(adapter);
		/* make sure the phy is powered up */
		e1000_power_up_phy(adapter);
		if (e1000_loopback_test(adapter, &data[3]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* restore speed, duplex, autoneg settings */
		adapter->hw.autoneg_advertised = autoneg_advertised;
		adapter->hw.forced_speed_duplex = forced_speed_duplex;
		adapter->hw.autoneg = autoneg;

		e1000_reset(adapter);
		clear_bit(__E1000_TESTING, &adapter->flags);
		if (if_running)
			/* NOTE(review): dev_open() can fail but its return
			 * value is ignored here -- confirm intended */
			dev_open(netdev);
	} else {
		DPRINTK(HW, INFO, "online testing starting\n");
		/* Online tests */
		if (e1000_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* Offline tests aren't run; pass by default */
		data[0] = 0;
		data[1] = 0;
		data[2] = 0;
		data[3] = 0;

		clear_bit(__E1000_TESTING, &adapter->flags);
	}
	/* give the hardware time to settle before userspace sees results */
	msleep_interruptible(4 * 1000);
}
/*
 * Decide whether this specific device/port supports Wake-on-LAN.
 * Clears wol->supported and returns 1 for excluded devices/ports;
 * returns 0 for ports where WoL is allowed.
 */
static int e1000_wol_exclusion(struct e1000_adapter *adapter, struct ethtool_wolinfo *wol)
{
	struct e1000_hw *hw = &adapter->hw;
	int retval = 1; /* fail by default */

	switch (hw->device_id) {
	case E1000_DEV_ID_82543GC_FIBER:
	case E1000_DEV_ID_82543GC_COPPER:
	case E1000_DEV_ID_82544EI_FIBER:
	case E1000_DEV_ID_82546EB_QUAD_COPPER:
	case E1000_DEV_ID_82545EM_FIBER:
	case E1000_DEV_ID_82545EM_COPPER:
	case E1000_DEV_ID_82546GB_QUAD_COPPER:
	case E1000_DEV_ID_82546GB_PCIE:
		/* these don't support WoL at all */
		wol->supported = 0;
		break;
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
	case E1000_DEV_ID_82571EB_FIBER:
	case E1000_DEV_ID_82571EB_SERDES:
	case E1000_DEV_ID_82571EB_COPPER:
		/* Wake events not supported on port B */
		if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) {
			wol->supported = 0;
			break;
		}
		/* return success for non excluded adapter ports */
		retval = 0;
		break;
	case E1000_DEV_ID_82571EB_QUAD_COPPER:
	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
		/* quad port adapters only support WoL on port A */
		if (!adapter->quad_port_a) {
			wol->supported = 0;
			break;
		}
		/* return success for non excluded adapter ports */
		retval = 0;
		break;
	default:
		/* dual port cards only support WoL on port A from now on
		 * unless it was enabled in the eeprom for port B
		 * so exclude FUNC_1 ports from having WoL enabled */
		if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1 &&
		    !adapter->eeprom_wol) {
			wol->supported = 0;
			break;
		}

		retval = 0;
	}

	return retval;
}
  1488. static void
  1489. e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
  1490. {
  1491. struct e1000_adapter *adapter = netdev_priv(netdev);
  1492. wol->supported = WAKE_UCAST | WAKE_MCAST |
  1493. WAKE_BCAST | WAKE_MAGIC;
  1494. wol->wolopts = 0;
  1495. /* this function will set ->supported = 0 and return 1 if wol is not
  1496. * supported by this hardware */
  1497. if (e1000_wol_exclusion(adapter, wol))
  1498. return;
  1499. /* apply any specific unsupported masks here */
  1500. switch (adapter->hw.device_id) {
  1501. case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
  1502. /* KSP3 does not suppport UCAST wake-ups */
  1503. wol->supported &= ~WAKE_UCAST;
  1504. if (adapter->wol & E1000_WUFC_EX)
  1505. DPRINTK(DRV, ERR, "Interface does not support "
  1506. "directed (unicast) frame wake-up packets\n");
  1507. break;
  1508. default:
  1509. break;
  1510. }
  1511. if (adapter->wol & E1000_WUFC_EX)
  1512. wol->wolopts |= WAKE_UCAST;
  1513. if (adapter->wol & E1000_WUFC_MC)
  1514. wol->wolopts |= WAKE_MCAST;
  1515. if (adapter->wol & E1000_WUFC_BC)
  1516. wol->wolopts |= WAKE_BCAST;
  1517. if (adapter->wol & E1000_WUFC_MAG)
  1518. wol->wolopts |= WAKE_MAGIC;
  1519. return;
  1520. }
  1521. static int
  1522. e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
  1523. {
  1524. struct e1000_adapter *adapter = netdev_priv(netdev);
  1525. struct e1000_hw *hw = &adapter->hw;
  1526. if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
  1527. return -EOPNOTSUPP;
  1528. if (e1000_wol_exclusion(adapter, wol))
  1529. return wol->wolopts ? -EOPNOTSUPP : 0;
  1530. switch (hw->device_id) {
  1531. case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
  1532. if (wol->wolopts & WAKE_UCAST) {
  1533. DPRINTK(DRV, ERR, "Interface does not support "
  1534. "directed (unicast) frame wake-up packets\n");
  1535. return -EOPNOTSUPP;
  1536. }
  1537. break;
  1538. default:
  1539. break;
  1540. }
  1541. /* these settings will always override what we currently have */
  1542. adapter->wol = 0;
  1543. if (wol->wolopts & WAKE_UCAST)
  1544. adapter->wol |= E1000_WUFC_EX;
  1545. if (wol->wolopts & WAKE_MCAST)
  1546. adapter->wol |= E1000_WUFC_MC;
  1547. if (wol->wolopts & WAKE_BCAST)
  1548. adapter->wol |= E1000_WUFC_BC;
  1549. if (wol->wolopts & WAKE_MAGIC)
  1550. adapter->wol |= E1000_WUFC_MAG;
  1551. return 0;
  1552. }
  1553. /* toggle LED 4 times per second = 2 "blinks" per second */
  1554. #define E1000_ID_INTERVAL (HZ/4)
  1555. /* bit defines for adapter->led_status */
  1556. #define E1000_LED_ON 0
  1557. static void
  1558. e1000_led_blink_callback(unsigned long data)
  1559. {
  1560. struct e1000_adapter *adapter = (struct e1000_adapter *) data;
  1561. if (test_and_change_bit(E1000_LED_ON, &adapter->led_status))
  1562. e1000_led_off(&adapter->hw);
  1563. else
  1564. e1000_led_on(&adapter->hw);
  1565. mod_timer(&adapter->blink_timer, jiffies + E1000_ID_INTERVAL);
  1566. }
  1567. static int
  1568. e1000_phys_id(struct net_device *netdev, uint32_t data)
  1569. {
  1570. struct e1000_adapter *adapter = netdev_priv(netdev);
  1571. if (!data || data > (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ))
  1572. data = (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ);
  1573. if (adapter->hw.mac_type < e1000_82571) {
  1574. if (!adapter->blink_timer.function) {
  1575. init_timer(&adapter->blink_timer);
  1576. adapter->blink_timer.function = e1000_led_blink_callback;
  1577. adapter->blink_timer.data = (unsigned long) adapter;
  1578. }
  1579. e1000_setup_led(&adapter->hw);
  1580. mod_timer(&adapter->blink_timer, jiffies);
  1581. msleep_interruptible(data * 1000);
  1582. del_timer_sync(&adapter->blink_timer);
  1583. } else if (adapter->hw.phy_type == e1000_phy_ife) {
  1584. if (!adapter->blink_timer.function) {
  1585. init_timer(&adapter->blink_timer);
  1586. adapter->blink_timer.function = e1000_led_blink_callback;
  1587. adapter->blink_timer.data = (unsigned long) adapter;
  1588. }
  1589. mod_timer(&adapter->blink_timer, jiffies);
  1590. msleep_interruptible(data * 1000);
  1591. del_timer_sync(&adapter->blink_timer);
  1592. e1000_write_phy_reg(&(adapter->hw), IFE_PHY_SPECIAL_CONTROL_LED, 0);
  1593. } else {
  1594. e1000_blink_led_start(&adapter->hw);
  1595. msleep_interruptible(data * 1000);
  1596. }
  1597. e1000_led_off(&adapter->hw);
  1598. clear_bit(E1000_LED_ON, &adapter->led_status);
  1599. e1000_cleanup_led(&adapter->hw);
  1600. return 0;
  1601. }
  1602. static int
  1603. e1000_nway_reset(struct net_device *netdev)
  1604. {
  1605. struct e1000_adapter *adapter = netdev_priv(netdev);
  1606. if (netif_running(netdev))
  1607. e1000_reinit_locked(adapter);
  1608. return 0;
  1609. }
  1610. static int
  1611. e1000_get_stats_count(struct net_device *netdev)
  1612. {
  1613. return E1000_STATS_LEN;
  1614. }
  1615. static void
  1616. e1000_get_ethtool_stats(struct net_device *netdev,
  1617. struct ethtool_stats *stats, uint64_t *data)
  1618. {
  1619. struct e1000_adapter *adapter = netdev_priv(netdev);
  1620. int i;
  1621. e1000_update_stats(adapter);
  1622. for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
  1623. char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset;
  1624. data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
  1625. sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p;
  1626. }
  1627. /* BUG_ON(i != E1000_STATS_LEN); */
  1628. }
  1629. static void
  1630. e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
  1631. {
  1632. uint8_t *p = data;
  1633. int i;
  1634. switch (stringset) {
  1635. case ETH_SS_TEST:
  1636. memcpy(data, *e1000_gstrings_test,
  1637. E1000_TEST_LEN*ETH_GSTRING_LEN);
  1638. break;
  1639. case ETH_SS_STATS:
  1640. for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
  1641. memcpy(p, e1000_gstrings_stats[i].stat_string,
  1642. ETH_GSTRING_LEN);
  1643. p += ETH_GSTRING_LEN;
  1644. }
  1645. /* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */
  1646. break;
  1647. }
  1648. }
/* ethtool callback table registered by e1000_set_ethtool_ops() */
static const struct ethtool_ops e1000_ethtool_ops = {
	/* link settings, driver identity, register dump */
	.get_settings = e1000_get_settings,
	.set_settings = e1000_set_settings,
	.get_drvinfo = e1000_get_drvinfo,
	.get_regs_len = e1000_get_regs_len,
	.get_regs = e1000_get_regs,
	/* Wake-on-LAN and message level */
	.get_wol = e1000_get_wol,
	.set_wol = e1000_set_wol,
	.get_msglevel = e1000_get_msglevel,
	.set_msglevel = e1000_set_msglevel,
	.nway_reset = e1000_nway_reset,
	.get_link = ethtool_op_get_link,
	/* EEPROM access */
	.get_eeprom_len = e1000_get_eeprom_len,
	.get_eeprom = e1000_get_eeprom,
	.set_eeprom = e1000_set_eeprom,
	/* ring sizing and flow control */
	.get_ringparam = e1000_get_ringparam,
	.set_ringparam = e1000_set_ringparam,
	.get_pauseparam = e1000_get_pauseparam,
	.set_pauseparam = e1000_set_pauseparam,
	/* offload features */
	.get_rx_csum = e1000_get_rx_csum,
	.set_rx_csum = e1000_set_rx_csum,
	.get_tx_csum = e1000_get_tx_csum,
	.set_tx_csum = e1000_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
#ifdef NETIF_F_TSO
	.get_tso = ethtool_op_get_tso,
	.set_tso = e1000_set_tso,
#endif
	/* diagnostics, identification and statistics */
	.self_test_count = e1000_diag_test_count,
	.self_test = e1000_diag_test,
	.get_strings = e1000_get_strings,
	.phys_id = e1000_phys_id,
	.get_stats_count = e1000_get_stats_count,
	.get_ethtool_stats = e1000_get_ethtool_stats,
	.get_perm_addr = ethtool_op_get_perm_addr,
};
/**
 * e1000_set_ethtool_ops - attach this driver's ethtool callbacks
 * @netdev: network interface to configure
 */
void e1000_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops);
}