xgmac.c

/*
 * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "common.h"
#include "regs.h"

/*
 * # of exact address filters. The first one is used for the station address,
 * the rest are available for multicast addresses.
 */
#define EXACT_ADDR_FILTERS 8

static inline int macidx(const struct cmac *mac)
{
        return mac->offset / (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR);
}

static void xaui_serdes_reset(struct cmac *mac)
{
        static const unsigned int clear[] = {
                F_PWRDN0 | F_PWRDN1, F_RESETPLL01, F_RESET0 | F_RESET1,
                F_PWRDN2 | F_PWRDN3, F_RESETPLL23, F_RESET2 | F_RESET3
        };

        int i;
        struct adapter *adap = mac->adapter;
        u32 ctrl = A_XGM_SERDES_CTRL0 + mac->offset;

        t3_write_reg(adap, ctrl, adap->params.vpd.xauicfg[macidx(mac)] |
                     F_RESET3 | F_RESET2 | F_RESET1 | F_RESET0 |
                     F_PWRDN3 | F_PWRDN2 | F_PWRDN1 | F_PWRDN0 |
                     F_RESETPLL23 | F_RESETPLL01);
        t3_read_reg(adap, ctrl);
        udelay(15);

        for (i = 0; i < ARRAY_SIZE(clear); i++) {
                t3_set_reg_field(adap, ctrl, clear[i], 0);
                udelay(15);
        }
}

void t3b_pcs_reset(struct cmac *mac)
{
        t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset,
                         F_PCS_RESET_, 0);
        udelay(20);
        t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset, 0,
                         F_PCS_RESET_);
}

int t3_mac_reset(struct cmac *mac)
{
        static const struct addr_val_pair mac_reset_avp[] = {
                {A_XGM_TX_CTRL, 0},
                {A_XGM_RX_CTRL, 0},
                {A_XGM_RX_CFG, F_DISPAUSEFRAMES | F_EN1536BFRAMES |
                 F_RMFCS | F_ENJUMBO | F_ENHASHMCAST},
                {A_XGM_RX_HASH_LOW, 0},
                {A_XGM_RX_HASH_HIGH, 0},
                {A_XGM_RX_EXACT_MATCH_LOW_1, 0},
                {A_XGM_RX_EXACT_MATCH_LOW_2, 0},
                {A_XGM_RX_EXACT_MATCH_LOW_3, 0},
                {A_XGM_RX_EXACT_MATCH_LOW_4, 0},
                {A_XGM_RX_EXACT_MATCH_LOW_5, 0},
                {A_XGM_RX_EXACT_MATCH_LOW_6, 0},
                {A_XGM_RX_EXACT_MATCH_LOW_7, 0},
                {A_XGM_RX_EXACT_MATCH_LOW_8, 0},
                {A_XGM_STAT_CTRL, F_CLRSTATS}
        };
        u32 val;
        struct adapter *adap = mac->adapter;
        unsigned int oft = mac->offset;

        t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
        t3_read_reg(adap, A_XGM_RESET_CTRL + oft);      /* flush */

        t3_write_regs(adap, mac_reset_avp, ARRAY_SIZE(mac_reset_avp), oft);
        t3_set_reg_field(adap, A_XGM_RXFIFO_CFG + oft,
                         F_RXSTRFRWRD | F_DISERRFRAMES,
                         uses_xaui(adap) ? 0 : F_RXSTRFRWRD);
        t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + oft, 0, F_UNDERUNFIX);

        if (uses_xaui(adap)) {
                if (adap->params.rev == 0) {
                        t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
                                         F_RXENABLE | F_TXENABLE);
                        if (t3_wait_op_done(adap, A_XGM_SERDES_STATUS1 + oft,
                                            F_CMULOCK, 1, 5, 2)) {
                                CH_ERR(adap,
                                       "MAC %d XAUI SERDES CMU lock failed\n",
                                       macidx(mac));
                                return -1;
                        }
                        t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
                                         F_SERDESRESET_);
                } else
                        xaui_serdes_reset(mac);
        }

        t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + oft,
                         V_RXMAXFRAMERSIZE(M_RXMAXFRAMERSIZE),
                         V_RXMAXFRAMERSIZE(MAX_FRAME_SIZE) | F_RXENFRAMER);

        val = F_MAC_RESET_ | F_XGMAC_STOP_EN;
        if (is_10G(adap))
                val |= F_PCS_RESET_;
        else if (uses_xaui(adap))
                val |= F_PCS_RESET_ | F_XG2G_RESET_;
        else
                val |= F_RGMII_RESET_ | F_XG2G_RESET_;
        t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
        t3_read_reg(adap, A_XGM_RESET_CTRL + oft);      /* flush */

        if ((val & F_PCS_RESET_) && adap->params.rev) {
                msleep(1);
                t3b_pcs_reset(mac);
        }

        memset(&mac->stats, 0, sizeof(mac->stats));
        return 0;
}

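/*
 * Summary of the T3B2 reset sequence below, as read from the code: the port
 * is first quiesced in the MPS (PORTnACTIVE and ENFORCEPKT cleared) and TX
 * pause is disabled so the TX path can drain, the per-channel TP drop
 * configuration is saved and temporarily tightened while the MAC is held in
 * reset, the Rx FIFO is polled until it drains, and finally the original
 * drop configuration and the MPS port enables are restored.
 */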
static int t3b2_mac_reset(struct cmac *mac)
{
        struct adapter *adap = mac->adapter;
        unsigned int oft = mac->offset, store;
        int idx = macidx(mac);
        u32 val;

        if (!macidx(mac))
                t3_set_reg_field(adap, A_MPS_CFG, F_PORT0ACTIVE, 0);
        else
                t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE, 0);

        /* Stop NIC traffic to reduce the number of TXTOGGLES */
        t3_set_reg_field(adap, A_MPS_CFG, F_ENFORCEPKT, 0);

        /* Ensure TX drains */
        t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN, 0);

        t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
        t3_read_reg(adap, A_XGM_RESET_CTRL + oft);      /* flush */

        /* Store A_TP_TX_DROP_CFG_CH0 */
        t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
        store = t3_read_reg(adap, A_TP_PIO_DATA);

        msleep(10);

        /* Change DROP_CFG to 0xc0000011 */
        t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
        t3_write_reg(adap, A_TP_PIO_DATA, 0xc0000011);

        /* Check for XGM Rx FIFO empty */
        /* Loop count increased to 1000 from 5 to cover the 1G and 100Mbps cases */
        if (t3_wait_op_done(adap, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT + oft,
                            0x80000000, 1, 1000, 2)) {
                CH_ERR(adap, "MAC %d Rx fifo drain failed\n",
                       macidx(mac));
                return -1;
        }

        t3_write_reg(adap, A_XGM_RESET_CTRL + oft, 0);
        t3_read_reg(adap, A_XGM_RESET_CTRL + oft);      /* flush */

        val = F_MAC_RESET_;
        if (is_10G(adap))
                val |= F_PCS_RESET_;
        else if (uses_xaui(adap))
                val |= F_PCS_RESET_ | F_XG2G_RESET_;
        else
                val |= F_RGMII_RESET_ | F_XG2G_RESET_;
        t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
        t3_read_reg(adap, A_XGM_RESET_CTRL + oft);      /* flush */

        if ((val & F_PCS_RESET_) && adap->params.rev) {
                msleep(1);
                t3b_pcs_reset(mac);
        }
        t3_write_reg(adap, A_XGM_RX_CFG + oft,
                     F_DISPAUSEFRAMES | F_EN1536BFRAMES |
                     F_RMFCS | F_ENJUMBO | F_ENHASHMCAST);

        /* Restore the DROP_CFG */
        t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
        t3_write_reg(adap, A_TP_PIO_DATA, store);

        if (!idx)
                t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT0ACTIVE);
        else
                t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT1ACTIVE);

        /* re-enable nic traffic */
        t3_set_reg_field(adap, A_MPS_CFG, F_ENFORCEPKT, 1);

        /* Set: re-enable NIC traffic */
        t3_set_reg_field(adap, A_MPS_CFG, F_ENFORCEPKT, 1);

        return 0;
}

/*
 * Set the exact match register 'idx' to recognize the given Ethernet address.
 */
static void set_addr_filter(struct cmac *mac, int idx, const u8 *addr)
{
        u32 addr_lo, addr_hi;
        unsigned int oft = mac->offset + idx * 8;

        addr_lo = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
        addr_hi = (addr[5] << 8) | addr[4];
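
        /*
         * Worked example (illustration only): for the address
         * 00:07:43:12:34:56, addr[0..5] = {0x00, 0x07, 0x43, 0x12, 0x34, 0x56},
         * so addr_lo = 0x12430700 and addr_hi = 0x00005634.  Each filter
         * occupies a LOW/HIGH register pair 8 bytes apart, hence idx * 8 above.
         */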
        t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1 + oft, addr_lo);
        t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_HIGH_1 + oft, addr_hi);
}

/* Set one of the station's unicast MAC addresses. */
int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6])
{
        if (idx >= mac->nucast)
                return -EINVAL;
        set_addr_filter(mac, idx, addr);
        return 0;
}

/*
 * Specify the number of exact address filters that should be reserved for
 * unicast addresses. Caller should reload the unicast and multicast addresses
 * after calling this.
 */
int t3_mac_set_num_ucast(struct cmac *mac, int n)
{
        if (n > EXACT_ADDR_FILTERS)
                return -EINVAL;
        mac->nucast = n;
        return 0;
}

void t3_mac_disable_exact_filters(struct cmac *mac)
{
        unsigned int i, reg = mac->offset + A_XGM_RX_EXACT_MATCH_LOW_1;

        for (i = 0; i < EXACT_ADDR_FILTERS; i++, reg += 8) {
                u32 v = t3_read_reg(mac->adapter, reg);
                t3_write_reg(mac->adapter, reg, v);
        }
        t3_read_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1);  /* flush */
}

void t3_mac_enable_exact_filters(struct cmac *mac)
{
        unsigned int i, reg = mac->offset + A_XGM_RX_EXACT_MATCH_HIGH_1;

        for (i = 0; i < EXACT_ADDR_FILTERS; i++, reg += 8) {
                u32 v = t3_read_reg(mac->adapter, reg);
                t3_write_reg(mac->adapter, reg, v);
        }
        t3_read_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1);  /* flush */
}

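/*
 * Note on the two helpers above: both simply read each exact-match register
 * back and rewrite it; the only difference is which half of the LOW/HIGH
 * pair is touched.  Judging by set_addr_filter(), which writes LOW and then
 * HIGH, rewriting the LOW half appears to disarm an entry until its HIGH
 * half is written again, which would explain why rewriting LOW disables the
 * filters and rewriting HIGH re-enables them.  This is inferred from the
 * code, not taken from the hardware documentation.
 */
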
/* Calculate the RX hash filter index of an Ethernet address */
static int hash_hw_addr(const u8 *addr)
{
        int hash = 0, octet, bit, i = 0, c;

        for (octet = 0; octet < 6; ++octet)
                for (c = addr[octet], bit = 0; bit < 8; c >>= 1, ++bit) {
                        hash ^= (c & 1) << i;
                        if (++i == 6)
                                i = 0;
                }
        return hash;
}

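/*
 * hash_hw_addr() folds all 48 address bits into a 6-bit value by XOR, so it
 * returns an index in the range 0..63.  t3_mac_set_rx_mode() below uses that
 * index to set one bit in the 64-bit hash table formed by the register pair
 * A_XGM_RX_HASH_LOW (indices 0-31) and A_XGM_RX_HASH_HIGH (indices 32-63).
 */
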
int t3_mac_set_rx_mode(struct cmac *mac, struct t3_rx_mode *rm)
{
        u32 val, hash_lo, hash_hi;
        struct adapter *adap = mac->adapter;
        unsigned int oft = mac->offset;

        val = t3_read_reg(adap, A_XGM_RX_CFG + oft) & ~F_COPYALLFRAMES;
        if (rm->dev->flags & IFF_PROMISC)
                val |= F_COPYALLFRAMES;
        t3_write_reg(adap, A_XGM_RX_CFG + oft, val);

        if (rm->dev->flags & IFF_ALLMULTI)
                hash_lo = hash_hi = 0xffffffff;
        else {
                u8 *addr;
                int exact_addr_idx = mac->nucast;

                hash_lo = hash_hi = 0;
                while ((addr = t3_get_next_mcaddr(rm)))
                        if (exact_addr_idx < EXACT_ADDR_FILTERS)
                                set_addr_filter(mac, exact_addr_idx++, addr);
                        else {
                                int hash = hash_hw_addr(addr);

                                if (hash < 32)
                                        hash_lo |= (1 << hash);
                                else
                                        hash_hi |= (1 << (hash - 32));
                        }
        }

        t3_write_reg(adap, A_XGM_RX_HASH_LOW + oft, hash_lo);
        t3_write_reg(adap, A_XGM_RX_HASH_HIGH + oft, hash_hi);
        return 0;
}

static int rx_fifo_hwm(int mtu)
{
        int hwm;

        hwm = max(MAC_RXFIFO_SIZE - 3 * mtu, (MAC_RXFIFO_SIZE * 38) / 100);
        return min(hwm, MAC_RXFIFO_SIZE - 8192);
}

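/*
 * In words: the pause high watermark leaves room for roughly three
 * maximum-sized frames above it, is never set below 38% of the Rx FIFO, and
 * never comes within 8 KB of the top of the FIFO.  For example, assuming
 * MAC_RXFIFO_SIZE is 32768 and a 1514-byte frame,
 * max(32768 - 3 * 1514, 32768 * 38 / 100) = 28226, which the final min()
 * caps at 32768 - 8192 = 24576.
 */
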
int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
{
        int hwm, lwm, divisor;
        int ipg;
        unsigned int thres, v, reg;
        struct adapter *adap = mac->adapter;

        /*
         * MAX_FRAME_SIZE includes header + FCS, mtu doesn't. The HW max
         * packet size register includes header, but not FCS.
         */
        mtu += 14;
        if (mtu > MAX_FRAME_SIZE - 4)
                return -EINVAL;
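
        /*
         * Illustration: a standard 1500-byte MTU becomes 1514 here (14-byte
         * Ethernet header, no FCS).  Assuming MAX_FRAME_SIZE is 10240, MTUs
         * up to 10222 pass the check above.
         */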
        t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu);

        if (adap->params.rev >= T3_REV_B2 &&
            (t3_read_reg(adap, A_XGM_RX_CTRL + mac->offset) & F_RXEN)) {
                t3_mac_disable_exact_filters(mac);
                v = t3_read_reg(adap, A_XGM_RX_CFG + mac->offset);
                t3_set_reg_field(adap, A_XGM_RX_CFG + mac->offset,
                                 F_ENHASHMCAST | F_COPYALLFRAMES, F_DISBCAST);

                reg = adap->params.rev == T3_REV_B2 ?
                      A_XGM_RX_MAX_PKT_SIZE_ERR_CNT : A_XGM_RXFIFO_CFG;

                /* drain RX FIFO */
                if (t3_wait_op_done(adap, reg + mac->offset,
                                    F_RXFIFO_EMPTY, 1, 20, 5)) {
                        t3_write_reg(adap, A_XGM_RX_CFG + mac->offset, v);
                        t3_mac_enable_exact_filters(mac);
                        return -EIO;
                }
                t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset,
                                 V_RXMAXPKTSIZE(M_RXMAXPKTSIZE),
                                 V_RXMAXPKTSIZE(mtu));
                t3_write_reg(adap, A_XGM_RX_CFG + mac->offset, v);
                t3_mac_enable_exact_filters(mac);
        } else
                t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset,
                                 V_RXMAXPKTSIZE(M_RXMAXPKTSIZE),
                                 V_RXMAXPKTSIZE(mtu));

        /*
         * Adjust the PAUSE frame watermarks. We always set the LWM, and the
         * HWM only if flow-control is enabled.
         */
        hwm = rx_fifo_hwm(mtu);
        lwm = min(3 * (int)mtu, MAC_RXFIFO_SIZE / 4);
        v = t3_read_reg(adap, A_XGM_RXFIFO_CFG + mac->offset);
        v &= ~V_RXFIFOPAUSELWM(M_RXFIFOPAUSELWM);
        v |= V_RXFIFOPAUSELWM(lwm / 8);
        if (G_RXFIFOPAUSEHWM(v))
                v = (v & ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM)) |
                    V_RXFIFOPAUSEHWM(hwm / 8);
        t3_write_reg(adap, A_XGM_RXFIFO_CFG + mac->offset, v);
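
        /*
         * Continuing the 1514-byte example (again assuming MAC_RXFIFO_SIZE
         * is 32768): lwm = min(3 * 1514, 8192) = 4542 and hwm = 24576, both
         * programmed in units of 8 bytes (hence the / 8).  The HWM field is
         * only rewritten if it is already non-zero, i.e. if pause generation
         * was previously enabled via t3_mac_set_speed_duplex_fc().
         */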
        /* Adjust the TX FIFO threshold based on the MTU */
        thres = (adap->params.vpd.cclk * 1000) / 15625;
        thres = (thres * mtu) / 1000;
        if (is_10G(adap))
                thres /= 10;
        thres = mtu > thres ? (mtu - thres + 7) / 8 : 0;
        thres = max(thres, 8U);         /* need at least 8 */
        ipg = (adap->params.rev == T3_REV_C) ? 0 : 1;
        t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + mac->offset,
                         V_TXFIFOTHRESH(M_TXFIFOTHRESH) | V_TXIPG(M_TXIPG),
                         V_TXFIFOTHRESH(thres) | V_TXIPG(ipg));

        if (adap->params.rev > 0) {
                divisor = (adap->params.rev == T3_REV_C) ? 64 : 8;
                t3_write_reg(adap, A_XGM_PAUSE_TIMER + mac->offset,
                             (hwm - lwm) * 4 / divisor);
        }
        t3_write_reg(adap, A_XGM_TX_PAUSE_QUANTA + mac->offset,
                     MAC_RXFIFO_SIZE * 4 * 8 / 512);
        return 0;
}

int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc)
{
        u32 val;
        struct adapter *adap = mac->adapter;
        unsigned int oft = mac->offset;

        if (duplex >= 0 && duplex != DUPLEX_FULL)
                return -EINVAL;
        if (speed >= 0) {
                if (speed == SPEED_10)
                        val = V_PORTSPEED(0);
                else if (speed == SPEED_100)
                        val = V_PORTSPEED(1);
                else if (speed == SPEED_1000)
                        val = V_PORTSPEED(2);
                else if (speed == SPEED_10000)
                        val = V_PORTSPEED(3);
                else
                        return -EINVAL;

                t3_set_reg_field(adap, A_XGM_PORT_CFG + oft,
                                 V_PORTSPEED(M_PORTSPEED), val);
        }

        val = t3_read_reg(adap, A_XGM_RXFIFO_CFG + oft);
        val &= ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM);
        if (fc & PAUSE_TX)
                val |= V_RXFIFOPAUSEHWM(rx_fifo_hwm(
                                t3_read_reg(adap,
                                            A_XGM_RX_MAX_PKT_SIZE + oft)) / 8);
        t3_write_reg(adap, A_XGM_RXFIFO_CFG + oft, val);
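
        /*
         * Note the pairing here: PAUSE_TX (we send pause frames) is handled
         * above by programming the Rx FIFO high watermark that triggers
         * pause generation, while PAUSE_RX (we honour received pause frames)
         * is handled below by F_TXPAUSEEN, which allows incoming pause
         * frames to throttle the transmitter.  This reading of the bit names
         * is inferred from the code (t3b2_mac_reset() clears F_TXPAUSEEN to
         * "Ensure TX drains"), not from the hardware manual.
         */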
        t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN,
                         (fc & PAUSE_RX) ? F_TXPAUSEEN : 0);
        return 0;
}

int t3_mac_enable(struct cmac *mac, int which)
{
        int idx = macidx(mac);
        struct adapter *adap = mac->adapter;
        unsigned int oft = mac->offset;
        struct mac_stats *s = &mac->stats;

        if (which & MAC_DIRECTION_TX) {
                t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
                t3_write_reg(adap, A_TP_PIO_DATA,
                             adap->params.rev == T3_REV_C ?
                             0xc4ffff01 : 0xc0ede401);
                t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE);
                t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx,
                                 adap->params.rev == T3_REV_C ? 0 : 1 << idx);
                t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN);
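
                /*
                 * Snapshot the frame, drop, SOP and pause counters.
                 * t3b2_mac_watchdog_task() compares the live counters
                 * against these saved values to decide whether the TX path
                 * has stalled and needs to be toggled or reset.
                 */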
                t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CNT_CH0 + idx);
                mac->tx_mcnt = s->tx_frames;
                mac->tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap,
                                                        A_TP_PIO_DATA)));
                mac->tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
                                                A_XGM_TX_SPI4_SOP_EOP_CNT +
                                                oft)));
                mac->rx_mcnt = s->rx_frames;
                mac->rx_pause = s->rx_pause;
                mac->rx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
                                                A_XGM_RX_SPI4_SOP_EOP_CNT +
                                                oft)));
                mac->rx_ocnt = s->rx_fifo_ovfl;
                mac->txen = F_TXEN;
                mac->toggle_cnt = 0;
        }
        if (which & MAC_DIRECTION_RX)
                t3_write_reg(adap, A_XGM_RX_CTRL + oft, F_RXEN);
        return 0;
}

int t3_mac_disable(struct cmac *mac, int which)
{
        struct adapter *adap = mac->adapter;

        if (which & MAC_DIRECTION_TX) {
                t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
                mac->txen = 0;
        }
        if (which & MAC_DIRECTION_RX) {
                int val = F_MAC_RESET_;

                t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset,
                                 F_PCS_RESET_, 0);
                msleep(100);
                t3_write_reg(adap, A_XGM_RX_CTRL + mac->offset, 0);
                if (is_10G(adap))
                        val |= F_PCS_RESET_;
                else if (uses_xaui(adap))
                        val |= F_PCS_RESET_ | F_XG2G_RESET_;
                else
                        val |= F_RGMII_RESET_ | F_XG2G_RESET_;
                t3_write_reg(mac->adapter, A_XGM_RESET_CTRL + mac->offset, val);
        }
        return 0;
}

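/*
 * TX-stall watchdog for T3B2, expected to be called periodically.  Returns
 * 0 if the MAC looks healthy, 1 if the transmitter appeared stuck and TX was
 * toggled off and back on, and 2 if more than four consecutive toggles did
 * not help and the MAC was reset via t3b2_mac_reset().
 */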
int t3b2_mac_watchdog_task(struct cmac *mac)
{
        struct adapter *adap = mac->adapter;
        struct mac_stats *s = &mac->stats;
        unsigned int tx_tcnt, tx_xcnt;
        u64 tx_mcnt = s->tx_frames;
        int status;

        status = 0;
        tx_xcnt = 1;            /* By default tx_xcnt is making progress */
        tx_tcnt = mac->tx_tcnt; /* If tx_mcnt is progressing ignore tx_tcnt */
        if (tx_mcnt == mac->tx_mcnt && mac->rx_pause == s->rx_pause) {
                tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
                                                A_XGM_TX_SPI4_SOP_EOP_CNT +
                                                mac->offset)));
                if (tx_xcnt == 0) {
                        t3_write_reg(adap, A_TP_PIO_ADDR,
                                     A_TP_TX_DROP_CNT_CH0 + macidx(mac));
                        tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap,
                                                        A_TP_PIO_DATA)));
                } else {
                        goto out;
                }
        } else {
                mac->toggle_cnt = 0;
                goto out;
        }

        if ((tx_tcnt != mac->tx_tcnt) && (mac->tx_xcnt == 0)) {
                if (mac->toggle_cnt > 4) {
                        status = 2;
                        goto out;
                } else {
                        status = 1;
                        goto out;
                }
        } else {
                mac->toggle_cnt = 0;
                goto out;
        }

out:
        mac->tx_tcnt = tx_tcnt;
        mac->tx_xcnt = tx_xcnt;
        mac->tx_mcnt = s->tx_frames;
        mac->rx_pause = s->rx_pause;
        if (status == 1) {
                t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
                t3_read_reg(adap, A_XGM_TX_CTRL + mac->offset);  /* flush */
                t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, mac->txen);
                t3_read_reg(adap, A_XGM_TX_CTRL + mac->offset);  /* flush */
                mac->toggle_cnt++;
        } else if (status == 2) {
                t3b2_mac_reset(mac);
                mac->toggle_cnt = 0;
        }
        return status;
}

/*
 * This function is called periodically to accumulate the current values of the
 * RMON counters into the port statistics. Since the packet counters are only
 * 32 bits they can overflow in ~286 secs at 10G, so the function should be
 * called more frequently than that. The byte counters are 45-bit wide, they
 * would overflow in ~7.8 hours.
 */
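/*
 * Roughly where those figures come from: at 10 Gb/s a stream of minimum-size
 * frames (64 bytes + 8 bytes preamble + 12 bytes inter-frame gap = 672 bits)
 * arrives at about 14.88 Mframes/s, and 2^32 / 14.88e6 is just under 290 s.
 * For bytes, 10 Gb/s is 1.25e9 bytes/s and 2^45 / 1.25e9 is about 28100 s,
 * i.e. roughly 7.8 hours.
 */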
const struct mac_stats *t3_mac_update_stats(struct cmac *mac)
{
#define RMON_READ(mac, addr) t3_read_reg(mac->adapter, addr + mac->offset)
#define RMON_UPDATE(mac, name, reg) \
        (mac)->stats.name += (u64)RMON_READ(mac, A_XGM_STAT_##reg)
#define RMON_UPDATE64(mac, name, reg_lo, reg_hi) \
        (mac)->stats.name += RMON_READ(mac, A_XGM_STAT_##reg_lo) + \
                             ((u64)RMON_READ(mac, A_XGM_STAT_##reg_hi) << 32)

        u32 v, lo;

        RMON_UPDATE64(mac, rx_octets, RX_BYTES_LOW, RX_BYTES_HIGH);
        RMON_UPDATE64(mac, rx_frames, RX_FRAMES_LOW, RX_FRAMES_HIGH);
        RMON_UPDATE(mac, rx_mcast_frames, RX_MCAST_FRAMES);
        RMON_UPDATE(mac, rx_bcast_frames, RX_BCAST_FRAMES);
        RMON_UPDATE(mac, rx_fcs_errs, RX_CRC_ERR_FRAMES);
        RMON_UPDATE(mac, rx_pause, RX_PAUSE_FRAMES);
        RMON_UPDATE(mac, rx_jabber, RX_JABBER_FRAMES);
        RMON_UPDATE(mac, rx_short, RX_SHORT_FRAMES);
        RMON_UPDATE(mac, rx_symbol_errs, RX_SYM_CODE_ERR_FRAMES);
        RMON_UPDATE(mac, rx_too_long, RX_OVERSIZE_FRAMES);

        v = RMON_READ(mac, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT);
        if (mac->adapter->params.rev == T3_REV_B2)
                v &= 0x7fffffff;
        mac->stats.rx_too_long += v;

        RMON_UPDATE(mac, rx_frames_64, RX_64B_FRAMES);
        RMON_UPDATE(mac, rx_frames_65_127, RX_65_127B_FRAMES);
        RMON_UPDATE(mac, rx_frames_128_255, RX_128_255B_FRAMES);
        RMON_UPDATE(mac, rx_frames_256_511, RX_256_511B_FRAMES);
        RMON_UPDATE(mac, rx_frames_512_1023, RX_512_1023B_FRAMES);
        RMON_UPDATE(mac, rx_frames_1024_1518, RX_1024_1518B_FRAMES);
        RMON_UPDATE(mac, rx_frames_1519_max, RX_1519_MAXB_FRAMES);

        RMON_UPDATE64(mac, tx_octets, TX_BYTE_LOW, TX_BYTE_HIGH);
        RMON_UPDATE64(mac, tx_frames, TX_FRAME_LOW, TX_FRAME_HIGH);
        RMON_UPDATE(mac, tx_mcast_frames, TX_MCAST);
        RMON_UPDATE(mac, tx_bcast_frames, TX_BCAST);
        RMON_UPDATE(mac, tx_pause, TX_PAUSE);
        /* This counts error frames in general (bad FCS, underrun, etc). */
        RMON_UPDATE(mac, tx_underrun, TX_ERR_FRAMES);
        RMON_UPDATE(mac, tx_frames_64, TX_64B_FRAMES);
        RMON_UPDATE(mac, tx_frames_65_127, TX_65_127B_FRAMES);
        RMON_UPDATE(mac, tx_frames_128_255, TX_128_255B_FRAMES);
        RMON_UPDATE(mac, tx_frames_256_511, TX_256_511B_FRAMES);
        RMON_UPDATE(mac, tx_frames_512_1023, TX_512_1023B_FRAMES);
        RMON_UPDATE(mac, tx_frames_1024_1518, TX_1024_1518B_FRAMES);
        RMON_UPDATE(mac, tx_frames_1519_max, TX_1519_MAXB_FRAMES);

        /* The next stat isn't clear-on-read. */
        t3_write_reg(mac->adapter, A_TP_MIB_INDEX, mac->offset ? 51 : 50);
        v = t3_read_reg(mac->adapter, A_TP_MIB_RDATA);
        lo = (u32)mac->stats.rx_cong_drops;
        mac->stats.rx_cong_drops += (u64)(v - lo);

        return &mac->stats;
}