/*
 * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
 *
 * This file is free software: you may copy, redistribute and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation, either version 2 of the License, or (at your
 * option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *
 * Copyright (c) 2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/mdio.h>

#include "reg.h"
#include "hw.h"

static inline bool alx_is_rev_a(u8 rev)
{
	return rev == ALX_REV_A0 || rev == ALX_REV_A1;
}
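
/* Poll the MDIO control word until the BUSY bit clears. Each iteration
 * waits 10us; after ALX_MDIO_MAX_AC_TO attempts the access is treated as
 * failed and -ETIMEDOUT is returned.
 */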
static int alx_wait_mdio_idle(struct alx_hw *hw)
{
	u32 val;
	int i;

	for (i = 0; i < ALX_MDIO_MAX_AC_TO; i++) {
		val = alx_read_mem32(hw, ALX_MDIO);
		if (!(val & ALX_MDIO_BUSY))
			return 0;
		udelay(10);
	}

	return -ETIMEDOUT;
}
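
/* Start a single MDIO access and wait for it to finish. When @ext is set
 * the access goes through the extended window (ALX_MDIO_EXTN) using a
 * device address, otherwise a plain PHY register access is issued. A
 * slower MDIO clock is selected while the link is down.
 */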
static int alx_read_phy_core(struct alx_hw *hw, bool ext, u8 dev,
			     u16 reg, u16 *phy_data)
{
	u32 val, clk_sel;
	int err;

	*phy_data = 0;

	/* use slow clock when it's in hibernation status */
	clk_sel = hw->link_speed != SPEED_UNKNOWN ?
			ALX_MDIO_CLK_SEL_25MD4 :
			ALX_MDIO_CLK_SEL_25MD128;

	if (ext) {
		val = dev << ALX_MDIO_EXTN_DEVAD_SHIFT |
		      reg << ALX_MDIO_EXTN_REG_SHIFT;
		alx_write_mem32(hw, ALX_MDIO_EXTN, val);

		val = ALX_MDIO_SPRES_PRMBL | ALX_MDIO_START |
		      ALX_MDIO_MODE_EXT | ALX_MDIO_OP_READ |
		      clk_sel << ALX_MDIO_CLK_SEL_SHIFT;
	} else {
		val = ALX_MDIO_SPRES_PRMBL |
		      clk_sel << ALX_MDIO_CLK_SEL_SHIFT |
		      reg << ALX_MDIO_REG_SHIFT |
		      ALX_MDIO_START | ALX_MDIO_OP_READ;
	}
	alx_write_mem32(hw, ALX_MDIO, val);

	err = alx_wait_mdio_idle(hw);
	if (err)
		return err;
	val = alx_read_mem32(hw, ALX_MDIO);
	*phy_data = ALX_GET_FIELD(val, ALX_MDIO_DATA);

	return 0;
}

static int alx_write_phy_core(struct alx_hw *hw, bool ext, u8 dev,
			      u16 reg, u16 phy_data)
{
	u32 val, clk_sel;

	/* use slow clock when it's in hibernation status */
	clk_sel = hw->link_speed != SPEED_UNKNOWN ?
			ALX_MDIO_CLK_SEL_25MD4 :
			ALX_MDIO_CLK_SEL_25MD128;

	if (ext) {
		val = dev << ALX_MDIO_EXTN_DEVAD_SHIFT |
		      reg << ALX_MDIO_EXTN_REG_SHIFT;
		alx_write_mem32(hw, ALX_MDIO_EXTN, val);

		val = ALX_MDIO_SPRES_PRMBL |
		      clk_sel << ALX_MDIO_CLK_SEL_SHIFT |
		      phy_data << ALX_MDIO_DATA_SHIFT |
		      ALX_MDIO_START | ALX_MDIO_MODE_EXT;
	} else {
		val = ALX_MDIO_SPRES_PRMBL |
		      clk_sel << ALX_MDIO_CLK_SEL_SHIFT |
		      reg << ALX_MDIO_REG_SHIFT |
		      phy_data << ALX_MDIO_DATA_SHIFT |
		      ALX_MDIO_START;
	}
	alx_write_mem32(hw, ALX_MDIO, val);

	return alx_wait_mdio_idle(hw);
}

static int __alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data)
{
	return alx_read_phy_core(hw, false, 0, reg, phy_data);
}

static int __alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data)
{
	return alx_write_phy_core(hw, false, 0, reg, phy_data);
}

static int __alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata)
{
	return alx_read_phy_core(hw, true, dev, reg, pdata);
}

static int __alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data)
{
	return alx_write_phy_core(hw, true, dev, reg, data);
}

static int __alx_read_phy_dbg(struct alx_hw *hw, u16 reg, u16 *pdata)
{
	int err;

	err = __alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, reg);
	if (err)
		return err;

	return __alx_read_phy_reg(hw, ALX_MII_DBG_DATA, pdata);
}

static int __alx_write_phy_dbg(struct alx_hw *hw, u16 reg, u16 data)
{
	int err;

	err = __alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, reg);
	if (err)
		return err;

	return __alx_write_phy_reg(hw, ALX_MII_DBG_DATA, data);
}
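
/* The __alx_* helpers above assume the caller already holds mdio_lock; the
 * wrappers below take the lock around a single PHY access so they can be
 * used from normal driver context.
 */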
int alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data)
{
	int err;

	spin_lock(&hw->mdio_lock);
	err = __alx_read_phy_reg(hw, reg, phy_data);
	spin_unlock(&hw->mdio_lock);

	return err;
}

int alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data)
{
	int err;

	spin_lock(&hw->mdio_lock);
	err = __alx_write_phy_reg(hw, reg, phy_data);
	spin_unlock(&hw->mdio_lock);

	return err;
}

int alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata)
{
	int err;

	spin_lock(&hw->mdio_lock);
	err = __alx_read_phy_ext(hw, dev, reg, pdata);
	spin_unlock(&hw->mdio_lock);

	return err;
}

int alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data)
{
	int err;

	spin_lock(&hw->mdio_lock);
	err = __alx_write_phy_ext(hw, dev, reg, data);
	spin_unlock(&hw->mdio_lock);

	return err;
}

static int alx_read_phy_dbg(struct alx_hw *hw, u16 reg, u16 *pdata)
{
	int err;

	spin_lock(&hw->mdio_lock);
	err = __alx_read_phy_dbg(hw, reg, pdata);
	spin_unlock(&hw->mdio_lock);

	return err;
}

static int alx_write_phy_dbg(struct alx_hw *hw, u16 reg, u16 data)
{
	int err;

	spin_lock(&hw->mdio_lock);
	err = __alx_write_phy_dbg(hw, reg, data);
	spin_unlock(&hw->mdio_lock);

	return err;
}

static u16 alx_get_phy_config(struct alx_hw *hw)
{
	u32 val;
	u16 phy_val;

	val = alx_read_mem32(hw, ALX_PHY_CTRL);
	/* phy in reset */
	if ((val & ALX_PHY_CTRL_DSPRST_OUT) == 0)
		return ALX_DRV_PHY_UNKNOWN;

	val = alx_read_mem32(hw, ALX_DRV);
	val = ALX_GET_FIELD(val, ALX_DRV_PHY);
	if (ALX_DRV_PHY_UNKNOWN == val)
		return ALX_DRV_PHY_UNKNOWN;

	alx_read_phy_reg(hw, ALX_MII_DBG_ADDR, &phy_val);
	if (ALX_PHY_INITED == phy_val)
		return val;

	return ALX_DRV_PHY_UNKNOWN;
}

static bool alx_wait_reg(struct alx_hw *hw, u32 reg, u32 wait, u32 *val)
{
	u32 read;
	int i;

	for (i = 0; i < ALX_SLD_MAX_TO; i++) {
		read = alx_read_mem32(hw, reg);
		if ((read & wait) == 0) {
			if (val)
				*val = read;
			return true;
		}
		mdelay(1);
	}

	return false;
}

static bool alx_read_macaddr(struct alx_hw *hw, u8 *addr)
{
	u32 mac0, mac1;

	mac0 = alx_read_mem32(hw, ALX_STAD0);
	mac1 = alx_read_mem32(hw, ALX_STAD1);

	/* addr should be big-endian */
	put_unaligned(cpu_to_be32(mac0), (__be32 *)(addr + 2));
	put_unaligned(cpu_to_be16(mac1), (__be16 *)addr);

	return is_valid_ether_addr(addr);
}
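
/* Recover the permanent MAC address. The station address registers are
 * tried first; if they do not hold a valid address, an autoload from the
 * efuse is triggered, and finally one from external flash/eeprom if it is
 * reported present.
 */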
int alx_get_perm_macaddr(struct alx_hw *hw, u8 *addr)
{
	u32 val;

	/* try to get it from register first */
	if (alx_read_macaddr(hw, addr))
		return 0;

	/* try to load from efuse */
	if (!alx_wait_reg(hw, ALX_SLD, ALX_SLD_STAT | ALX_SLD_START, &val))
		return -EIO;
	alx_write_mem32(hw, ALX_SLD, val | ALX_SLD_START);
	if (!alx_wait_reg(hw, ALX_SLD, ALX_SLD_START, NULL))
		return -EIO;
	if (alx_read_macaddr(hw, addr))
		return 0;

	/* try to load from flash/eeprom (if present) */
	val = alx_read_mem32(hw, ALX_EFLD);
	if (val & (ALX_EFLD_F_EXIST | ALX_EFLD_E_EXIST)) {
		if (!alx_wait_reg(hw, ALX_EFLD,
				  ALX_EFLD_STAT | ALX_EFLD_START, &val))
			return -EIO;
		alx_write_mem32(hw, ALX_EFLD, val | ALX_EFLD_START);
		if (!alx_wait_reg(hw, ALX_EFLD, ALX_EFLD_START, NULL))
			return -EIO;
		if (alx_read_macaddr(hw, addr))
			return 0;
	}

	return -EIO;
}

void alx_set_macaddr(struct alx_hw *hw, const u8 *addr)
{
	u32 val;

	/* for example: 00-0B-6A-F6-00-DC
	 * STAD0=6AF600DC, STAD1=000B
	 */
	val = be32_to_cpu(get_unaligned((__be32 *)(addr + 2)));
	alx_write_mem32(hw, ALX_STAD0, val);
	val = be16_to_cpu(get_unaligned((__be16 *)addr));
	alx_write_mem32(hw, ALX_STAD1, val);
}

static void alx_enable_osc(struct alx_hw *hw)
{
	u32 val;

	/* rising edge */
	val = alx_read_mem32(hw, ALX_MISC);
	alx_write_mem32(hw, ALX_MISC, val & ~ALX_MISC_INTNLOSC_OPEN);
	alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN);
}

static void alx_reset_osc(struct alx_hw *hw, u8 rev)
{
	u32 val, val2;

	/* clear Internal OSC settings, switching OSC by hw itself */
	val = alx_read_mem32(hw, ALX_MISC3);
	alx_write_mem32(hw, ALX_MISC3,
			(val & ~ALX_MISC3_25M_BY_SW) |
			ALX_MISC3_25M_NOTO_INTNL);

	/* The 25M clock from the chipset may be unstable for 1s after
	 * de-assertion of PERST; the driver needs to re-calibrate it
	 * before entering sleep for WoL.
	 */
	val = alx_read_mem32(hw, ALX_MISC);
	if (rev >= ALX_REV_B0) {
		/* restore the over-current protection default value,
		 * which may be reset by MAC-RST
		 */
		ALX_SET_FIELD(val, ALX_MISC_PSW_OCP, ALX_MISC_PSW_OCP_DEF);
		/* a 0->1 change will update the internal val of osc */
		val &= ~ALX_MISC_INTNLOSC_OPEN;
		alx_write_mem32(hw, ALX_MISC, val);
		alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN);
		/* hw will automatically disable the OSC after calibration */
		val2 = alx_read_mem32(hw, ALX_MSIC2);
		val2 &= ~ALX_MSIC2_CALB_START;
		alx_write_mem32(hw, ALX_MSIC2, val2);
		alx_write_mem32(hw, ALX_MSIC2, val2 | ALX_MSIC2_CALB_START);
	} else {
		val &= ~ALX_MISC_INTNLOSC_OPEN;
		/* disable isolate for rev A devices */
		if (alx_is_rev_a(rev))
			val &= ~ALX_MISC_ISO_EN;
		alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN);
		alx_write_mem32(hw, ALX_MISC, val);
	}

	udelay(20);
}

static int alx_stop_mac(struct alx_hw *hw)
{
	u32 rxq, txq, val;
	u16 i;

	rxq = alx_read_mem32(hw, ALX_RXQ0);
	alx_write_mem32(hw, ALX_RXQ0, rxq & ~ALX_RXQ0_EN);
	txq = alx_read_mem32(hw, ALX_TXQ0);
	alx_write_mem32(hw, ALX_TXQ0, txq & ~ALX_TXQ0_EN);

	udelay(40);

	hw->rx_ctrl &= ~(ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_TX_EN);
	alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);

	for (i = 0; i < ALX_DMA_MAC_RST_TO; i++) {
		val = alx_read_mem32(hw, ALX_MAC_STS);
		if (!(val & ALX_MAC_STS_IDLE))
			return 0;
		udelay(10);
	}

	return -ETIMEDOUT;
}
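
/* Full MAC reset: mask all interrupts, stop the RX/TX queues and the MAC,
 * apply the RFD producer index reset workaround, temporarily disable
 * L0s/L1 on affected rev A hardware, then assert the DMA/MAC reset bit
 * and poll until the hardware clears it before restoring the OSC and
 * SERDES settings.
 */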
int alx_reset_mac(struct alx_hw *hw)
{
	u32 val, pmctrl;
	int i, ret;
	u8 rev;
	bool a_cr;

	pmctrl = 0;
	rev = alx_hw_revision(hw);
	a_cr = alx_is_rev_a(rev) && alx_hw_with_cr(hw);

	/* disable all interrupts, RXQ/TXQ */
	alx_write_mem32(hw, ALX_MSIX_MASK, 0xFFFFFFFF);
	alx_write_mem32(hw, ALX_IMR, 0);
	alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS);

	ret = alx_stop_mac(hw);
	if (ret)
		return ret;

	/* mac reset workaround */
	alx_write_mem32(hw, ALX_RFD_PIDX, 1);

	/* disable l0s/l1 before mac reset */
	if (a_cr) {
		pmctrl = alx_read_mem32(hw, ALX_PMCTRL);
		if (pmctrl & (ALX_PMCTRL_L1_EN | ALX_PMCTRL_L0S_EN))
			alx_write_mem32(hw, ALX_PMCTRL,
					pmctrl & ~(ALX_PMCTRL_L1_EN |
						   ALX_PMCTRL_L0S_EN));
	}

	/* reset whole mac safely */
	val = alx_read_mem32(hw, ALX_MASTER);
	alx_write_mem32(hw, ALX_MASTER,
			val | ALX_MASTER_DMA_MAC_RST | ALX_MASTER_OOB_DIS);

	/* make sure it's real idle */
	udelay(10);
	for (i = 0; i < ALX_DMA_MAC_RST_TO; i++) {
		val = alx_read_mem32(hw, ALX_RFD_PIDX);
		if (val == 0)
			break;
		udelay(10);
	}
	for (; i < ALX_DMA_MAC_RST_TO; i++) {
		val = alx_read_mem32(hw, ALX_MASTER);
		if ((val & ALX_MASTER_DMA_MAC_RST) == 0)
			break;
		udelay(10);
	}
	if (i == ALX_DMA_MAC_RST_TO)
		return -EIO;
	udelay(10);

	if (a_cr) {
		alx_write_mem32(hw, ALX_MASTER, val | ALX_MASTER_PCLKSEL_SRDS);
		/* restore l0s / l1 */
		if (pmctrl & (ALX_PMCTRL_L1_EN | ALX_PMCTRL_L0S_EN))
			alx_write_mem32(hw, ALX_PMCTRL, pmctrl);
	}

	alx_reset_osc(hw, rev);

	/* clear Internal OSC settings, switching OSC by hw itself,
	 * disable isolate for rev A devices
	 */
	val = alx_read_mem32(hw, ALX_MISC3);
	alx_write_mem32(hw, ALX_MISC3,
			(val & ~ALX_MISC3_25M_BY_SW) |
			ALX_MISC3_25M_NOTO_INTNL);
	val = alx_read_mem32(hw, ALX_MISC);
	val &= ~ALX_MISC_INTNLOSC_OPEN;
	if (alx_is_rev_a(rev))
		val &= ~ALX_MISC_ISO_EN;
	alx_write_mem32(hw, ALX_MISC, val);
	udelay(20);

	/* driver control speed/duplex, hash-alg */
	alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);

	val = alx_read_mem32(hw, ALX_SERDES);
	alx_write_mem32(hw, ALX_SERDES,
			val | ALX_SERDES_MACCLK_SLWDWN |
			ALX_SERDES_PHYCLK_SLWDWN);

	return 0;
}
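
/* Reset the PHY DSP core and bring it back up with the driver's
 * power-saving and hibernation defaults, disable EEE advertisement,
 * apply the cable/link related fixups (including the lnk_patch ones),
 * and finally unmask the link-up/link-down PHY interrupts.
 */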
void alx_reset_phy(struct alx_hw *hw)
{
	int i;
	u32 val;
	u16 phy_val;

	/* (DSP)reset PHY core */
	val = alx_read_mem32(hw, ALX_PHY_CTRL);
	val &= ~(ALX_PHY_CTRL_DSPRST_OUT | ALX_PHY_CTRL_IDDQ |
		 ALX_PHY_CTRL_GATE_25M | ALX_PHY_CTRL_POWER_DOWN |
		 ALX_PHY_CTRL_CLS);
	val |= ALX_PHY_CTRL_RST_ANALOG;

	val |= (ALX_PHY_CTRL_HIB_PULSE | ALX_PHY_CTRL_HIB_EN);
	alx_write_mem32(hw, ALX_PHY_CTRL, val);
	udelay(10);
	alx_write_mem32(hw, ALX_PHY_CTRL, val | ALX_PHY_CTRL_DSPRST_OUT);

	for (i = 0; i < ALX_PHY_CTRL_DSPRST_TO; i++)
		udelay(10);

	/* phy power saving & hib */
	alx_write_phy_dbg(hw, ALX_MIIDBG_LEGCYPS, ALX_LEGCYPS_DEF);
	alx_write_phy_dbg(hw, ALX_MIIDBG_SYSMODCTRL,
			  ALX_SYSMODCTRL_IECHOADJ_DEF);
	alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_VDRVBIAS,
			  ALX_VDRVBIAS_DEF);

	/* EEE advertisement */
	val = alx_read_mem32(hw, ALX_LPI_CTRL);
	alx_write_mem32(hw, ALX_LPI_CTRL, val & ~ALX_LPI_CTRL_EN);
	alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_LOCAL_EEEADV, 0);

	/* phy power saving */
	alx_write_phy_dbg(hw, ALX_MIIDBG_TST10BTCFG, ALX_TST10BTCFG_DEF);
	alx_write_phy_dbg(hw, ALX_MIIDBG_SRDSYSMOD, ALX_SRDSYSMOD_DEF);
	alx_write_phy_dbg(hw, ALX_MIIDBG_TST100BTCFG, ALX_TST100BTCFG_DEF);
	alx_write_phy_dbg(hw, ALX_MIIDBG_ANACTRL, ALX_ANACTRL_DEF);
	alx_read_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, &phy_val);
	alx_write_phy_dbg(hw, ALX_MIIDBG_GREENCFG2,
			  phy_val & ~ALX_GREENCFG2_GATE_DFSE_EN);
	/* rtl8139c, 120m issue */
	alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_NLP78,
			  ALX_MIIEXT_NLP78_120M_DEF);
	alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_S3DIG10,
			  ALX_MIIEXT_S3DIG10_DEF);

	if (hw->lnk_patch) {
		/* Turn off half amplitude */
		alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL3,
				 &phy_val);
		alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL3,
				  phy_val | ALX_CLDCTRL3_BP_CABLE1TH_DET_GT);
		/* Turn off Green feature */
		alx_read_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, &phy_val);
		alx_write_phy_dbg(hw, ALX_MIIDBG_GREENCFG2,
				  phy_val | ALX_GREENCFG2_BP_GREEN);
		/* Turn off half Bias */
		alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL5,
				 &phy_val);
		alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL5,
				  phy_val | ALX_CLDCTRL5_BP_VD_HLFBIAS);
	}

	/* set phy interrupt mask */
	alx_write_phy_reg(hw, ALX_MII_IER, ALX_IER_LINK_UP | ALX_IER_LINK_DOWN);
}

#define ALX_PCI_CMD (PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | PCI_COMMAND_IO)

void alx_reset_pcie(struct alx_hw *hw)
{
	u8 rev = alx_hw_revision(hw);
	u32 val;
	u16 val16;

	/* Workaround for PCI problem when BIOS sets MMRBC incorrectly. */
	pci_read_config_word(hw->pdev, PCI_COMMAND, &val16);
	if (!(val16 & ALX_PCI_CMD) || (val16 & PCI_COMMAND_INTX_DISABLE)) {
		val16 = (val16 | ALX_PCI_CMD) & ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(hw->pdev, PCI_COMMAND, val16);
	}

	/* clear WoL setting/status */
	val = alx_read_mem32(hw, ALX_WOL0);
	alx_write_mem32(hw, ALX_WOL0, 0);

	val = alx_read_mem32(hw, ALX_PDLL_TRNS1);
	alx_write_mem32(hw, ALX_PDLL_TRNS1, val & ~ALX_PDLL_TRNS1_D3PLLOFF_EN);

	/* mask some pcie error bits */
	val = alx_read_mem32(hw, ALX_UE_SVRT);
	val &= ~(ALX_UE_SVRT_DLPROTERR | ALX_UE_SVRT_FCPROTERR);
	alx_write_mem32(hw, ALX_UE_SVRT, val);

	/* wol 25M & pclk */
	val = alx_read_mem32(hw, ALX_MASTER);
	if (alx_is_rev_a(rev) && alx_hw_with_cr(hw)) {
		if ((val & ALX_MASTER_WAKEN_25M) == 0 ||
		    (val & ALX_MASTER_PCLKSEL_SRDS) == 0)
			alx_write_mem32(hw, ALX_MASTER,
					val | ALX_MASTER_PCLKSEL_SRDS |
					ALX_MASTER_WAKEN_25M);
	} else {
		if ((val & ALX_MASTER_WAKEN_25M) == 0 ||
		    (val & ALX_MASTER_PCLKSEL_SRDS) != 0)
			alx_write_mem32(hw, ALX_MASTER,
					(val & ~ALX_MASTER_PCLKSEL_SRDS) |
					ALX_MASTER_WAKEN_25M);
	}

	/* ASPM setting */
	alx_enable_aspm(hw, true, true);

	udelay(10);
}

void alx_start_mac(struct alx_hw *hw)
{
	u32 mac, txq, rxq;

	rxq = alx_read_mem32(hw, ALX_RXQ0);
	alx_write_mem32(hw, ALX_RXQ0, rxq | ALX_RXQ0_EN);
	txq = alx_read_mem32(hw, ALX_TXQ0);
	alx_write_mem32(hw, ALX_TXQ0, txq | ALX_TXQ0_EN);

	mac = hw->rx_ctrl;
	if (hw->duplex == DUPLEX_FULL)
		mac |= ALX_MAC_CTRL_FULLD;
	else
		mac &= ~ALX_MAC_CTRL_FULLD;
	ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED,
		      hw->link_speed == SPEED_1000 ? ALX_MAC_CTRL_SPEED_1000 :
						     ALX_MAC_CTRL_SPEED_10_100);
	mac |= ALX_MAC_CTRL_TX_EN | ALX_MAC_CTRL_RX_EN;
	hw->rx_ctrl = mac;
	alx_write_mem32(hw, ALX_MAC_CTRL, mac);
}

void alx_cfg_mac_flowcontrol(struct alx_hw *hw, u8 fc)
{
	if (fc & ALX_FC_RX)
		hw->rx_ctrl |= ALX_MAC_CTRL_RXFC_EN;
	else
		hw->rx_ctrl &= ~ALX_MAC_CTRL_RXFC_EN;

	if (fc & ALX_FC_TX)
		hw->rx_ctrl |= ALX_MAC_CTRL_TXFC_EN;
	else
		hw->rx_ctrl &= ~ALX_MAC_CTRL_TXFC_EN;

	alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
}

void alx_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en)
{
	u32 pmctrl;
	u8 rev = alx_hw_revision(hw);

	pmctrl = alx_read_mem32(hw, ALX_PMCTRL);

	ALX_SET_FIELD(pmctrl, ALX_PMCTRL_LCKDET_TIMER,
		      ALX_PMCTRL_LCKDET_TIMER_DEF);
	pmctrl |= ALX_PMCTRL_RCVR_WT_1US |
		  ALX_PMCTRL_L1_CLKSW_EN |
		  ALX_PMCTRL_L1_SRDSRX_PWD;
	ALX_SET_FIELD(pmctrl, ALX_PMCTRL_L1REQ_TO, ALX_PMCTRL_L1REG_TO_DEF);
	ALX_SET_FIELD(pmctrl, ALX_PMCTRL_L1_TIMER, ALX_PMCTRL_L1_TIMER_16US);
	pmctrl &= ~(ALX_PMCTRL_L1_SRDS_EN |
		    ALX_PMCTRL_L1_SRDSPLL_EN |
		    ALX_PMCTRL_L1_BUFSRX_EN |
		    ALX_PMCTRL_SADLY_EN |
		    ALX_PMCTRL_HOTRST_WTEN |
		    ALX_PMCTRL_L0S_EN |
		    ALX_PMCTRL_L1_EN |
		    ALX_PMCTRL_ASPM_FCEN |
		    ALX_PMCTRL_TXL1_AFTER_L0S |
		    ALX_PMCTRL_RXL1_AFTER_L0S);
	if (alx_is_rev_a(rev) && alx_hw_with_cr(hw))
		pmctrl |= ALX_PMCTRL_L1_SRDS_EN | ALX_PMCTRL_L1_SRDSPLL_EN;

	if (l0s_en)
		pmctrl |= (ALX_PMCTRL_L0S_EN | ALX_PMCTRL_ASPM_FCEN);
	if (l1_en)
		pmctrl |= (ALX_PMCTRL_L1_EN | ALX_PMCTRL_ASPM_FCEN);

	alx_write_mem32(hw, ALX_PMCTRL, pmctrl);
}
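
/* Translate an ethtool advertisement mask into the ALX_DRV_PHY_* bits that
 * are kept in the ALX_DRV scratch register, so a later boot/resume can tell
 * whether the PHY is already configured the way the driver wants it.
 */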
static u32 ethadv_to_hw_cfg(struct alx_hw *hw, u32 ethadv_cfg)
{
	u32 cfg = 0;

	if (ethadv_cfg & ADVERTISED_Autoneg) {
		cfg |= ALX_DRV_PHY_AUTO;
		if (ethadv_cfg & ADVERTISED_10baseT_Half)
			cfg |= ALX_DRV_PHY_10;
		if (ethadv_cfg & ADVERTISED_10baseT_Full)
			cfg |= ALX_DRV_PHY_10 | ALX_DRV_PHY_DUPLEX;
		if (ethadv_cfg & ADVERTISED_100baseT_Half)
			cfg |= ALX_DRV_PHY_100;
		if (ethadv_cfg & ADVERTISED_100baseT_Full)
			cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX;
		if (ethadv_cfg & ADVERTISED_1000baseT_Half)
			cfg |= ALX_DRV_PHY_1000;
		if (ethadv_cfg & ADVERTISED_1000baseT_Full)
			cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX;
		if (ethadv_cfg & ADVERTISED_Pause)
			cfg |= ADVERTISE_PAUSE_CAP;
		if (ethadv_cfg & ADVERTISED_Asym_Pause)
			cfg |= ADVERTISE_PAUSE_ASYM;
	} else {
		switch (ethadv_cfg) {
		case ADVERTISED_10baseT_Half:
			cfg |= ALX_DRV_PHY_10;
			break;
		case ADVERTISED_100baseT_Half:
			cfg |= ALX_DRV_PHY_100;
			break;
		case ADVERTISED_10baseT_Full:
			cfg |= ALX_DRV_PHY_10 | ALX_DRV_PHY_DUPLEX;
			break;
		case ADVERTISED_100baseT_Full:
			cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX;
			break;
		}
	}

	return cfg;
}
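
/* Program the PHY for the requested speed/duplex: either restart
 * autonegotiation with the given advertisement and flow-control bits, or
 * force speed/duplex through BMCR. On success the chosen configuration is
 * recorded in ALX_DRV and the ALX_PHY_INITED marker is written, so that
 * alx_phy_configured() can recognise it later.
 */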
int alx_setup_speed_duplex(struct alx_hw *hw, u32 ethadv, u8 flowctrl)
{
	u16 adv, giga, cr;
	u32 val;
	int err = 0;

	alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, 0);
	val = alx_read_mem32(hw, ALX_DRV);
	ALX_SET_FIELD(val, ALX_DRV_PHY, 0);

	if (ethadv & ADVERTISED_Autoneg) {
		adv = ADVERTISE_CSMA;
		adv |= ethtool_adv_to_mii_adv_t(ethadv);

		if (flowctrl & ALX_FC_ANEG) {
			if (flowctrl & ALX_FC_RX) {
				adv |= ADVERTISED_Pause;
				if (!(flowctrl & ALX_FC_TX))
					adv |= ADVERTISED_Asym_Pause;
			} else if (flowctrl & ALX_FC_TX) {
				adv |= ADVERTISED_Asym_Pause;
			}
		}
		giga = 0;
		if (alx_hw_giga(hw))
			giga = ethtool_adv_to_mii_ctrl1000_t(ethadv);

		cr = BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART;

		if (alx_write_phy_reg(hw, MII_ADVERTISE, adv) ||
		    alx_write_phy_reg(hw, MII_CTRL1000, giga) ||
		    alx_write_phy_reg(hw, MII_BMCR, cr))
			err = -EBUSY;
	} else {
		cr = BMCR_RESET;
		if (ethadv == ADVERTISED_100baseT_Half ||
		    ethadv == ADVERTISED_100baseT_Full)
			cr |= BMCR_SPEED100;
		if (ethadv == ADVERTISED_10baseT_Full ||
		    ethadv == ADVERTISED_100baseT_Full)
			cr |= BMCR_FULLDPLX;

		err = alx_write_phy_reg(hw, MII_BMCR, cr);
	}

	if (!err) {
		alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, ALX_PHY_INITED);
		val |= ethadv_to_hw_cfg(hw, ethadv);
	}

	alx_write_mem32(hw, ALX_DRV, val);

	return err;
}
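
/* Post link-change PHY tuning for rev A and B0 parts: adjust the analog
 * thresholds based on the cable length and AGC values reported by the PHY,
 * and (on B0 with lnk_patch) tweak the MSE thresholds for 100M/1G links.
 */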
void alx_post_phy_link(struct alx_hw *hw)
{
	u16 phy_val, len, agc;
	u8 revid = alx_hw_revision(hw);
	bool adj_th = revid == ALX_REV_B0;

	if (revid != ALX_REV_B0 && !alx_is_rev_a(revid))
		return;

	/* 1000BT/AZ, wrong cable length */
	if (hw->link_speed != SPEED_UNKNOWN) {
		alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL6,
				 &phy_val);
		len = ALX_GET_FIELD(phy_val, ALX_CLDCTRL6_CAB_LEN);
		alx_read_phy_dbg(hw, ALX_MIIDBG_AGC, &phy_val);
		agc = ALX_GET_FIELD(phy_val, ALX_AGC_2_VGA);

		if ((hw->link_speed == SPEED_1000 &&
		     (len > ALX_CLDCTRL6_CAB_LEN_SHORT1G ||
		      (len == 0 && agc > ALX_AGC_LONG1G_LIMT))) ||
		    (hw->link_speed == SPEED_100 &&
		     (len > ALX_CLDCTRL6_CAB_LEN_SHORT100M ||
		      (len == 0 && agc > ALX_AGC_LONG100M_LIMT)))) {
			alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT,
					  ALX_AZ_ANADECT_LONG);
			alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
					 &phy_val);
			alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
					  phy_val | ALX_AFE_10BT_100M_TH);
		} else {
			alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT,
					  ALX_AZ_ANADECT_DEF);
			alx_read_phy_ext(hw, ALX_MIIEXT_ANEG,
					 ALX_MIIEXT_AFE, &phy_val);
			alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
					  phy_val & ~ALX_AFE_10BT_100M_TH);
		}

		/* threshold adjust */
		if (adj_th && hw->lnk_patch) {
			if (hw->link_speed == SPEED_100) {
				alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB,
						  ALX_MSE16DB_UP);
			} else if (hw->link_speed == SPEED_1000) {
				/*
				 * Giga link threshold, raise the tolerance of
				 * noise 50%
				 */
				alx_read_phy_dbg(hw, ALX_MIIDBG_MSE20DB,
						 &phy_val);
				ALX_SET_FIELD(phy_val, ALX_MSE20DB_TH,
					      ALX_MSE20DB_TH_HI);
				alx_write_phy_dbg(hw, ALX_MIIDBG_MSE20DB,
						  phy_val);
			}
		}
	} else {
		alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
				 &phy_val);
		alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
				  phy_val & ~ALX_AFE_10BT_100M_TH);

		if (adj_th && hw->lnk_patch) {
			alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB,
					  ALX_MSE16DB_DOWN);
			alx_read_phy_dbg(hw, ALX_MIIDBG_MSE20DB, &phy_val);
			ALX_SET_FIELD(phy_val, ALX_MSE20DB_TH,
				      ALX_MSE20DB_TH_DEF);
			alx_write_phy_dbg(hw, ALX_MIIDBG_MSE20DB, phy_val);
		}
	}
}

/* NOTE:
 *    1. phy link must be established before calling this function
 *    2. wol options (pattern, magic, link, etc.) must be configured
 *       before calling this function
 */
int alx_pre_suspend(struct alx_hw *hw, int speed, u8 duplex)
{
	u32 master, mac, phy, val;
	int err = 0;

	master = alx_read_mem32(hw, ALX_MASTER);
	master &= ~ALX_MASTER_PCLKSEL_SRDS;
	mac = hw->rx_ctrl;
	/* 10/100 half */
	ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED, ALX_MAC_CTRL_SPEED_10_100);
	mac &= ~(ALX_MAC_CTRL_FULLD | ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_TX_EN);

	phy = alx_read_mem32(hw, ALX_PHY_CTRL);
	phy &= ~(ALX_PHY_CTRL_DSPRST_OUT | ALX_PHY_CTRL_CLS);
	phy |= ALX_PHY_CTRL_RST_ANALOG | ALX_PHY_CTRL_HIB_PULSE |
	       ALX_PHY_CTRL_HIB_EN;

	/* without any activity */
	if (!(hw->sleep_ctrl & ALX_SLEEP_ACTIVE)) {
		err = alx_write_phy_reg(hw, ALX_MII_IER, 0);
		if (err)
			return err;
		phy |= ALX_PHY_CTRL_IDDQ | ALX_PHY_CTRL_POWER_DOWN;
	} else {
		if (hw->sleep_ctrl & (ALX_SLEEP_WOL_MAGIC | ALX_SLEEP_CIFS))
			mac |= ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_BRD_EN;
		if (hw->sleep_ctrl & ALX_SLEEP_CIFS)
			mac |= ALX_MAC_CTRL_TX_EN;
		if (duplex == DUPLEX_FULL)
			mac |= ALX_MAC_CTRL_FULLD;
		if (speed == SPEED_1000)
			ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED,
				      ALX_MAC_CTRL_SPEED_1000);
		phy |= ALX_PHY_CTRL_DSPRST_OUT;
		err = alx_write_phy_ext(hw, ALX_MIIEXT_ANEG,
					ALX_MIIEXT_S3DIG10,
					ALX_MIIEXT_S3DIG10_SL);
		if (err)
			return err;
	}

	alx_enable_osc(hw);
	hw->rx_ctrl = mac;
	alx_write_mem32(hw, ALX_MASTER, master);
	alx_write_mem32(hw, ALX_MAC_CTRL, mac);
	alx_write_mem32(hw, ALX_PHY_CTRL, phy);

	/* set val of PDLL D3PLLOFF */
	val = alx_read_mem32(hw, ALX_PDLL_TRNS1);
	val |= ALX_PDLL_TRNS1_D3PLLOFF_EN;
	alx_write_mem32(hw, ALX_PDLL_TRNS1, val);

	return 0;
}

bool alx_phy_configured(struct alx_hw *hw)
{
	u32 cfg, hw_cfg;

	cfg = ethadv_to_hw_cfg(hw, hw->adv_cfg);
	cfg = ALX_GET_FIELD(cfg, ALX_DRV_PHY);
	hw_cfg = alx_get_phy_config(hw);

	if (hw_cfg == ALX_DRV_PHY_UNKNOWN)
		return false;

	return cfg == hw_cfg;
}
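
/* Refresh hw->link_speed / hw->duplex from the PHY. MII_BMSR latches a
 * link-down event until read, so it is read twice to get the current link
 * state; the resolved speed/duplex then comes from the vendor-specific
 * Giga PSSR register.
 */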
int alx_read_phy_link(struct alx_hw *hw)
{
	struct pci_dev *pdev = hw->pdev;
	u16 bmsr, giga;
	int err;

	err = alx_read_phy_reg(hw, MII_BMSR, &bmsr);
	if (err)
		return err;

	err = alx_read_phy_reg(hw, MII_BMSR, &bmsr);
	if (err)
		return err;

	if (!(bmsr & BMSR_LSTATUS)) {
		hw->link_speed = SPEED_UNKNOWN;
		hw->duplex = DUPLEX_UNKNOWN;
		return 0;
	}

	/* speed/duplex result is saved in PHY Specific Status Register */
	err = alx_read_phy_reg(hw, ALX_MII_GIGA_PSSR, &giga);
	if (err)
		return err;

	if (!(giga & ALX_GIGA_PSSR_SPD_DPLX_RESOLVED))
		goto wrong_speed;

	switch (giga & ALX_GIGA_PSSR_SPEED) {
	case ALX_GIGA_PSSR_1000MBS:
		hw->link_speed = SPEED_1000;
		break;
	case ALX_GIGA_PSSR_100MBS:
		hw->link_speed = SPEED_100;
		break;
	case ALX_GIGA_PSSR_10MBS:
		hw->link_speed = SPEED_10;
		break;
	default:
		goto wrong_speed;
	}

	hw->duplex = (giga & ALX_GIGA_PSSR_DPLX) ? DUPLEX_FULL : DUPLEX_HALF;

	return 0;

wrong_speed:
	dev_err(&pdev->dev, "invalid PHY speed/duplex: 0x%x\n", giga);
	return -EINVAL;
}

int alx_clear_phy_intr(struct alx_hw *hw)
{
	u16 isr;

	/* clear interrupt status by reading it */
	return alx_read_phy_reg(hw, ALX_MII_ISR, &isr);
}

int alx_config_wol(struct alx_hw *hw)
{
	u32 wol = 0;
	int err = 0;

	/* turn on magic packet event */
	if (hw->sleep_ctrl & ALX_SLEEP_WOL_MAGIC)
		wol |= ALX_WOL0_MAGIC_EN | ALX_WOL0_PME_MAGIC_EN;

	/* turn on link up event */
	if (hw->sleep_ctrl & ALX_SLEEP_WOL_PHY) {
		wol |= ALX_WOL0_LINK_EN | ALX_WOL0_PME_LINK;
		/* only link up can wake up */
		err = alx_write_phy_reg(hw, ALX_MII_IER, ALX_IER_LINK_UP);
	}

	alx_write_mem32(hw, ALX_WOL0, wol);

	return err;
}

void alx_disable_rss(struct alx_hw *hw)
{
	u32 ctrl = alx_read_mem32(hw, ALX_RXQ0);

	ctrl &= ~ALX_RXQ0_RSS_HASH_EN;
	alx_write_mem32(hw, ALX_RXQ0, ctrl);
}
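
/* One-time MAC/DMA setup: station address, clock gating, interrupt
 * moderation and re-trigger timers, MTU and jumbo thresholds, TX/RX queue
 * parameters, RX flow-control watermarks, DMA burst/ordering settings and
 * the default multi-TX-queue weights.
 */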
void alx_configure_basic(struct alx_hw *hw)
{
	u32 val, raw_mtu, max_payload;
	u16 val16;
	u8 chip_rev = alx_hw_revision(hw);

	alx_set_macaddr(hw, hw->mac_addr);

	alx_write_mem32(hw, ALX_CLK_GATE, ALX_CLK_GATE_ALL);

	/* idle timeout to switch clk_125M */
	if (chip_rev >= ALX_REV_B0)
		alx_write_mem32(hw, ALX_IDLE_DECISN_TIMER,
				ALX_IDLE_DECISN_TIMER_DEF);

	alx_write_mem32(hw, ALX_SMB_TIMER, hw->smb_timer * 500UL);

	val = alx_read_mem32(hw, ALX_MASTER);
	val |= ALX_MASTER_IRQMOD2_EN |
	       ALX_MASTER_IRQMOD1_EN |
	       ALX_MASTER_SYSALVTIMER_EN;
	alx_write_mem32(hw, ALX_MASTER, val);
	alx_write_mem32(hw, ALX_IRQ_MODU_TIMER,
			(hw->imt >> 1) << ALX_IRQ_MODU_TIMER1_SHIFT);
	/* intr re-trig timeout */
	alx_write_mem32(hw, ALX_INT_RETRIG, ALX_INT_RETRIG_TO);
	/* tpd threshold to trig int */
	alx_write_mem32(hw, ALX_TINT_TPD_THRSHLD, hw->ith_tpd);
	alx_write_mem32(hw, ALX_TINT_TIMER, hw->imt);

	raw_mtu = hw->mtu + ETH_HLEN;
	alx_write_mem32(hw, ALX_MTU, raw_mtu + 8);
	if (raw_mtu > ALX_MTU_JUMBO_TH)
		hw->rx_ctrl &= ~ALX_MAC_CTRL_FAST_PAUSE;

	if ((raw_mtu + 8) < ALX_TXQ1_JUMBO_TSO_TH)
		val = (raw_mtu + 8 + 7) >> 3;
	else
		val = ALX_TXQ1_JUMBO_TSO_TH >> 3;
	alx_write_mem32(hw, ALX_TXQ1, val | ALX_TXQ1_ERRLGPKT_DROP_EN);

	max_payload = pcie_get_readrq(hw->pdev) >> 8;
	/*
	 * if the BIOS changed the default DMA read max length,
	 * restore it to the default value
	 */
	if (max_payload < ALX_DEV_CTRL_MAXRRS_MIN)
		pcie_set_readrq(hw->pdev, 128 << ALX_DEV_CTRL_MAXRRS_MIN);

	val = ALX_TXQ_TPD_BURSTPREF_DEF << ALX_TXQ0_TPD_BURSTPREF_SHIFT |
	      ALX_TXQ0_MODE_ENHANCE | ALX_TXQ0_LSO_8023_EN |
	      ALX_TXQ0_SUPT_IPOPT |
	      ALX_TXQ_TXF_BURST_PREF_DEF << ALX_TXQ0_TXF_BURST_PREF_SHIFT;
	alx_write_mem32(hw, ALX_TXQ0, val);
	val = ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q1_NUMPREF_SHIFT |
	      ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q2_NUMPREF_SHIFT |
	      ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q3_NUMPREF_SHIFT |
	      ALX_HQTPD_BURST_EN;
	alx_write_mem32(hw, ALX_HQTPD, val);

	/* rxq, flow control */
	val = alx_read_mem32(hw, ALX_SRAM5);
	val = ALX_GET_FIELD(val, ALX_SRAM_RXF_LEN) << 3;
	if (val > ALX_SRAM_RXF_LEN_8K) {
		val16 = ALX_MTU_STD_ALGN >> 3;
		val = (val - ALX_RXQ2_RXF_FLOW_CTRL_RSVD) >> 3;
	} else {
		val16 = ALX_MTU_STD_ALGN >> 3;
		val = (val - ALX_MTU_STD_ALGN) >> 3;
	}
	alx_write_mem32(hw, ALX_RXQ2,
			val16 << ALX_RXQ2_RXF_XOFF_THRESH_SHIFT |
			val << ALX_RXQ2_RXF_XON_THRESH_SHIFT);

	val = ALX_RXQ0_NUM_RFD_PREF_DEF << ALX_RXQ0_NUM_RFD_PREF_SHIFT |
	      ALX_RXQ0_RSS_MODE_DIS << ALX_RXQ0_RSS_MODE_SHIFT |
	      ALX_RXQ0_IDT_TBL_SIZE_DEF << ALX_RXQ0_IDT_TBL_SIZE_SHIFT |
	      ALX_RXQ0_RSS_HSTYP_ALL | ALX_RXQ0_RSS_HASH_EN |
	      ALX_RXQ0_IPV6_PARSE_EN;

	if (alx_hw_giga(hw))
		ALX_SET_FIELD(val, ALX_RXQ0_ASPM_THRESH,
			      ALX_RXQ0_ASPM_THRESH_100M);

	alx_write_mem32(hw, ALX_RXQ0, val);

	val = alx_read_mem32(hw, ALX_DMA);
	val = ALX_DMA_RORDER_MODE_OUT << ALX_DMA_RORDER_MODE_SHIFT |
	      ALX_DMA_RREQ_PRI_DATA |
	      max_payload << ALX_DMA_RREQ_BLEN_SHIFT |
	      ALX_DMA_WDLY_CNT_DEF << ALX_DMA_WDLY_CNT_SHIFT |
	      ALX_DMA_RDLY_CNT_DEF << ALX_DMA_RDLY_CNT_SHIFT |
	      (hw->dma_chnl - 1) << ALX_DMA_RCHNL_SEL_SHIFT;
	alx_write_mem32(hw, ALX_DMA, val);

	/* default multi-tx-q weights */
	val = ALX_WRR_PRI_RESTRICT_NONE << ALX_WRR_PRI_SHIFT |
	      4 << ALX_WRR_PRI0_SHIFT |
	      4 << ALX_WRR_PRI1_SHIFT |
	      4 << ALX_WRR_PRI2_SHIFT |
	      4 << ALX_WRR_PRI3_SHIFT;
	alx_write_mem32(hw, ALX_WRR, val);
}
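
/* Pick the lowest speed the link partner supports for the suspend/WoL
 * link: read the current link and the partner's ability (MII_LPA), prefer
 * 10M over 100M, then renegotiate at that speed and wait (up to
 * ALX_MAX_SETUP_LNK_CYCLE * 100ms) for the link to come back up.
 */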
int alx_select_powersaving_speed(struct alx_hw *hw, int *speed, u8 *duplex)
{
	int i, err;
	u16 lpa;

	err = alx_read_phy_link(hw);
	if (err)
		return err;

	if (hw->link_speed == SPEED_UNKNOWN) {
		*speed = SPEED_UNKNOWN;
		*duplex = DUPLEX_UNKNOWN;
		return 0;
	}

	err = alx_read_phy_reg(hw, MII_LPA, &lpa);
	if (err)
		return err;

	if (!(lpa & LPA_LPACK)) {
		*speed = hw->link_speed;
		return 0;
	}

	if (lpa & LPA_10FULL) {
		*speed = SPEED_10;
		*duplex = DUPLEX_FULL;
	} else if (lpa & LPA_10HALF) {
		*speed = SPEED_10;
		*duplex = DUPLEX_HALF;
	} else if (lpa & LPA_100FULL) {
		*speed = SPEED_100;
		*duplex = DUPLEX_FULL;
	} else {
		*speed = SPEED_100;
		*duplex = DUPLEX_HALF;
	}

	if (*speed == hw->link_speed && *duplex == hw->duplex)
		return 0;

	err = alx_write_phy_reg(hw, ALX_MII_IER, 0);
	if (err)
		return err;

	err = alx_setup_speed_duplex(hw, alx_speed_to_ethadv(*speed, *duplex) |
				     ADVERTISED_Autoneg, ALX_FC_ANEG |
				     ALX_FC_RX | ALX_FC_TX);
	if (err)
		return err;

	/* wait for linkup */
	for (i = 0; i < ALX_MAX_SETUP_LNK_CYCLE; i++) {
		msleep(100);

		err = alx_read_phy_link(hw);
		if (err < 0)
			return err;
		if (hw->link_speed != SPEED_UNKNOWN)
			break;
	}
	if (i == ALX_MAX_SETUP_LNK_CYCLE)
		return -ETIMEDOUT;

	return 0;
}

bool alx_get_phy_info(struct alx_hw *hw)
{
	u16 devs1, devs2;

	if (alx_read_phy_reg(hw, MII_PHYSID1, &hw->phy_id[0]) ||
	    alx_read_phy_reg(hw, MII_PHYSID2, &hw->phy_id[1]))
		return false;

	/* since we don't have a PMA/PMD status2 register, we can't use
	 * mdio45_probe() for prtad and mmds.
	 * use fixed MMD3 to get the mmds instead.
	 */
	if (alx_read_phy_ext(hw, 3, MDIO_DEVS1, &devs1) ||
	    alx_read_phy_ext(hw, 3, MDIO_DEVS2, &devs2))
		return false;

	hw->mdio.mmds = devs1 | devs2 << 16;

	return true;
}