/*
 * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
 *
 * This file is free software: you may copy, redistribute and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation, either version 2 of the License, or (at your
 * option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *
 * Copyright (c) 2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/mdio.h>
#include "reg.h"
#include "hw.h"

static inline bool alx_is_rev_a(u8 rev)
{
	return rev == ALX_REV_A0 || rev == ALX_REV_A1;
}

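/* Poll the MDIO control register until the controller clears its busy bit,
 * or give up after ALX_MDIO_MAX_AC_TO iterations.
 */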
static int alx_wait_mdio_idle(struct alx_hw *hw)
{
	u32 val;
	int i;

	for (i = 0; i < ALX_MDIO_MAX_AC_TO; i++) {
		val = alx_read_mem32(hw, ALX_MDIO);
		if (!(val & ALX_MDIO_BUSY))
			return 0;
		udelay(10);
	}

	return -ETIMEDOUT;
}

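/* Low-level MDIO access. "ext" selects extended (device/MMD addressed)
 * registers via ALX_MDIO_EXTN; otherwise a normal PHY register access is
 * performed. A slower MDIO clock is chosen while there is no link, since
 * the PHY may be in hibernation.
 */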
static int alx_read_phy_core(struct alx_hw *hw, bool ext, u8 dev,
			     u16 reg, u16 *phy_data)
{
	u32 val, clk_sel;
	int err;

	*phy_data = 0;

	/* use slow clock when it's in hibernation status */
	clk_sel = hw->link_speed != SPEED_UNKNOWN ?
			ALX_MDIO_CLK_SEL_25MD4 :
			ALX_MDIO_CLK_SEL_25MD128;

	if (ext) {
		val = dev << ALX_MDIO_EXTN_DEVAD_SHIFT |
		      reg << ALX_MDIO_EXTN_REG_SHIFT;
		alx_write_mem32(hw, ALX_MDIO_EXTN, val);

		val = ALX_MDIO_SPRES_PRMBL | ALX_MDIO_START |
		      ALX_MDIO_MODE_EXT | ALX_MDIO_OP_READ |
		      clk_sel << ALX_MDIO_CLK_SEL_SHIFT;
	} else {
		val = ALX_MDIO_SPRES_PRMBL |
		      clk_sel << ALX_MDIO_CLK_SEL_SHIFT |
		      reg << ALX_MDIO_REG_SHIFT |
		      ALX_MDIO_START | ALX_MDIO_OP_READ;
	}
	alx_write_mem32(hw, ALX_MDIO, val);

	err = alx_wait_mdio_idle(hw);
	if (err)
		return err;
	val = alx_read_mem32(hw, ALX_MDIO);
	*phy_data = ALX_GET_FIELD(val, ALX_MDIO_DATA);

	return 0;
}

static int alx_write_phy_core(struct alx_hw *hw, bool ext, u8 dev,
			      u16 reg, u16 phy_data)
{
	u32 val, clk_sel;

	/* use slow clock when it's in hibernation status */
	clk_sel = hw->link_speed != SPEED_UNKNOWN ?
			ALX_MDIO_CLK_SEL_25MD4 :
			ALX_MDIO_CLK_SEL_25MD128;

	if (ext) {
		val = dev << ALX_MDIO_EXTN_DEVAD_SHIFT |
		      reg << ALX_MDIO_EXTN_REG_SHIFT;
		alx_write_mem32(hw, ALX_MDIO_EXTN, val);

		val = ALX_MDIO_SPRES_PRMBL |
		      clk_sel << ALX_MDIO_CLK_SEL_SHIFT |
		      phy_data << ALX_MDIO_DATA_SHIFT |
		      ALX_MDIO_START | ALX_MDIO_MODE_EXT;
	} else {
		val = ALX_MDIO_SPRES_PRMBL |
		      clk_sel << ALX_MDIO_CLK_SEL_SHIFT |
		      reg << ALX_MDIO_REG_SHIFT |
		      phy_data << ALX_MDIO_DATA_SHIFT |
		      ALX_MDIO_START;
	}
	alx_write_mem32(hw, ALX_MDIO, val);

	return alx_wait_mdio_idle(hw);
}

static int __alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data)
{
	return alx_read_phy_core(hw, false, 0, reg, phy_data);
}

static int __alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data)
{
	return alx_write_phy_core(hw, false, 0, reg, phy_data);
}

static int __alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata)
{
	return alx_read_phy_core(hw, true, dev, reg, pdata);
}

static int __alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data)
{
	return alx_write_phy_core(hw, true, dev, reg, data);
}

static int __alx_read_phy_dbg(struct alx_hw *hw, u16 reg, u16 *pdata)
{
	int err;

	err = __alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, reg);
	if (err)
		return err;

	return __alx_read_phy_reg(hw, ALX_MII_DBG_DATA, pdata);
}

static int __alx_write_phy_dbg(struct alx_hw *hw, u16 reg, u16 data)
{
	int err;

	err = __alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, reg);
	if (err)
		return err;

	return __alx_write_phy_reg(hw, ALX_MII_DBG_DATA, data);
}

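/* Locked PHY accessors: these wrap the raw __alx_* helpers with
 * hw->mdio_lock so concurrent callers serialize their MDIO transactions.
 * A (hypothetical) caller would look like:
 *
 *	u16 bmcr;
 *	int err = alx_read_phy_reg(hw, MII_BMCR, &bmcr);
 */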
int alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data)
{
	int err;

	spin_lock(&hw->mdio_lock);
	err = __alx_read_phy_reg(hw, reg, phy_data);
	spin_unlock(&hw->mdio_lock);

	return err;
}

int alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data)
{
	int err;

	spin_lock(&hw->mdio_lock);
	err = __alx_write_phy_reg(hw, reg, phy_data);
	spin_unlock(&hw->mdio_lock);

	return err;
}

int alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata)
{
	int err;

	spin_lock(&hw->mdio_lock);
	err = __alx_read_phy_ext(hw, dev, reg, pdata);
	spin_unlock(&hw->mdio_lock);

	return err;
}

int alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data)
{
	int err;

	spin_lock(&hw->mdio_lock);
	err = __alx_write_phy_ext(hw, dev, reg, data);
	spin_unlock(&hw->mdio_lock);

	return err;
}

static int alx_read_phy_dbg(struct alx_hw *hw, u16 reg, u16 *pdata)
{
	int err;

	spin_lock(&hw->mdio_lock);
	err = __alx_read_phy_dbg(hw, reg, pdata);
	spin_unlock(&hw->mdio_lock);

	return err;
}

static int alx_write_phy_dbg(struct alx_hw *hw, u16 reg, u16 data)
{
	int err;

	spin_lock(&hw->mdio_lock);
	err = __alx_write_phy_dbg(hw, reg, data);
	spin_unlock(&hw->mdio_lock);

	return err;
}

static u16 alx_get_phy_config(struct alx_hw *hw)
{
	u32 val;
	u16 phy_val;

	val = alx_read_mem32(hw, ALX_PHY_CTRL);
	/* phy in reset */
	if ((val & ALX_PHY_CTRL_DSPRST_OUT) == 0)
		return ALX_DRV_PHY_UNKNOWN;

	val = alx_read_mem32(hw, ALX_DRV);
	val = ALX_GET_FIELD(val, ALX_DRV_PHY);
	if (ALX_DRV_PHY_UNKNOWN == val)
		return ALX_DRV_PHY_UNKNOWN;

	alx_read_phy_reg(hw, ALX_MII_DBG_ADDR, &phy_val);
	if (ALX_PHY_INITED == phy_val)
		return val;

	return ALX_DRV_PHY_UNKNOWN;
}

static bool alx_wait_reg(struct alx_hw *hw, u32 reg, u32 wait, u32 *val)
{
	u32 read;
	int i;

	for (i = 0; i < ALX_SLD_MAX_TO; i++) {
		read = alx_read_mem32(hw, reg);
		if ((read & wait) == 0) {
			if (val)
				*val = read;
			return true;
		}
		mdelay(1);
	}

	return false;
}

static bool alx_read_macaddr(struct alx_hw *hw, u8 *addr)
{
	u32 mac0, mac1;

	mac0 = alx_read_mem32(hw, ALX_STAD0);
	mac1 = alx_read_mem32(hw, ALX_STAD1);

	/* addr should be big-endian */
	put_unaligned(cpu_to_be32(mac0), (__be32 *)(addr + 2));
	put_unaligned(cpu_to_be16(mac1), (__be16 *)addr);

	return is_valid_ether_addr(addr);
}

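/* Fetch the permanent MAC address: use whatever is already latched in the
 * station address registers, then try reloading from efuse, and finally
 * from external flash/eeprom if one is present.
 */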
int alx_get_perm_macaddr(struct alx_hw *hw, u8 *addr)
{
	u32 val;

	/* try to get it from register first */
	if (alx_read_macaddr(hw, addr))
		return 0;

	/* try to load from efuse */
	if (!alx_wait_reg(hw, ALX_SLD, ALX_SLD_STAT | ALX_SLD_START, &val))
		return -EIO;
	alx_write_mem32(hw, ALX_SLD, val | ALX_SLD_START);
	if (!alx_wait_reg(hw, ALX_SLD, ALX_SLD_START, NULL))
		return -EIO;
	if (alx_read_macaddr(hw, addr))
		return 0;

	/* try to load from flash/eeprom (if present) */
	val = alx_read_mem32(hw, ALX_EFLD);
	if (val & (ALX_EFLD_F_EXIST | ALX_EFLD_E_EXIST)) {
		if (!alx_wait_reg(hw, ALX_EFLD,
				  ALX_EFLD_STAT | ALX_EFLD_START, &val))
			return -EIO;
		alx_write_mem32(hw, ALX_EFLD, val | ALX_EFLD_START);
		if (!alx_wait_reg(hw, ALX_EFLD, ALX_EFLD_START, NULL))
			return -EIO;
		if (alx_read_macaddr(hw, addr))
			return 0;
	}

	return -EIO;
}

void alx_set_macaddr(struct alx_hw *hw, const u8 *addr)
{
	u32 val;

	/* for example: 00-0B-6A-F6-00-DC * STAD0=6AF600DC, STAD1=000B */
	val = be32_to_cpu(get_unaligned((__be32 *)(addr + 2)));
	alx_write_mem32(hw, ALX_STAD0, val);
	val = be16_to_cpu(get_unaligned((__be16 *)addr));
	alx_write_mem32(hw, ALX_STAD1, val);
}

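/* Re-calibrate the internal 25M oscillator; the exact sequence differs
 * between rev A and rev B0+ silicon.
 */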
static void alx_reset_osc(struct alx_hw *hw, u8 rev)
{
	u32 val, val2;

	/* clear Internal OSC settings, switching OSC by hw itself */
	val = alx_read_mem32(hw, ALX_MISC3);
	alx_write_mem32(hw, ALX_MISC3,
			(val & ~ALX_MISC3_25M_BY_SW) |
			ALX_MISC3_25M_NOTO_INTNL);

	/* 25M clk from chipset may be unstable 1s after de-assert of
	 * PERST, the driver needs to re-calibrate before entering sleep
	 * for WoL
	 */
	val = alx_read_mem32(hw, ALX_MISC);
	if (rev >= ALX_REV_B0) {
		/* restore over current protection def-val,
		 * this val could be reset by MAC-RST
		 */
		ALX_SET_FIELD(val, ALX_MISC_PSW_OCP, ALX_MISC_PSW_OCP_DEF);
		/* a 0->1 change will update the internal val of osc */
		val &= ~ALX_MISC_INTNLOSC_OPEN;
		alx_write_mem32(hw, ALX_MISC, val);
		alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN);
		/* hw will automatically disable the OSC after calibration */
		val2 = alx_read_mem32(hw, ALX_MSIC2);
		val2 &= ~ALX_MSIC2_CALB_START;
		alx_write_mem32(hw, ALX_MSIC2, val2);
		alx_write_mem32(hw, ALX_MSIC2, val2 | ALX_MSIC2_CALB_START);
	} else {
		val &= ~ALX_MISC_INTNLOSC_OPEN;
		/* disable isolate for rev A devices */
		if (alx_is_rev_a(rev))
			val &= ~ALX_MISC_ISO_EN;

		alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN);
		alx_write_mem32(hw, ALX_MISC, val);
	}

	udelay(20);
}

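/* Disable the RX/TX queues and the MAC receiver/transmitter, then poll
 * ALX_MAC_STS until the relevant busy bits clear.
 */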
static int alx_stop_mac(struct alx_hw *hw)
{
	u32 rxq, txq, val;
	u16 i;

	rxq = alx_read_mem32(hw, ALX_RXQ0);
	alx_write_mem32(hw, ALX_RXQ0, rxq & ~ALX_RXQ0_EN);
	txq = alx_read_mem32(hw, ALX_TXQ0);
	alx_write_mem32(hw, ALX_TXQ0, txq & ~ALX_TXQ0_EN);

	udelay(40);

	hw->rx_ctrl &= ~(ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_TX_EN);
	alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);

	for (i = 0; i < ALX_DMA_MAC_RST_TO; i++) {
		val = alx_read_mem32(hw, ALX_MAC_STS);
		if (!(val & ALX_MAC_STS_IDLE))
			return 0;
		udelay(10);
	}

	return -ETIMEDOUT;
}

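/* Full MAC reset: interrupts are masked, the MAC is stopped, the DMA/MAC
 * reset bit is toggled via ALX_MASTER, and the internal oscillator is
 * re-calibrated afterwards.
 */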
int alx_reset_mac(struct alx_hw *hw)
{
	u32 val, pmctrl;
	int i, ret;
	u8 rev;
	bool a_cr;

	pmctrl = 0;
	rev = alx_hw_revision(hw);
	a_cr = alx_is_rev_a(rev) && alx_hw_with_cr(hw);

	/* disable all interrupts, RXQ/TXQ */
	alx_write_mem32(hw, ALX_MSIX_MASK, 0xFFFFFFFF);
	alx_write_mem32(hw, ALX_IMR, 0);
	alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS);

	ret = alx_stop_mac(hw);
	if (ret)
		return ret;

	/* mac reset workaround */
	alx_write_mem32(hw, ALX_RFD_PIDX, 1);

	/* dis l0s/l1 before mac reset */
	if (a_cr) {
		pmctrl = alx_read_mem32(hw, ALX_PMCTRL);
		if (pmctrl & (ALX_PMCTRL_L1_EN | ALX_PMCTRL_L0S_EN))
			alx_write_mem32(hw, ALX_PMCTRL,
					pmctrl & ~(ALX_PMCTRL_L1_EN |
						   ALX_PMCTRL_L0S_EN));
	}

	/* reset whole mac safely */
	val = alx_read_mem32(hw, ALX_MASTER);
	alx_write_mem32(hw, ALX_MASTER,
			val | ALX_MASTER_DMA_MAC_RST | ALX_MASTER_OOB_DIS);

	/* make sure it's real idle */
	udelay(10);
	for (i = 0; i < ALX_DMA_MAC_RST_TO; i++) {
		val = alx_read_mem32(hw, ALX_RFD_PIDX);
		if (val == 0)
			break;
		udelay(10);
	}
	for (; i < ALX_DMA_MAC_RST_TO; i++) {
		val = alx_read_mem32(hw, ALX_MASTER);
		if ((val & ALX_MASTER_DMA_MAC_RST) == 0)
			break;
		udelay(10);
	}
	if (i == ALX_DMA_MAC_RST_TO)
		return -EIO;
	udelay(10);

	if (a_cr) {
		alx_write_mem32(hw, ALX_MASTER, val | ALX_MASTER_PCLKSEL_SRDS);
		/* restore l0s / l1 */
		if (pmctrl & (ALX_PMCTRL_L1_EN | ALX_PMCTRL_L0S_EN))
			alx_write_mem32(hw, ALX_PMCTRL, pmctrl);
	}

	alx_reset_osc(hw, rev);

	/* clear Internal OSC settings, switching OSC by hw itself,
	 * disable isolate for rev A devices
	 */
	val = alx_read_mem32(hw, ALX_MISC3);
	alx_write_mem32(hw, ALX_MISC3,
			(val & ~ALX_MISC3_25M_BY_SW) |
			ALX_MISC3_25M_NOTO_INTNL);
	val = alx_read_mem32(hw, ALX_MISC);
	val &= ~ALX_MISC_INTNLOSC_OPEN;
	if (alx_is_rev_a(rev))
		val &= ~ALX_MISC_ISO_EN;
	alx_write_mem32(hw, ALX_MISC, val);
	udelay(20);

	/* driver control speed/duplex, hash-alg */
	alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);

	val = alx_read_mem32(hw, ALX_SERDES);
	alx_write_mem32(hw, ALX_SERDES,
			val | ALX_SERDES_MACCLK_SLWDWN |
			ALX_SERDES_PHYCLK_SLWDWN);

	return 0;
}

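/* Reset the PHY core and re-apply the vendor-recommended power-saving,
 * EEE and link tuning settings, then unmask link-up/link-down PHY
 * interrupts.
 */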
void alx_reset_phy(struct alx_hw *hw)
{
	int i;
	u32 val;
	u16 phy_val;

	/* (DSP)reset PHY core */
	val = alx_read_mem32(hw, ALX_PHY_CTRL);
	val &= ~(ALX_PHY_CTRL_DSPRST_OUT | ALX_PHY_CTRL_IDDQ |
		 ALX_PHY_CTRL_GATE_25M | ALX_PHY_CTRL_POWER_DOWN |
		 ALX_PHY_CTRL_CLS);
	val |= ALX_PHY_CTRL_RST_ANALOG;

	val |= (ALX_PHY_CTRL_HIB_PULSE | ALX_PHY_CTRL_HIB_EN);
	alx_write_mem32(hw, ALX_PHY_CTRL, val);
	udelay(10);
	alx_write_mem32(hw, ALX_PHY_CTRL, val | ALX_PHY_CTRL_DSPRST_OUT);

	for (i = 0; i < ALX_PHY_CTRL_DSPRST_TO; i++)
		udelay(10);

	/* phy power saving & hib */
	alx_write_phy_dbg(hw, ALX_MIIDBG_LEGCYPS, ALX_LEGCYPS_DEF);
	alx_write_phy_dbg(hw, ALX_MIIDBG_SYSMODCTRL,
			  ALX_SYSMODCTRL_IECHOADJ_DEF);
	alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_VDRVBIAS,
			  ALX_VDRVBIAS_DEF);

	/* EEE advertisement */
	val = alx_read_mem32(hw, ALX_LPI_CTRL);
	alx_write_mem32(hw, ALX_LPI_CTRL, val & ~ALX_LPI_CTRL_EN);
	alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_LOCAL_EEEADV, 0);

	/* phy power saving */
	alx_write_phy_dbg(hw, ALX_MIIDBG_TST10BTCFG, ALX_TST10BTCFG_DEF);
	alx_write_phy_dbg(hw, ALX_MIIDBG_SRDSYSMOD, ALX_SRDSYSMOD_DEF);
	alx_write_phy_dbg(hw, ALX_MIIDBG_TST100BTCFG, ALX_TST100BTCFG_DEF);
	alx_write_phy_dbg(hw, ALX_MIIDBG_ANACTRL, ALX_ANACTRL_DEF);
	alx_read_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, &phy_val);
	alx_write_phy_dbg(hw, ALX_MIIDBG_GREENCFG2,
			  phy_val & ~ALX_GREENCFG2_GATE_DFSE_EN);
	/* rtl8139c, 120m issue */
	alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_NLP78,
			  ALX_MIIEXT_NLP78_120M_DEF);
	alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_S3DIG10,
			  ALX_MIIEXT_S3DIG10_DEF);

	if (hw->lnk_patch) {
		/* Turn off half amplitude */
		alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL3,
				 &phy_val);
		alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL3,
				  phy_val | ALX_CLDCTRL3_BP_CABLE1TH_DET_GT);
		/* Turn off Green feature */
		alx_read_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, &phy_val);
		alx_write_phy_dbg(hw, ALX_MIIDBG_GREENCFG2,
				  phy_val | ALX_GREENCFG2_BP_GREEN);
		/* Turn off half Bias */
		alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL5,
				 &phy_val);
		alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL5,
				  phy_val | ALX_CLDCTRL5_BP_VD_HLFBIAS);
	}

	/* set phy interrupt mask */
	alx_write_phy_reg(hw, ALX_MII_IER, ALX_IER_LINK_UP | ALX_IER_LINK_DOWN);
}

#define ALX_PCI_CMD (PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | PCI_COMMAND_IO)

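/* Bring PCIe-related settings back to a sane state: repair the PCI command
 * word if the BIOS left it misconfigured, clear WoL setting/status, mask
 * some PCIe error bits and re-select the 25M clock source.
 */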
void alx_reset_pcie(struct alx_hw *hw)
{
	u8 rev = alx_hw_revision(hw);
	u32 val;
	u16 val16;

	/* Workaround for PCI problem when BIOS sets MMRBC incorrectly. */
	pci_read_config_word(hw->pdev, PCI_COMMAND, &val16);
	if (!(val16 & ALX_PCI_CMD) || (val16 & PCI_COMMAND_INTX_DISABLE)) {
		val16 = (val16 | ALX_PCI_CMD) & ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(hw->pdev, PCI_COMMAND, val16);
	}

	/* clear WoL setting/status */
	val = alx_read_mem32(hw, ALX_WOL0);
	alx_write_mem32(hw, ALX_WOL0, 0);

	val = alx_read_mem32(hw, ALX_PDLL_TRNS1);
	alx_write_mem32(hw, ALX_PDLL_TRNS1, val & ~ALX_PDLL_TRNS1_D3PLLOFF_EN);

	/* mask some pcie error bits */
	val = alx_read_mem32(hw, ALX_UE_SVRT);
	val &= ~(ALX_UE_SVRT_DLPROTERR | ALX_UE_SVRT_FCPROTERR);
	alx_write_mem32(hw, ALX_UE_SVRT, val);

	/* wol 25M & pclk */
	val = alx_read_mem32(hw, ALX_MASTER);
	if (alx_is_rev_a(rev) && alx_hw_with_cr(hw)) {
		if ((val & ALX_MASTER_WAKEN_25M) == 0 ||
		    (val & ALX_MASTER_PCLKSEL_SRDS) == 0)
			alx_write_mem32(hw, ALX_MASTER,
					val | ALX_MASTER_PCLKSEL_SRDS |
					ALX_MASTER_WAKEN_25M);
	} else {
		if ((val & ALX_MASTER_WAKEN_25M) == 0 ||
		    (val & ALX_MASTER_PCLKSEL_SRDS) != 0)
			alx_write_mem32(hw, ALX_MASTER,
					(val & ~ALX_MASTER_PCLKSEL_SRDS) |
					ALX_MASTER_WAKEN_25M);
	}

	/* ASPM setting */
	alx_enable_aspm(hw, true, true);

	udelay(10);
}

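/* Re-enable the RX/TX queues and program MAC speed/duplex from the
 * resolved link parameters before turning the MAC back on.
 */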
void alx_start_mac(struct alx_hw *hw)
{
	u32 mac, txq, rxq;

	rxq = alx_read_mem32(hw, ALX_RXQ0);
	alx_write_mem32(hw, ALX_RXQ0, rxq | ALX_RXQ0_EN);
	txq = alx_read_mem32(hw, ALX_TXQ0);
	alx_write_mem32(hw, ALX_TXQ0, txq | ALX_TXQ0_EN);

	mac = hw->rx_ctrl;
	if (hw->duplex == DUPLEX_FULL)
		mac |= ALX_MAC_CTRL_FULLD;
	else
		mac &= ~ALX_MAC_CTRL_FULLD;
	ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED,
		      hw->link_speed == SPEED_1000 ? ALX_MAC_CTRL_SPEED_1000 :
						     ALX_MAC_CTRL_SPEED_10_100);
	mac |= ALX_MAC_CTRL_TX_EN | ALX_MAC_CTRL_RX_EN;
	hw->rx_ctrl = mac;
	alx_write_mem32(hw, ALX_MAC_CTRL, mac);
}

void alx_cfg_mac_flowcontrol(struct alx_hw *hw, u8 fc)
{
	if (fc & ALX_FC_RX)
		hw->rx_ctrl |= ALX_MAC_CTRL_RXFC_EN;
	else
		hw->rx_ctrl &= ~ALX_MAC_CTRL_RXFC_EN;

	if (fc & ALX_FC_TX)
		hw->rx_ctrl |= ALX_MAC_CTRL_TXFC_EN;
	else
		hw->rx_ctrl &= ~ALX_MAC_CTRL_TXFC_EN;

	alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
}

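/* Configure ASPM: L0s/L1 entry is enabled only when requested by the
 * caller, and the SerDes-related L1 bits are only set on rev A hardware
 * where alx_hw_with_cr() is true.
 */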
void alx_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en)
{
	u32 pmctrl;
	u8 rev = alx_hw_revision(hw);

	pmctrl = alx_read_mem32(hw, ALX_PMCTRL);

	ALX_SET_FIELD(pmctrl, ALX_PMCTRL_LCKDET_TIMER,
		      ALX_PMCTRL_LCKDET_TIMER_DEF);
	pmctrl |= ALX_PMCTRL_RCVR_WT_1US |
		  ALX_PMCTRL_L1_CLKSW_EN |
		  ALX_PMCTRL_L1_SRDSRX_PWD;
	ALX_SET_FIELD(pmctrl, ALX_PMCTRL_L1REQ_TO, ALX_PMCTRL_L1REG_TO_DEF);
	ALX_SET_FIELD(pmctrl, ALX_PMCTRL_L1_TIMER, ALX_PMCTRL_L1_TIMER_16US);
	pmctrl &= ~(ALX_PMCTRL_L1_SRDS_EN |
		    ALX_PMCTRL_L1_SRDSPLL_EN |
		    ALX_PMCTRL_L1_BUFSRX_EN |
		    ALX_PMCTRL_SADLY_EN |
		    ALX_PMCTRL_HOTRST_WTEN |
		    ALX_PMCTRL_L0S_EN |
		    ALX_PMCTRL_L1_EN |
		    ALX_PMCTRL_ASPM_FCEN |
		    ALX_PMCTRL_TXL1_AFTER_L0S |
		    ALX_PMCTRL_RXL1_AFTER_L0S);
	if (alx_is_rev_a(rev) && alx_hw_with_cr(hw))
		pmctrl |= ALX_PMCTRL_L1_SRDS_EN | ALX_PMCTRL_L1_SRDSPLL_EN;

	if (l0s_en)
		pmctrl |= (ALX_PMCTRL_L0S_EN | ALX_PMCTRL_ASPM_FCEN);
	if (l1_en)
		pmctrl |= (ALX_PMCTRL_L1_EN | ALX_PMCTRL_ASPM_FCEN);

	alx_write_mem32(hw, ALX_PMCTRL, pmctrl);
}

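/* Translate an ethtool advertisement mask into the ALX_DRV_PHY_*
 * configuration bits kept in the ALX_DRV register.
 */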
static u32 ethadv_to_hw_cfg(struct alx_hw *hw, u32 ethadv_cfg)
{
	u32 cfg = 0;

	if (ethadv_cfg & ADVERTISED_Autoneg) {
		cfg |= ALX_DRV_PHY_AUTO;
		if (ethadv_cfg & ADVERTISED_10baseT_Half)
			cfg |= ALX_DRV_PHY_10;
		if (ethadv_cfg & ADVERTISED_10baseT_Full)
			cfg |= ALX_DRV_PHY_10 | ALX_DRV_PHY_DUPLEX;
		if (ethadv_cfg & ADVERTISED_100baseT_Half)
			cfg |= ALX_DRV_PHY_100;
		if (ethadv_cfg & ADVERTISED_100baseT_Full)
			cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX;
		if (ethadv_cfg & ADVERTISED_1000baseT_Half)
			cfg |= ALX_DRV_PHY_1000;
		if (ethadv_cfg & ADVERTISED_1000baseT_Full)
			cfg |= ALX_DRV_PHY_1000 | ALX_DRV_PHY_DUPLEX;
		if (ethadv_cfg & ADVERTISED_Pause)
			cfg |= ADVERTISE_PAUSE_CAP;
		if (ethadv_cfg & ADVERTISED_Asym_Pause)
			cfg |= ADVERTISE_PAUSE_ASYM;
	} else {
		switch (ethadv_cfg) {
		case ADVERTISED_10baseT_Half:
			cfg |= ALX_DRV_PHY_10;
			break;
		case ADVERTISED_100baseT_Half:
			cfg |= ALX_DRV_PHY_100;
			break;
		case ADVERTISED_10baseT_Full:
			cfg |= ALX_DRV_PHY_10 | ALX_DRV_PHY_DUPLEX;
			break;
		case ADVERTISED_100baseT_Full:
			cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX;
			break;
		}
	}

	return cfg;
}

int alx_setup_speed_duplex(struct alx_hw *hw, u32 ethadv, u8 flowctrl)
{
	u16 adv, giga, cr;
	u32 val;
	int err = 0;

	alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, 0);
	val = alx_read_mem32(hw, ALX_DRV);
	ALX_SET_FIELD(val, ALX_DRV_PHY, 0);

	if (ethadv & ADVERTISED_Autoneg) {
		adv = ADVERTISE_CSMA;
		adv |= ethtool_adv_to_mii_adv_t(ethadv);

		if (flowctrl & ALX_FC_ANEG) {
			if (flowctrl & ALX_FC_RX) {
				adv |= ADVERTISED_Pause;
				if (!(flowctrl & ALX_FC_TX))
					adv |= ADVERTISED_Asym_Pause;
			} else if (flowctrl & ALX_FC_TX) {
				adv |= ADVERTISED_Asym_Pause;
			}
		}
		giga = 0;
		if (alx_hw_giga(hw))
			giga = ethtool_adv_to_mii_ctrl1000_t(ethadv);

		cr = BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART;

		if (alx_write_phy_reg(hw, MII_ADVERTISE, adv) ||
		    alx_write_phy_reg(hw, MII_CTRL1000, giga) ||
		    alx_write_phy_reg(hw, MII_BMCR, cr))
			err = -EBUSY;
	} else {
		cr = BMCR_RESET;
		if (ethadv == ADVERTISED_100baseT_Half ||
		    ethadv == ADVERTISED_100baseT_Full)
			cr |= BMCR_SPEED100;
		if (ethadv == ADVERTISED_10baseT_Full ||
		    ethadv == ADVERTISED_100baseT_Full)
			cr |= BMCR_FULLDPLX;

		err = alx_write_phy_reg(hw, MII_BMCR, cr);
	}

	if (!err) {
		alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, ALX_PHY_INITED);
		val |= ethadv_to_hw_cfg(hw, ethadv);
	}

	alx_write_mem32(hw, ALX_DRV, val);

	return err;
}

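/* Post-link tuning for rev A/B0 silicon: adjust analog thresholds based on
 * the detected cable length and link speed (works around 1000BT/AZ
 * cable-length detection issues).
 */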
void alx_post_phy_link(struct alx_hw *hw)
{
	u16 phy_val, len, agc;
	u8 revid = alx_hw_revision(hw);
	bool adj_th = revid == ALX_REV_B0;

	if (revid != ALX_REV_B0 && !alx_is_rev_a(revid))
		return;

	/* 1000BT/AZ, wrong cable length */
	if (hw->link_speed != SPEED_UNKNOWN) {
		alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL6,
				 &phy_val);
		len = ALX_GET_FIELD(phy_val, ALX_CLDCTRL6_CAB_LEN);
		alx_read_phy_dbg(hw, ALX_MIIDBG_AGC, &phy_val);
		agc = ALX_GET_FIELD(phy_val, ALX_AGC_2_VGA);

		if ((hw->link_speed == SPEED_1000 &&
		     (len > ALX_CLDCTRL6_CAB_LEN_SHORT1G ||
		      (len == 0 && agc > ALX_AGC_LONG1G_LIMT))) ||
		    (hw->link_speed == SPEED_100 &&
		     (len > ALX_CLDCTRL6_CAB_LEN_SHORT100M ||
		      (len == 0 && agc > ALX_AGC_LONG100M_LIMT)))) {
			alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT,
					  ALX_AZ_ANADECT_LONG);
			alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
					 &phy_val);
			alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
					  phy_val | ALX_AFE_10BT_100M_TH);
		} else {
			alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT,
					  ALX_AZ_ANADECT_DEF);
			alx_read_phy_ext(hw, ALX_MIIEXT_ANEG,
					 ALX_MIIEXT_AFE, &phy_val);
			alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
					  phy_val & ~ALX_AFE_10BT_100M_TH);
		}

		/* threshold adjust */
		if (adj_th && hw->lnk_patch) {
			if (hw->link_speed == SPEED_100) {
				alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB,
						  ALX_MSE16DB_UP);
			} else if (hw->link_speed == SPEED_1000) {
				/*
				 * Giga link threshold: raise the noise
				 * tolerance by 50%
				 */
				alx_read_phy_dbg(hw, ALX_MIIDBG_MSE20DB,
						 &phy_val);
				ALX_SET_FIELD(phy_val, ALX_MSE20DB_TH,
					      ALX_MSE20DB_TH_HI);
				alx_write_phy_dbg(hw, ALX_MIIDBG_MSE20DB,
						  phy_val);
			}
		}
	} else {
		alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
				 &phy_val);
		alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
				  phy_val & ~ALX_AFE_10BT_100M_TH);

		if (adj_th && hw->lnk_patch) {
			alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB,
					  ALX_MSE16DB_DOWN);
			alx_read_phy_dbg(hw, ALX_MIIDBG_MSE20DB, &phy_val);
			ALX_SET_FIELD(phy_val, ALX_MSE20DB_TH,
				      ALX_MSE20DB_TH_DEF);
			alx_write_phy_dbg(hw, ALX_MIIDBG_MSE20DB, phy_val);
		}
	}
}

bool alx_phy_configured(struct alx_hw *hw)
{
	u32 cfg, hw_cfg;

	cfg = ethadv_to_hw_cfg(hw, hw->adv_cfg);
	cfg = ALX_GET_FIELD(cfg, ALX_DRV_PHY);
	hw_cfg = alx_get_phy_config(hw);

	if (hw_cfg == ALX_DRV_PHY_UNKNOWN)
		return false;

	return cfg == hw_cfg;
}

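/* Read the current link state. MII_BMSR is read twice because the link
 * status bit is latched low; the second read reflects the current state.
 * Speed and duplex are then taken from the PHY Specific Status Register.
 */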
int alx_read_phy_link(struct alx_hw *hw)
{
	struct pci_dev *pdev = hw->pdev;
	u16 bmsr, giga;
	int err;

	err = alx_read_phy_reg(hw, MII_BMSR, &bmsr);
	if (err)
		return err;

	err = alx_read_phy_reg(hw, MII_BMSR, &bmsr);
	if (err)
		return err;

	if (!(bmsr & BMSR_LSTATUS)) {
		hw->link_speed = SPEED_UNKNOWN;
		hw->duplex = DUPLEX_UNKNOWN;
		return 0;
	}

	/* speed/duplex result is saved in PHY Specific Status Register */
	err = alx_read_phy_reg(hw, ALX_MII_GIGA_PSSR, &giga);
	if (err)
		return err;

	if (!(giga & ALX_GIGA_PSSR_SPD_DPLX_RESOLVED))
		goto wrong_speed;

	switch (giga & ALX_GIGA_PSSR_SPEED) {
	case ALX_GIGA_PSSR_1000MBS:
		hw->link_speed = SPEED_1000;
		break;
	case ALX_GIGA_PSSR_100MBS:
		hw->link_speed = SPEED_100;
		break;
	case ALX_GIGA_PSSR_10MBS:
		hw->link_speed = SPEED_10;
		break;
	default:
		goto wrong_speed;
	}

	hw->duplex = (giga & ALX_GIGA_PSSR_DPLX) ? DUPLEX_FULL : DUPLEX_HALF;

	return 0;

wrong_speed:
	dev_err(&pdev->dev, "invalid PHY speed/duplex: 0x%x\n", giga);
	return -EINVAL;
}

int alx_clear_phy_intr(struct alx_hw *hw)
{
	u16 isr;

	/* clear interrupt status by reading it */
	return alx_read_phy_reg(hw, ALX_MII_ISR, &isr);
}

void alx_disable_rss(struct alx_hw *hw)
{
	u32 ctrl = alx_read_mem32(hw, ALX_RXQ0);

	ctrl &= ~ALX_RXQ0_RSS_HASH_EN;
	alx_write_mem32(hw, ALX_RXQ0, ctrl);
}

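/* One-time basic MAC/DMA configuration: station address, clock gating,
 * interrupt moderation timers, MTU-derived TXQ/RXQ thresholds, flow
 * control watermarks, DMA burst parameters and TX weighted round-robin
 * defaults.
 */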
void alx_configure_basic(struct alx_hw *hw)
{
	u32 val, raw_mtu, max_payload;
	u16 val16;
	u8 chip_rev = alx_hw_revision(hw);

	alx_set_macaddr(hw, hw->mac_addr);

	alx_write_mem32(hw, ALX_CLK_GATE, ALX_CLK_GATE_ALL);

	/* idle timeout to switch clk_125M */
	if (chip_rev >= ALX_REV_B0)
		alx_write_mem32(hw, ALX_IDLE_DECISN_TIMER,
				ALX_IDLE_DECISN_TIMER_DEF);

	alx_write_mem32(hw, ALX_SMB_TIMER, hw->smb_timer * 500UL);

	val = alx_read_mem32(hw, ALX_MASTER);
	val |= ALX_MASTER_IRQMOD2_EN |
	       ALX_MASTER_IRQMOD1_EN |
	       ALX_MASTER_SYSALVTIMER_EN;
	alx_write_mem32(hw, ALX_MASTER, val);
	alx_write_mem32(hw, ALX_IRQ_MODU_TIMER,
			(hw->imt >> 1) << ALX_IRQ_MODU_TIMER1_SHIFT);
	/* intr re-trig timeout */
	alx_write_mem32(hw, ALX_INT_RETRIG, ALX_INT_RETRIG_TO);
	/* tpd threshold to trig int */
	alx_write_mem32(hw, ALX_TINT_TPD_THRSHLD, hw->ith_tpd);
	alx_write_mem32(hw, ALX_TINT_TIMER, hw->imt);

	raw_mtu = hw->mtu + ETH_HLEN;
	alx_write_mem32(hw, ALX_MTU, raw_mtu + 8);
	if (raw_mtu > ALX_MTU_JUMBO_TH)
		hw->rx_ctrl &= ~ALX_MAC_CTRL_FAST_PAUSE;

	if ((raw_mtu + 8) < ALX_TXQ1_JUMBO_TSO_TH)
		val = (raw_mtu + 8 + 7) >> 3;
	else
		val = ALX_TXQ1_JUMBO_TSO_TH >> 3;
	alx_write_mem32(hw, ALX_TXQ1, val | ALX_TXQ1_ERRLGPKT_DROP_EN);

	max_payload = pcie_get_readrq(hw->pdev) >> 8;
	/*
	 * if BIOS had changed the default dma read max length,
	 * restore it to default value
	 */
	if (max_payload < ALX_DEV_CTRL_MAXRRS_MIN)
		pcie_set_readrq(hw->pdev, 128 << ALX_DEV_CTRL_MAXRRS_MIN);

	val = ALX_TXQ_TPD_BURSTPREF_DEF << ALX_TXQ0_TPD_BURSTPREF_SHIFT |
	      ALX_TXQ0_MODE_ENHANCE | ALX_TXQ0_LSO_8023_EN |
	      ALX_TXQ0_SUPT_IPOPT |
	      ALX_TXQ_TXF_BURST_PREF_DEF << ALX_TXQ0_TXF_BURST_PREF_SHIFT;
	alx_write_mem32(hw, ALX_TXQ0, val);
	val = ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q1_NUMPREF_SHIFT |
	      ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q2_NUMPREF_SHIFT |
	      ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q3_NUMPREF_SHIFT |
	      ALX_HQTPD_BURST_EN;
	alx_write_mem32(hw, ALX_HQTPD, val);

	/* rxq, flow control */
	val = alx_read_mem32(hw, ALX_SRAM5);
	val = ALX_GET_FIELD(val, ALX_SRAM_RXF_LEN) << 3;
	if (val > ALX_SRAM_RXF_LEN_8K) {
		val16 = ALX_MTU_STD_ALGN >> 3;
		val = (val - ALX_RXQ2_RXF_FLOW_CTRL_RSVD) >> 3;
	} else {
		val16 = ALX_MTU_STD_ALGN >> 3;
		val = (val - ALX_MTU_STD_ALGN) >> 3;
	}
	alx_write_mem32(hw, ALX_RXQ2,
			val16 << ALX_RXQ2_RXF_XOFF_THRESH_SHIFT |
			val << ALX_RXQ2_RXF_XON_THRESH_SHIFT);

	val = ALX_RXQ0_NUM_RFD_PREF_DEF << ALX_RXQ0_NUM_RFD_PREF_SHIFT |
	      ALX_RXQ0_RSS_MODE_DIS << ALX_RXQ0_RSS_MODE_SHIFT |
	      ALX_RXQ0_IDT_TBL_SIZE_DEF << ALX_RXQ0_IDT_TBL_SIZE_SHIFT |
	      ALX_RXQ0_RSS_HSTYP_ALL | ALX_RXQ0_RSS_HASH_EN |
	      ALX_RXQ0_IPV6_PARSE_EN;

	if (alx_hw_giga(hw))
		ALX_SET_FIELD(val, ALX_RXQ0_ASPM_THRESH,
			      ALX_RXQ0_ASPM_THRESH_100M);

	alx_write_mem32(hw, ALX_RXQ0, val);

	val = alx_read_mem32(hw, ALX_DMA);
	val = ALX_DMA_RORDER_MODE_OUT << ALX_DMA_RORDER_MODE_SHIFT |
	      ALX_DMA_RREQ_PRI_DATA |
	      max_payload << ALX_DMA_RREQ_BLEN_SHIFT |
	      ALX_DMA_WDLY_CNT_DEF << ALX_DMA_WDLY_CNT_SHIFT |
	      ALX_DMA_RDLY_CNT_DEF << ALX_DMA_RDLY_CNT_SHIFT |
	      (hw->dma_chnl - 1) << ALX_DMA_RCHNL_SEL_SHIFT;
	alx_write_mem32(hw, ALX_DMA, val);

	/* default multi-tx-q weights */
	val = ALX_WRR_PRI_RESTRICT_NONE << ALX_WRR_PRI_SHIFT |
	      4 << ALX_WRR_PRI0_SHIFT |
	      4 << ALX_WRR_PRI1_SHIFT |
	      4 << ALX_WRR_PRI2_SHIFT |
	      4 << ALX_WRR_PRI3_SHIFT;
	alx_write_mem32(hw, ALX_WRR, val);
}

bool alx_get_phy_info(struct alx_hw *hw)
{
	u16 devs1, devs2;

	if (alx_read_phy_reg(hw, MII_PHYSID1, &hw->phy_id[0]) ||
	    alx_read_phy_reg(hw, MII_PHYSID2, &hw->phy_id[1]))
		return false;

	/* since we don't have a PMA/PMD status2 register, we can't
	 * use the mdio45_probe function for prtad and mmds.
	 * use fixed MMD3 to get mmds.
	 */
	if (alx_read_phy_ext(hw, 3, MDIO_DEVS1, &devs1) ||
	    alx_read_phy_ext(hw, 3, MDIO_DEVS2, &devs2))
		return false;

	hw->mdio.mmds = devs1 | devs2 << 16;

	return true;
}