/*
 * drivers/net/phy/phy.c
 *
 * Framework for configuring and reading PHY devices
 * Based on code in sungem_phy.c and gianfar_phy.c
 *
 * Author: Andy Fleming
 *
 * Copyright (c) 2004 Freescale Semiconductor, Inc.
 * Copyright (c) 2006, 2007 Maciej W. Rozycki
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/mdio.h>
#include <linux/atomic.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
/**
 * phy_print_status - Convenience function to print out the current phy status
 * @phydev: the phy_device struct
 */
void phy_print_status(struct phy_device *phydev)
{
	if (phydev->link)
		pr_info("%s - Link is Up - %d/%s\n",
			dev_name(&phydev->dev),
			phydev->speed,
			DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
	else
		pr_info("%s - Link is Down\n", dev_name(&phydev->dev));
}
EXPORT_SYMBOL(phy_print_status);
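
/*
 * Illustrative usage sketch (added annotation, not part of the original
 * file): a MAC driver typically calls phy_print_status() from the
 * adjust_link callback it registered via phy_connect(), whenever the link,
 * speed or duplex reported by the PHY has changed.  The names below are
 * hypothetical placeholders for a driver's own.
 *
 *	static void example_adjust_link(struct net_device *ndev)
 *	{
 *		struct example_priv *priv = netdev_priv(ndev);
 *
 *		... reprogram the MAC for priv->phydev->speed/duplex ...
 *		phy_print_status(priv->phydev);
 *	}
 */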
/**
 * phy_clear_interrupt - Ack the phy device's interrupt
 * @phydev: the phy_device struct
 *
 * If the @phydev driver has an ack_interrupt function, call it to
 * ack and clear the phy device's interrupt.
 *
 * Returns 0 on success or < 0 on error.
 */
static int phy_clear_interrupt(struct phy_device *phydev)
{
	int err = 0;

	if (phydev->drv->ack_interrupt)
		err = phydev->drv->ack_interrupt(phydev);

	return err;
}

/**
 * phy_config_interrupt - configure the PHY device for the requested interrupts
 * @phydev: the phy_device struct
 * @interrupts: interrupt flags to configure for this @phydev
 *
 * Returns 0 on success or < 0 on error.
 */
static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
{
	int err = 0;

	phydev->interrupts = interrupts;
	if (phydev->drv->config_intr)
		err = phydev->drv->config_intr(phydev);

	return err;
}

/**
 * phy_aneg_done - return auto-negotiation status
 * @phydev: target phy_device struct
 *
 * Description: Reads the status register and returns 0 either if
 *   auto-negotiation is incomplete, or if there was an error.
 *   Returns BMSR_ANEGCOMPLETE if auto-negotiation is done.
 */
static inline int phy_aneg_done(struct phy_device *phydev)
{
	int retval;

	retval = phy_read(phydev, MII_BMSR);

	return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
}
/* A structure for mapping a particular speed and duplex
 * combination to a particular SUPPORTED and ADVERTISED value */
struct phy_setting {
	int speed;
	int duplex;
	u32 setting;
};

/* A mapping of all SUPPORTED settings to speed/duplex */
static const struct phy_setting settings[] = {
	{
		.speed = 10000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10000baseT_Full,
	},
	{
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_1000baseT_Full,
	},
	{
		.speed = SPEED_1000,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_1000baseT_Half,
	},
	{
		.speed = SPEED_100,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_100baseT_Full,
	},
	{
		.speed = SPEED_100,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_100baseT_Half,
	},
	{
		.speed = SPEED_10,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10baseT_Full,
	},
	{
		.speed = SPEED_10,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_10baseT_Half,
	},
};

#define MAX_NUM_SETTINGS ARRAY_SIZE(settings)
/**
 * phy_find_setting - find a PHY settings array entry that matches speed & duplex
 * @speed: speed to match
 * @duplex: duplex to match
 *
 * Description: Searches the settings array for the setting which
 *   matches the desired speed and duplex, and returns the index
 *   of that setting.  Returns the index of the last setting if
 *   none of the others match.
 */
static inline int phy_find_setting(int speed, int duplex)
{
	int idx = 0;

	while (idx < ARRAY_SIZE(settings) &&
	       (settings[idx].speed != speed ||
		settings[idx].duplex != duplex))
		idx++;

	return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
}

/**
 * phy_find_valid - find a PHY setting that matches the requested features mask
 * @idx: The first index in settings[] to search
 * @features: A mask of the valid settings
 *
 * Description: Returns the index of the first valid setting less
 *   than or equal to the one pointed to by idx, as determined by
 *   the mask in features.  Returns the index of the last setting
 *   if nothing else matches.
 */
static inline int phy_find_valid(int idx, u32 features)
{
	while (idx < MAX_NUM_SETTINGS && !(settings[idx].setting & features))
		idx++;

	return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
}
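
/*
 * Worked example (added annotation, not part of the original file): with the
 * settings[] table above, phy_find_setting(SPEED_100, DUPLEX_FULL) returns
 * index 3.  If the supported mask only contains SUPPORTED_10baseT_Full and
 * SUPPORTED_10baseT_Half, phy_find_valid(3, features) then walks forward to
 * index 5 (10/FULL).  This is how phy_sanitize_settings() and
 * phy_force_reduction() below step down to the next supported
 * speed/duplex pair.
 */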
/**
 * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex
 * @phydev: the target phy_device struct
 *
 * Description: Make sure the PHY is set to supported speeds and
 *   duplexes.  Drop down by one in this order: 1000/FULL,
 *   1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF.
 */
static void phy_sanitize_settings(struct phy_device *phydev)
{
	u32 features = phydev->supported;
	int idx;

	/* Sanitize settings based on PHY capabilities */
	if ((features & SUPPORTED_Autoneg) == 0)
		phydev->autoneg = AUTONEG_DISABLE;

	idx = phy_find_valid(phy_find_setting(phydev->speed, phydev->duplex),
			features);

	phydev->speed = settings[idx].speed;
	phydev->duplex = settings[idx].duplex;
}

/**
 * phy_ethtool_sset - generic ethtool sset function, handles all the details
 * @phydev: target phy_device struct
 * @cmd: ethtool_cmd
 *
 * A few notes about parameter checking:
 * - We don't set port or transceiver, so we don't care what they
 *   were set to.
 * - phy_start_aneg() will make sure forced settings are sane, and
 *   choose the next best ones from the ones selected, so we don't
 *   care if ethtool tries to give us bad values.
 */
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
	u32 speed = ethtool_cmd_speed(cmd);

	if (cmd->phy_address != phydev->addr)
		return -EINVAL;

	/* We make sure that we don't pass unsupported
	 * values in to the PHY */
	cmd->advertising &= phydev->supported;

	/* Verify the settings we care about. */
	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    ((speed != SPEED_1000 &&
	      speed != SPEED_100 &&
	      speed != SPEED_10) ||
	     (cmd->duplex != DUPLEX_HALF &&
	      cmd->duplex != DUPLEX_FULL)))
		return -EINVAL;

	phydev->autoneg = cmd->autoneg;

	phydev->speed = speed;

	phydev->advertising = cmd->advertising;

	if (AUTONEG_ENABLE == cmd->autoneg)
		phydev->advertising |= ADVERTISED_Autoneg;
	else
		phydev->advertising &= ~ADVERTISED_Autoneg;

	phydev->duplex = cmd->duplex;

	/* Restart the PHY */
	phy_start_aneg(phydev);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_sset);

int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
	cmd->supported = phydev->supported;

	cmd->advertising = phydev->advertising;

	ethtool_cmd_speed_set(cmd, phydev->speed);
	cmd->duplex = phydev->duplex;
	cmd->port = PORT_MII;
	cmd->phy_address = phydev->addr;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = phydev->autoneg;

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_gset);
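
/*
 * Illustrative usage sketch (added annotation, not part of the original
 * file): MAC drivers commonly forward their ethtool get_settings and
 * set_settings callbacks to the two helpers above.  The struct and function
 * names below are hypothetical placeholders.
 *
 *	static int example_get_settings(struct net_device *ndev,
 *					struct ethtool_cmd *cmd)
 *	{
 *		struct example_priv *priv = netdev_priv(ndev);
 *
 *		if (!priv->phydev)
 *			return -ENODEV;
 *
 *		return phy_ethtool_gset(priv->phydev, cmd);
 *	}
 */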
/**
 * phy_mii_ioctl - generic PHY MII ioctl interface
 * @phydev: the phy_device struct
 * @ifr: &struct ifreq for socket ioctl's
 * @cmd: ioctl cmd to execute
 *
 * Note that this function is currently incompatible with the
 * PHYCONTROL layer.  It changes registers without regard to
 * current state.  Use at own risk.
 */
int phy_mii_ioctl(struct phy_device *phydev,
		struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *mii_data = if_mii(ifr);
	u16 val = mii_data->val_in;

	switch (cmd) {
	case SIOCGMIIPHY:
		mii_data->phy_id = phydev->addr;
		/* fall through */

	case SIOCGMIIREG:
		mii_data->val_out = mdiobus_read(phydev->bus, mii_data->phy_id,
						 mii_data->reg_num);
		break;

	case SIOCSMIIREG:
		if (mii_data->phy_id == phydev->addr) {
			switch (mii_data->reg_num) {
			case MII_BMCR:
				if ((val & (BMCR_RESET|BMCR_ANENABLE)) == 0)
					phydev->autoneg = AUTONEG_DISABLE;
				else
					phydev->autoneg = AUTONEG_ENABLE;
				if ((!phydev->autoneg) && (val & BMCR_FULLDPLX))
					phydev->duplex = DUPLEX_FULL;
				else
					phydev->duplex = DUPLEX_HALF;
				if ((!phydev->autoneg) &&
				    (val & BMCR_SPEED1000))
					phydev->speed = SPEED_1000;
				else if ((!phydev->autoneg) &&
					 (val & BMCR_SPEED100))
					phydev->speed = SPEED_100;
				break;
			case MII_ADVERTISE:
				phydev->advertising = val;
				break;
			default:
				/* do nothing */
				break;
			}
		}

		mdiobus_write(phydev->bus, mii_data->phy_id,
			      mii_data->reg_num, val);

		if (mii_data->reg_num == MII_BMCR &&
		    val & BMCR_RESET &&
		    phydev->drv->config_init) {
			phy_scan_fixups(phydev);
			phydev->drv->config_init(phydev);
		}
		break;

	case SIOCSHWTSTAMP:
		if (phydev->drv->hwtstamp)
			return phydev->drv->hwtstamp(phydev, ifr);
		/* fall through */

	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
EXPORT_SYMBOL(phy_mii_ioctl);
/**
 * phy_start_aneg - start auto-negotiation for this PHY device
 * @phydev: the phy_device struct
 *
 * Description: Sanitizes the settings (if we're not autonegotiating
 *   them), and then calls the driver's config_aneg function.
 *   If the PHYCONTROL Layer is operating, we change the state to
 *   reflect the beginning of Auto-negotiation or forcing.
 */
int phy_start_aneg(struct phy_device *phydev)
{
	int err;

	mutex_lock(&phydev->lock);

	if (AUTONEG_DISABLE == phydev->autoneg)
		phy_sanitize_settings(phydev);

	err = phydev->drv->config_aneg(phydev);

	if (err < 0)
		goto out_unlock;

	if (phydev->state != PHY_HALTED) {
		if (AUTONEG_ENABLE == phydev->autoneg) {
			phydev->state = PHY_AN;
			phydev->link_timeout = PHY_AN_TIMEOUT;
		} else {
			phydev->state = PHY_FORCING;
			phydev->link_timeout = PHY_FORCE_TIMEOUT;
		}
	}

out_unlock:
	mutex_unlock(&phydev->lock);
	return err;
}
EXPORT_SYMBOL(phy_start_aneg);
static void phy_change(struct work_struct *work);

/**
 * phy_start_machine - start PHY state machine tracking
 * @phydev: the phy_device struct
 * @handler: callback function for state change notifications
 *
 * Description: The PHY infrastructure can run a state machine
 *   which tracks whether the PHY is starting up, negotiating,
 *   etc.  This function starts the timer which tracks the state
 *   of the PHY.  If you want to be notified when the state changes,
 *   pass in the callback @handler, otherwise, pass NULL.  If you
 *   want to maintain your own state machine, do not call this
 *   function.
 */
void phy_start_machine(struct phy_device *phydev,
		void (*handler)(struct net_device *))
{
	phydev->adjust_state = handler;

	schedule_delayed_work(&phydev->state_queue, HZ);
}

/**
 * phy_stop_machine - stop the PHY state machine tracking
 * @phydev: target phy_device struct
 *
 * Description: Stops the state machine timer, sets the state to UP
 *   (unless it wasn't up yet). This function must be called BEFORE
 *   phy_detach.
 */
void phy_stop_machine(struct phy_device *phydev)
{
	cancel_delayed_work_sync(&phydev->state_queue);

	mutex_lock(&phydev->lock);
	if (phydev->state > PHY_UP)
		phydev->state = PHY_UP;
	mutex_unlock(&phydev->lock);

	phydev->adjust_state = NULL;
}
/**
 * phy_force_reduction - reduce PHY speed/duplex settings by one step
 * @phydev: target phy_device struct
 *
 * Description: Reduces the speed/duplex settings by one notch,
 *   in this order:
 *   1000/FULL, 1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF.
 *   The function bottoms out at 10/HALF.
 */
static void phy_force_reduction(struct phy_device *phydev)
{
	int idx;

	idx = phy_find_setting(phydev->speed, phydev->duplex);

	idx++;

	idx = phy_find_valid(idx, phydev->supported);

	phydev->speed = settings[idx].speed;
	phydev->duplex = settings[idx].duplex;

	pr_info("Trying %d/%s\n",
		phydev->speed, DUPLEX_FULL == phydev->duplex ? "FULL" : "HALF");
}
/**
 * phy_error - enter HALTED state for this PHY device
 * @phydev: target phy_device struct
 *
 * Moves the PHY to the HALTED state in response to a read
 * or write error, and tells the controller the link is down.
 * Must not be called from interrupt context, or while the
 * phydev->lock is held.
 */
static void phy_error(struct phy_device *phydev)
{
	mutex_lock(&phydev->lock);
	phydev->state = PHY_HALTED;
	mutex_unlock(&phydev->lock);
}

/**
 * phy_interrupt - PHY interrupt handler
 * @irq: interrupt line
 * @phy_dat: phy_device pointer
 *
 * Description: When a PHY interrupt occurs, the handler disables
 * interrupts, and schedules a work task to clear the interrupt.
 */
static irqreturn_t phy_interrupt(int irq, void *phy_dat)
{
	struct phy_device *phydev = phy_dat;

	if (PHY_HALTED == phydev->state)
		return IRQ_NONE;	/* It can't be ours. */

	/* The MDIO bus is not allowed to be written in interrupt
	 * context, so we need to disable the irq here.  A work
	 * queue will write the PHY to disable and clear the
	 * interrupt, and then reenable the irq line. */
	disable_irq_nosync(irq);
	atomic_inc(&phydev->irq_disable);

	schedule_work(&phydev->phy_queue);

	return IRQ_HANDLED;
}
/**
 * phy_enable_interrupts - Enable the interrupts from the PHY side
 * @phydev: target phy_device struct
 */
static int phy_enable_interrupts(struct phy_device *phydev)
{
	int err;

	err = phy_clear_interrupt(phydev);

	if (err < 0)
		return err;

	err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);

	return err;
}

/**
 * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
 * @phydev: target phy_device struct
 */
static int phy_disable_interrupts(struct phy_device *phydev)
{
	int err;

	/* Disable PHY interrupts */
	err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);

	if (err)
		goto phy_err;

	/* Clear the interrupt */
	err = phy_clear_interrupt(phydev);

	if (err)
		goto phy_err;

	return 0;

phy_err:
	phy_error(phydev);

	return err;
}

/**
 * phy_start_interrupts - request and enable interrupts for a PHY device
 * @phydev: target phy_device struct
 *
 * Description: Request the interrupt for the given PHY.
 *   If this fails, then we set irq to PHY_POLL.
 *   Otherwise, we enable the interrupts in the PHY.
 *   This should only be called with a valid IRQ number.
 *   Returns 0 on success or < 0 on error.
 */
int phy_start_interrupts(struct phy_device *phydev)
{
	int err = 0;

	INIT_WORK(&phydev->phy_queue, phy_change);

	atomic_set(&phydev->irq_disable, 0);
	if (request_irq(phydev->irq, phy_interrupt,
				IRQF_SHARED,
				"phy_interrupt",
				phydev) < 0) {
		pr_warn("%s: Can't get IRQ %d (PHY)\n",
			phydev->bus->name, phydev->irq);
		phydev->irq = PHY_POLL;
		return 0;
	}

	err = phy_enable_interrupts(phydev);

	return err;
}
EXPORT_SYMBOL(phy_start_interrupts);
/**
 * phy_stop_interrupts - disable interrupts from a PHY device
 * @phydev: target phy_device struct
 */
int phy_stop_interrupts(struct phy_device *phydev)
{
	int err;

	err = phy_disable_interrupts(phydev);

	if (err)
		phy_error(phydev);

	free_irq(phydev->irq, phydev);

	/*
	 * Cannot call flush_scheduled_work() here as desired because
	 * of rtnl_lock(), but we do not really care about what would
	 * be done, except from enable_irq(), so cancel any work
	 * possibly pending and take care of the matter below.
	 */
	cancel_work_sync(&phydev->phy_queue);

	/*
	 * If work indeed has been cancelled, disable_irq() will have
	 * been left unbalanced from phy_interrupt() and enable_irq()
	 * has to be called so that other devices on the line work.
	 */
	while (atomic_dec_return(&phydev->irq_disable) >= 0)
		enable_irq(phydev->irq);

	return err;
}
EXPORT_SYMBOL(phy_stop_interrupts);
/**
 * phy_change - Scheduled by the phy_interrupt/timer to handle PHY changes
 * @work: work_struct that describes the work to be done
 */
static void phy_change(struct work_struct *work)
{
	int err;
	struct phy_device *phydev =
		container_of(work, struct phy_device, phy_queue);

	if (phydev->drv->did_interrupt &&
	    !phydev->drv->did_interrupt(phydev))
		goto ignore;

	err = phy_disable_interrupts(phydev);

	if (err)
		goto phy_err;

	mutex_lock(&phydev->lock);
	if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
		phydev->state = PHY_CHANGELINK;
	mutex_unlock(&phydev->lock);

	atomic_dec(&phydev->irq_disable);
	enable_irq(phydev->irq);

	/* Reenable interrupts */
	if (PHY_HALTED != phydev->state)
		err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);

	if (err)
		goto irq_enable_err;

	/* reschedule state queue work to run as soon as possible */
	cancel_delayed_work_sync(&phydev->state_queue);
	schedule_delayed_work(&phydev->state_queue, 0);

	return;

ignore:
	atomic_dec(&phydev->irq_disable);
	enable_irq(phydev->irq);
	return;

irq_enable_err:
	disable_irq(phydev->irq);
	atomic_inc(&phydev->irq_disable);
phy_err:
	phy_error(phydev);
}
/**
 * phy_stop - Bring down the PHY link, and stop checking the status
 * @phydev: target phy_device struct
 */
void phy_stop(struct phy_device *phydev)
{
	mutex_lock(&phydev->lock);

	if (PHY_HALTED == phydev->state)
		goto out_unlock;

	if (phydev->irq != PHY_POLL) {
		/* Disable PHY Interrupts */
		phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);

		/* Clear any pending interrupts */
		phy_clear_interrupt(phydev);
	}

	phydev->state = PHY_HALTED;

out_unlock:
	mutex_unlock(&phydev->lock);

	/*
	 * Cannot call flush_scheduled_work() here as desired because
	 * of rtnl_lock(), but PHY_HALTED shall guarantee phy_change()
	 * will not reenable interrupts.
	 */
}

/**
 * phy_start - start or restart a PHY device
 * @phydev: target phy_device struct
 *
 * Description: Indicates the attached device's readiness to
 *   handle PHY-related work.  Used during startup to start the
 *   PHY, and after a call to phy_stop() to resume operation.
 *   Also used to indicate the MDIO bus has cleared an error
 *   condition.
 */
void phy_start(struct phy_device *phydev)
{
	mutex_lock(&phydev->lock);

	switch (phydev->state) {
	case PHY_STARTING:
		phydev->state = PHY_PENDING;
		break;
	case PHY_READY:
		phydev->state = PHY_UP;
		break;
	case PHY_HALTED:
		phydev->state = PHY_RESUMING;
	default:
		break;
	}
	mutex_unlock(&phydev->lock);
}
EXPORT_SYMBOL(phy_stop);
EXPORT_SYMBOL(phy_start);
/**
 * phy_state_machine - Handle the state machine
 * @work: work_struct that describes the work to be done
 */
void phy_state_machine(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct phy_device *phydev =
			container_of(dwork, struct phy_device, state_queue);
	int needs_aneg = 0;
	int err = 0;

	mutex_lock(&phydev->lock);

	if (phydev->adjust_state)
		phydev->adjust_state(phydev->attached_dev);

	switch (phydev->state) {
	case PHY_DOWN:
	case PHY_STARTING:
	case PHY_READY:
	case PHY_PENDING:
		break;
	case PHY_UP:
		needs_aneg = 1;

		phydev->link_timeout = PHY_AN_TIMEOUT;

		break;
	case PHY_AN:
		err = phy_read_status(phydev);

		if (err < 0)
			break;

		/* If the link is down, give up on
		 * negotiation for now */
		if (!phydev->link) {
			phydev->state = PHY_NOLINK;
			netif_carrier_off(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);
			break;
		}

		/* Check if negotiation is done.  Break
		 * if there's an error */
		err = phy_aneg_done(phydev);
		if (err < 0)
			break;

		/* If AN is done, we're running */
		if (err > 0) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);

		} else if (0 == phydev->link_timeout--) {
			int idx;

			needs_aneg = 1;
			/* If we have the magic_aneg bit,
			 * we try again */
			if (phydev->drv->flags & PHY_HAS_MAGICANEG)
				break;

			/* The timer expired, and we still
			 * don't have a setting, so we try
			 * forcing it until we find one that
			 * works, starting from the fastest speed,
			 * and working our way down */
			idx = phy_find_valid(0, phydev->supported);

			phydev->speed = settings[idx].speed;
			phydev->duplex = settings[idx].duplex;

			phydev->autoneg = AUTONEG_DISABLE;

			pr_info("Trying %d/%s\n",
				phydev->speed,
				DUPLEX_FULL == phydev->duplex ?
				"FULL" : "HALF");
		}
		break;
	case PHY_NOLINK:
		err = phy_read_status(phydev);

		if (err)
			break;

		if (phydev->link) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);
		}
		break;
	case PHY_FORCING:
		err = genphy_update_link(phydev);

		if (err)
			break;

		if (phydev->link) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
		} else {
			if (0 == phydev->link_timeout--) {
				phy_force_reduction(phydev);
				needs_aneg = 1;
			}
		}

		phydev->adjust_link(phydev->attached_dev);
		break;
	case PHY_RUNNING:
		/* Only register a CHANGE if we are
		 * polling */
		if (PHY_POLL == phydev->irq)
			phydev->state = PHY_CHANGELINK;
		break;
	case PHY_CHANGELINK:
		err = phy_read_status(phydev);

		if (err)
			break;

		if (phydev->link) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
		} else {
			phydev->state = PHY_NOLINK;
			netif_carrier_off(phydev->attached_dev);
		}

		phydev->adjust_link(phydev->attached_dev);

		if (PHY_POLL != phydev->irq)
			err = phy_config_interrupt(phydev,
					PHY_INTERRUPT_ENABLED);
		break;
	case PHY_HALTED:
		if (phydev->link) {
			phydev->link = 0;
			netif_carrier_off(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);
		}
		break;
	case PHY_RESUMING:
		err = phy_clear_interrupt(phydev);

		if (err)
			break;

		err = phy_config_interrupt(phydev,
				PHY_INTERRUPT_ENABLED);

		if (err)
			break;

		if (AUTONEG_ENABLE == phydev->autoneg) {
			err = phy_aneg_done(phydev);
			if (err < 0)
				break;

			/* err > 0 if AN is done.
			 * Otherwise, it's 0, and we're
			 * still waiting for AN */
			if (err > 0) {
				err = phy_read_status(phydev);
				if (err)
					break;

				if (phydev->link) {
					phydev->state = PHY_RUNNING;
					netif_carrier_on(phydev->attached_dev);
				} else
					phydev->state = PHY_NOLINK;
				phydev->adjust_link(phydev->attached_dev);
			} else {
				phydev->state = PHY_AN;
				phydev->link_timeout = PHY_AN_TIMEOUT;
			}
		} else {
			err = phy_read_status(phydev);
			if (err)
				break;

			if (phydev->link) {
				phydev->state = PHY_RUNNING;
				netif_carrier_on(phydev->attached_dev);
			} else
				phydev->state = PHY_NOLINK;
			phydev->adjust_link(phydev->attached_dev);
		}
		break;
	}

	mutex_unlock(&phydev->lock);

	if (needs_aneg)
		err = phy_start_aneg(phydev);

	if (err < 0)
		phy_error(phydev);

	schedule_delayed_work(&phydev->state_queue, PHY_STATE_TIME * HZ);
}
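
/*
 * Summary of the polling flow above (added annotation, not part of the
 * original file): PHY_UP requests auto-negotiation, which moves the state to
 * PHY_AN or PHY_FORCING via phy_start_aneg(); PHY_AN becomes PHY_RUNNING once
 * phy_aneg_done() reports completion and the link is up, or PHY_NOLINK if the
 * link is down; PHY_RUNNING and PHY_NOLINK toggle through PHY_CHANGELINK as
 * the link changes (when polling, or after an interrupt); phy_stop() and
 * phy_start() move through PHY_HALTED and PHY_RESUMING.  The work item
 * reschedules itself every PHY_STATE_TIME seconds.
 */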
static inline void mmd_phy_indirect(struct mii_bus *bus, int prtad, int devad,
				    int addr)
{
	/* Write the desired MMD Devad */
	bus->write(bus, addr, MII_MMD_CTRL, devad);

	/* Write the desired MMD register address */
	bus->write(bus, addr, MII_MMD_DATA, prtad);

	/* Select the Function : DATA with no post increment */
	bus->write(bus, addr, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR));
}

/**
 * phy_read_mmd_indirect - reads data from the MMD registers
 * @bus: the target MII bus
 * @prtad: MMD Address
 * @devad: MMD DEVAD
 * @addr: PHY address on the MII bus
 *
 * Description: it reads data from the MMD registers (clause 22 access to the
 * clause 45 register set) of the specified phy address.
 * To read these registers we have:
 * 1) Write reg 13 // DEVAD
 * 2) Write reg 14 // MMD Address
 * 3) Write reg 13 // MMD Data Command for MMD DEVAD
 * 4) Read  reg 14 // Read MMD data
 */
static int phy_read_mmd_indirect(struct mii_bus *bus, int prtad, int devad,
				 int addr)
{
	u32 ret;

	mmd_phy_indirect(bus, prtad, devad, addr);

	/* Read the content of the MMD's selected register */
	ret = bus->read(bus, addr, MII_MMD_DATA);

	return ret;
}

/**
 * phy_write_mmd_indirect - writes data to the MMD registers
 * @bus: the target MII bus
 * @prtad: MMD Address
 * @devad: MMD DEVAD
 * @addr: PHY address on the MII bus
 * @data: data to write in the MMD register
 *
 * Description: Write data to the MMD registers of the specified
 * phy address.
 * To write these registers we have:
 * 1) Write reg 13 // DEVAD
 * 2) Write reg 14 // MMD Address
 * 3) Write reg 13 // MMD Data Command for MMD DEVAD
 * 4) Write reg 14 // Write MMD data
 */
static void phy_write_mmd_indirect(struct mii_bus *bus, int prtad, int devad,
				   int addr, u32 data)
{
	mmd_phy_indirect(bus, prtad, devad, addr);

	/* Write the data into MMD's selected register */
	bus->write(bus, addr, MII_MMD_DATA, data);
}
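
/*
 * Worked example (added annotation, not part of the original file): reading
 * the EEE ability register 3.20 (MDIO_MMD_PCS = 3, MDIO_PCS_EEE_ABLE = 20)
 * through the clause 22 indirection above expands to:
 *
 *	write reg 13 (MII_MMD_CTRL) = 0x0003	select DEVAD 3
 *	write reg 14 (MII_MMD_DATA) = 0x0014	register address 20
 *	write reg 13 (MII_MMD_CTRL) = 0x4003	DEVAD 3, data, no post-increment
 *	read  reg 14 (MII_MMD_DATA)		returns the contents of 3.20
 */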
/**
 * phy_init_eee - init and check the EEE feature
 * @phydev: target phy_device struct
 * @clk_stop_enable: PHY may stop the clock during LPI
 *
 * Description: it checks if the Energy-Efficient Ethernet (EEE)
 * is supported by looking at the MMD registers 3.20 and 7.60/61
 * and it programs the MMD register 3.0 setting the "Clock stop enable"
 * bit if required.
 */
int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
{
	int ret = -EPROTONOSUPPORT;

	/* According to 802.3az, EEE is supported only in full duplex mode.
	 * Also the EEE feature is active when the core is operating with MII,
	 * GMII or RGMII.
	 */
	if ((phydev->duplex == DUPLEX_FULL) &&
	    ((phydev->interface == PHY_INTERFACE_MODE_MII) ||
	    (phydev->interface == PHY_INTERFACE_MODE_GMII) ||
	    (phydev->interface == PHY_INTERFACE_MODE_RGMII))) {
		int eee_lp, eee_cap, eee_adv;
		u32 lp, cap, adv;
		int idx, status;

		/* Read phy status to properly get the right settings */
		status = phy_read_status(phydev);
		if (status)
			return status;

		/* First check if the EEE ability is supported */
		eee_cap = phy_read_mmd_indirect(phydev->bus, MDIO_PCS_EEE_ABLE,
						MDIO_MMD_PCS, phydev->addr);
		if (eee_cap < 0)
			return eee_cap;

		cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
		if (!cap)
			goto eee_exit;

		/* Check which link settings negotiated and verify it in
		 * the EEE advertising registers.
		 */
		eee_lp = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_LPABLE,
					       MDIO_MMD_AN, phydev->addr);
		if (eee_lp < 0)
			return eee_lp;

		eee_adv = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV,
						MDIO_MMD_AN, phydev->addr);
		if (eee_adv < 0)
			return eee_adv;

		adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
		lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
		idx = phy_find_setting(phydev->speed, phydev->duplex);
		if ((lp & adv & settings[idx].setting))
			goto eee_exit;

		if (clk_stop_enable) {
			/* Configure the PHY to stop receiving xMII
			 * clock while it is signaling LPI.
			 */
			int val = phy_read_mmd_indirect(phydev->bus, MDIO_CTRL1,
							MDIO_MMD_PCS,
							phydev->addr);
			if (val < 0)
				return val;

			val |= MDIO_PCS_CTRL1_CLKSTOP_EN;
			phy_write_mmd_indirect(phydev->bus, MDIO_CTRL1,
					       MDIO_MMD_PCS, phydev->addr, val);
		}

		ret = 0; /* EEE supported */
	}

eee_exit:
	return ret;
}
EXPORT_SYMBOL(phy_init_eee);
/**
 * phy_get_eee_err - report the EEE wake error count
 * @phydev: target phy_device struct
 *
 * Description: it reports the number of times the PHY
 * failed to complete its normal wake sequence.
 */
int phy_get_eee_err(struct phy_device *phydev)
{
	return phy_read_mmd_indirect(phydev->bus, MDIO_PCS_EEE_WK_ERR,
				     MDIO_MMD_PCS, phydev->addr);
}
EXPORT_SYMBOL(phy_get_eee_err);

/**
 * phy_ethtool_get_eee - get EEE supported and status
 * @phydev: target phy_device struct
 * @data: ethtool_eee data
 *
 * Description: it reports the Supported/Advertisement/LP Advertisement
 * capabilities.
 */
int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
{
	int val;

	/* Get Supported EEE */
	val = phy_read_mmd_indirect(phydev->bus, MDIO_PCS_EEE_ABLE,
				    MDIO_MMD_PCS, phydev->addr);
	if (val < 0)
		return val;
	data->supported = mmd_eee_cap_to_ethtool_sup_t(val);

	/* Get advertisement EEE */
	val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV,
				    MDIO_MMD_AN, phydev->addr);
	if (val < 0)
		return val;
	data->advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Get LP advertisement EEE */
	val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_LPABLE,
				    MDIO_MMD_AN, phydev->addr);
	if (val < 0)
		return val;
	data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_get_eee);

/**
 * phy_ethtool_set_eee - set EEE supported and status
 * @phydev: target phy_device struct
 * @data: ethtool_eee data
 *
 * Description: it programs the EEE Advertisement register.
 */
int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
{
	int val;

	val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
	phy_write_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV, MDIO_MMD_AN,
			       phydev->addr, val);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_set_eee);