/*
 * drivers/net/phy/phy.c
 *
 * Framework for configuring and reading PHY devices
 * Based on code in sungem_phy.c and gianfar_phy.c
 *
 * Author: Andy Fleming
 *
 * Copyright (c) 2004 Freescale Semiconductor, Inc.
 * Copyright (c) 2006, 2007 Maciej W. Rozycki
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/mdio.h>
#include <linux/atomic.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
/**
 * phy_print_status - Convenience function to print out the current phy status
 * @phydev: the phy_device struct
 */
void phy_print_status(struct phy_device *phydev)
{
        if (phydev->link)
                pr_info("%s - Link is Up - %d/%s\n",
                        dev_name(&phydev->dev),
                        phydev->speed,
                        DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
        else
                pr_info("%s - Link is Down\n", dev_name(&phydev->dev));
}
EXPORT_SYMBOL(phy_print_status);
/**
 * phy_clear_interrupt - Ack the phy device's interrupt
 * @phydev: the phy_device struct
 *
 * If the @phydev driver has an ack_interrupt function, call it to
 * ack and clear the phy device's interrupt.
 *
 * Returns 0 on success or < 0 on error.
 */
static int phy_clear_interrupt(struct phy_device *phydev)
{
        int err = 0;

        if (phydev->drv->ack_interrupt)
                err = phydev->drv->ack_interrupt(phydev);

        return err;
}

/**
 * phy_config_interrupt - configure the PHY device for the requested interrupts
 * @phydev: the phy_device struct
 * @interrupts: interrupt flags to configure for this @phydev
 *
 * Returns 0 on success or < 0 on error.
 */
static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
{
        int err = 0;

        phydev->interrupts = interrupts;
        if (phydev->drv->config_intr)
                err = phydev->drv->config_intr(phydev);

        return err;
}
/**
 * phy_aneg_done - return auto-negotiation status
 * @phydev: target phy_device struct
 *
 * Description: Reads the status register and returns a negative errno on
 * a read error, 0 if auto-negotiation is incomplete, or BMSR_ANEGCOMPLETE
 * if auto-negotiation is done.
 */
static inline int phy_aneg_done(struct phy_device *phydev)
{
        int retval;

        retval = phy_read(phydev, MII_BMSR);

        return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
}
/* A structure for mapping a particular speed and duplex
 * combination to a particular SUPPORTED and ADVERTISED value */
struct phy_setting {
        int speed;
        int duplex;
        u32 setting;
};

/* A mapping of all SUPPORTED settings to speed/duplex */
static const struct phy_setting settings[] = {
        {
                .speed = 10000,
                .duplex = DUPLEX_FULL,
                .setting = SUPPORTED_10000baseT_Full,
        },
        {
                .speed = SPEED_1000,
                .duplex = DUPLEX_FULL,
                .setting = SUPPORTED_1000baseT_Full,
        },
        {
                .speed = SPEED_1000,
                .duplex = DUPLEX_HALF,
                .setting = SUPPORTED_1000baseT_Half,
        },
        {
                .speed = SPEED_100,
                .duplex = DUPLEX_FULL,
                .setting = SUPPORTED_100baseT_Full,
        },
        {
                .speed = SPEED_100,
                .duplex = DUPLEX_HALF,
                .setting = SUPPORTED_100baseT_Half,
        },
        {
                .speed = SPEED_10,
                .duplex = DUPLEX_FULL,
                .setting = SUPPORTED_10baseT_Full,
        },
        {
                .speed = SPEED_10,
                .duplex = DUPLEX_HALF,
                .setting = SUPPORTED_10baseT_Half,
        },
};

#define MAX_NUM_SETTINGS ARRAY_SIZE(settings)
/**
 * phy_find_setting - find a PHY settings array entry that matches speed & duplex
 * @speed: speed to match
 * @duplex: duplex to match
 *
 * Description: Searches the settings array for the setting which
 * matches the desired speed and duplex, and returns the index
 * of that setting. Returns the index of the last setting if
 * none of the others match.
 */
static inline int phy_find_setting(int speed, int duplex)
{
        int idx = 0;

        while (idx < ARRAY_SIZE(settings) &&
               (settings[idx].speed != speed ||
                settings[idx].duplex != duplex))
                idx++;

        return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
}

/**
 * phy_find_valid - find a PHY setting that matches the requested features mask
 * @idx: The first index in settings[] to search
 * @features: A mask of the valid settings
 *
 * Description: Returns the index of the first valid setting less
 * than or equal to the one pointed to by idx, as determined by
 * the mask in features. Returns the index of the last setting
 * if nothing else matches.
 */
static inline int phy_find_valid(int idx, u32 features)
{
        while (idx < MAX_NUM_SETTINGS && !(settings[idx].setting & features))
                idx++;

        return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
}

/**
 * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex
 * @phydev: the target phy_device struct
 *
 * Description: Make sure the PHY is set to supported speeds and
 * duplexes. Drop down by one in this order: 1000/FULL,
 * 1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF.
 */
static void phy_sanitize_settings(struct phy_device *phydev)
{
        u32 features = phydev->supported;
        int idx;

        /* Sanitize settings based on PHY capabilities */
        if ((features & SUPPORTED_Autoneg) == 0)
                phydev->autoneg = AUTONEG_DISABLE;

        idx = phy_find_valid(phy_find_setting(phydev->speed, phydev->duplex),
                             features);

        phydev->speed = settings[idx].speed;
        phydev->duplex = settings[idx].duplex;
}
/**
 * phy_ethtool_sset - generic ethtool sset function, handles all the details
 * @phydev: target phy_device struct
 * @cmd: ethtool_cmd
 *
 * A few notes about parameter checking:
 * - We don't set port or transceiver, so we don't care what they
 *   were set to.
 * - phy_start_aneg() will make sure forced settings are sane, and
 *   choose the next best ones from the ones selected, so we don't
 *   care if ethtool tries to give us bad values.
 */
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
        u32 speed = ethtool_cmd_speed(cmd);

        if (cmd->phy_address != phydev->addr)
                return -EINVAL;

        /* We make sure that we don't pass unsupported
         * values in to the PHY */
        cmd->advertising &= phydev->supported;

        /* Verify the settings we care about. */
        if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
                return -EINVAL;

        if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
                return -EINVAL;

        if (cmd->autoneg == AUTONEG_DISABLE &&
            ((speed != SPEED_1000 &&
              speed != SPEED_100 &&
              speed != SPEED_10) ||
             (cmd->duplex != DUPLEX_HALF &&
              cmd->duplex != DUPLEX_FULL)))
                return -EINVAL;

        phydev->autoneg = cmd->autoneg;

        phydev->speed = speed;

        phydev->advertising = cmd->advertising;

        if (AUTONEG_ENABLE == cmd->autoneg)
                phydev->advertising |= ADVERTISED_Autoneg;
        else
                phydev->advertising &= ~ADVERTISED_Autoneg;

        phydev->duplex = cmd->duplex;

        /* Restart the PHY */
        phy_start_aneg(phydev);

        return 0;
}
EXPORT_SYMBOL(phy_ethtool_sset);
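
/**
 * phy_ethtool_gset - generic ethtool gset function, fills in the ethtool_cmd
 * @phydev: target phy_device struct
 * @cmd: ethtool_cmd to fill in
 *
 * Description: Reports the speed, duplex, advertising, port and autoneg
 * settings currently cached in @phydev back through @cmd.
 */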
int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
        cmd->supported = phydev->supported;

        cmd->advertising = phydev->advertising;

        ethtool_cmd_speed_set(cmd, phydev->speed);
        cmd->duplex = phydev->duplex;
        cmd->port = PORT_MII;
        cmd->phy_address = phydev->addr;
        cmd->transceiver = phy_is_internal(phydev) ?
                XCVR_INTERNAL : XCVR_EXTERNAL;
        cmd->autoneg = phydev->autoneg;

        return 0;
}
EXPORT_SYMBOL(phy_ethtool_gset);
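
/*
 * Minimal usage sketch (illustrative only, not part of this file): a MAC
 * driver's ethtool get_settings/set_settings callbacks typically just
 * delegate to the two helpers above. "foo_get_settings" and "foo_priv"
 * below are hypothetical driver names.
 *
 *	static int foo_get_settings(struct net_device *dev,
 *				    struct ethtool_cmd *cmd)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		if (!priv->phydev)
 *			return -ENODEV;
 *		return phy_ethtool_gset(priv->phydev, cmd);
 *	}
 */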
/**
 * phy_mii_ioctl - generic PHY MII ioctl interface
 * @phydev: the phy_device struct
 * @ifr: &struct ifreq for socket ioctl's
 * @cmd: ioctl cmd to execute
 *
 * Note that this function is currently incompatible with the
 * PHYCONTROL layer. It changes registers without regard to
 * current state. Use at own risk.
 */
int phy_mii_ioctl(struct phy_device *phydev,
                  struct ifreq *ifr, int cmd)
{
        struct mii_ioctl_data *mii_data = if_mii(ifr);
        u16 val = mii_data->val_in;

        switch (cmd) {
        case SIOCGMIIPHY:
                mii_data->phy_id = phydev->addr;
                /* fall through */

        case SIOCGMIIREG:
                mii_data->val_out = mdiobus_read(phydev->bus, mii_data->phy_id,
                                                 mii_data->reg_num);
                break;

        case SIOCSMIIREG:
                if (mii_data->phy_id == phydev->addr) {
                        switch (mii_data->reg_num) {
                        case MII_BMCR:
                                if ((val & (BMCR_RESET|BMCR_ANENABLE)) == 0)
                                        phydev->autoneg = AUTONEG_DISABLE;
                                else
                                        phydev->autoneg = AUTONEG_ENABLE;
                                if ((!phydev->autoneg) && (val & BMCR_FULLDPLX))
                                        phydev->duplex = DUPLEX_FULL;
                                else
                                        phydev->duplex = DUPLEX_HALF;
                                if ((!phydev->autoneg) &&
                                    (val & BMCR_SPEED1000))
                                        phydev->speed = SPEED_1000;
                                else if ((!phydev->autoneg) &&
                                         (val & BMCR_SPEED100))
                                        phydev->speed = SPEED_100;
                                break;
                        case MII_ADVERTISE:
                                phydev->advertising = val;
                                break;
                        default:
                                /* do nothing */
                                break;
                        }
                }

                mdiobus_write(phydev->bus, mii_data->phy_id,
                              mii_data->reg_num, val);

                if (mii_data->reg_num == MII_BMCR &&
                    val & BMCR_RESET &&
                    phydev->drv->config_init) {
                        phy_scan_fixups(phydev);
                        phydev->drv->config_init(phydev);
                }
                break;

        case SIOCSHWTSTAMP:
                if (phydev->drv->hwtstamp)
                        return phydev->drv->hwtstamp(phydev, ifr);
                /* fall through */

        default:
                return -EOPNOTSUPP;
        }

        return 0;
}
EXPORT_SYMBOL(phy_mii_ioctl);
/**
 * phy_start_aneg - start auto-negotiation for this PHY device
 * @phydev: the phy_device struct
 *
 * Description: Sanitizes the settings (if we're not autonegotiating
 * them), and then calls the driver's config_aneg function.
 * If the PHYCONTROL Layer is operating, we change the state to
 * reflect the beginning of Auto-negotiation or forcing.
 */
int phy_start_aneg(struct phy_device *phydev)
{
        int err;

        mutex_lock(&phydev->lock);

        if (AUTONEG_DISABLE == phydev->autoneg)
                phy_sanitize_settings(phydev);

        err = phydev->drv->config_aneg(phydev);

        if (err < 0)
                goto out_unlock;

        if (phydev->state != PHY_HALTED) {
                if (AUTONEG_ENABLE == phydev->autoneg) {
                        phydev->state = PHY_AN;
                        phydev->link_timeout = PHY_AN_TIMEOUT;
                } else {
                        phydev->state = PHY_FORCING;
                        phydev->link_timeout = PHY_FORCE_TIMEOUT;
                }
        }

out_unlock:
        mutex_unlock(&phydev->lock);
        return err;
}
EXPORT_SYMBOL(phy_start_aneg);
/**
 * phy_start_machine - start PHY state machine tracking
 * @phydev: the phy_device struct
 * @handler: callback function for state change notifications
 *
 * Description: The PHY infrastructure can run a state machine
 * which tracks whether the PHY is starting up, negotiating,
 * etc. This function starts the timer which tracks the state
 * of the PHY. If you want to be notified when the state changes,
 * pass in the callback @handler, otherwise, pass NULL. If you
 * want to maintain your own state machine, do not call this
 * function.
 */
void phy_start_machine(struct phy_device *phydev,
                       void (*handler)(struct net_device *))
{
        phydev->adjust_state = handler;

        queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, HZ);
}
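
/*
 * Note on @handler above: phy_state_machine() invokes it (as
 * phydev->adjust_state) on every pass through the state machine, before
 * the current state is evaluated. In practice most MAC drivers pass NULL
 * here and rely instead on the adjust_link callback they supplied when
 * attaching the PHY.
 */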
/**
 * phy_stop_machine - stop the PHY state machine tracking
 * @phydev: target phy_device struct
 *
 * Description: Stops the state machine timer, sets the state to UP
 * (unless it wasn't up yet). This function must be called BEFORE
 * phy_detach.
 */
void phy_stop_machine(struct phy_device *phydev)
{
        cancel_delayed_work_sync(&phydev->state_queue);

        mutex_lock(&phydev->lock);
        if (phydev->state > PHY_UP)
                phydev->state = PHY_UP;
        mutex_unlock(&phydev->lock);

        phydev->adjust_state = NULL;
}
/**
 * phy_error - enter HALTED state for this PHY device
 * @phydev: target phy_device struct
 *
 * Moves the PHY to the HALTED state in response to a read
 * or write error, and tells the controller the link is down.
 * Must not be called from interrupt context, or while the
 * phydev->lock is held.
 */
static void phy_error(struct phy_device *phydev)
{
        mutex_lock(&phydev->lock);
        phydev->state = PHY_HALTED;
        mutex_unlock(&phydev->lock);
}

/**
 * phy_interrupt - PHY interrupt handler
 * @irq: interrupt line
 * @phy_dat: phy_device pointer
 *
 * Description: When a PHY interrupt occurs, the handler disables
 * interrupts, and schedules a work task to clear the interrupt.
 */
static irqreturn_t phy_interrupt(int irq, void *phy_dat)
{
        struct phy_device *phydev = phy_dat;

        if (PHY_HALTED == phydev->state)
                return IRQ_NONE;                /* It can't be ours. */

        /* The MDIO bus is not allowed to be written in interrupt
         * context, so we need to disable the irq here. A work
         * queue will write the PHY to disable and clear the
         * interrupt, and then reenable the irq line. */
        disable_irq_nosync(irq);
        atomic_inc(&phydev->irq_disable);

        queue_work(system_power_efficient_wq, &phydev->phy_queue);

        return IRQ_HANDLED;
}
/**
 * phy_enable_interrupts - Enable the interrupts from the PHY side
 * @phydev: target phy_device struct
 */
static int phy_enable_interrupts(struct phy_device *phydev)
{
        int err;

        err = phy_clear_interrupt(phydev);

        if (err < 0)
                return err;

        err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);

        return err;
}

/**
 * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
 * @phydev: target phy_device struct
 */
static int phy_disable_interrupts(struct phy_device *phydev)
{
        int err;

        /* Disable PHY interrupts */
        err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);

        if (err)
                goto phy_err;

        /* Clear the interrupt */
        err = phy_clear_interrupt(phydev);

        if (err)
                goto phy_err;

        return 0;

phy_err:
        phy_error(phydev);

        return err;
}
/**
 * phy_start_interrupts - request and enable interrupts for a PHY device
 * @phydev: target phy_device struct
 *
 * Description: Request the interrupt for the given PHY.
 * If this fails, then we set irq to PHY_POLL.
 * Otherwise, we enable the interrupts in the PHY.
 * This should only be called with a valid IRQ number.
 * Returns 0 on success or < 0 on error.
 */
int phy_start_interrupts(struct phy_device *phydev)
{
        int err = 0;

        atomic_set(&phydev->irq_disable, 0);
        if (request_irq(phydev->irq, phy_interrupt,
                        IRQF_SHARED,
                        "phy_interrupt",
                        phydev) < 0) {
                pr_warn("%s: Can't get IRQ %d (PHY)\n",
                        phydev->bus->name, phydev->irq);
                phydev->irq = PHY_POLL;
                return 0;
        }

        err = phy_enable_interrupts(phydev);

        return err;
}
EXPORT_SYMBOL(phy_start_interrupts);

/**
 * phy_stop_interrupts - disable interrupts from a PHY device
 * @phydev: target phy_device struct
 */
int phy_stop_interrupts(struct phy_device *phydev)
{
        int err;

        err = phy_disable_interrupts(phydev);

        if (err)
                phy_error(phydev);

        free_irq(phydev->irq, phydev);

        /*
         * Cannot call flush_scheduled_work() here as desired because
         * of rtnl_lock(), but we do not really care about what would
         * be done, except from enable_irq(), so cancel any work
         * possibly pending and take care of the matter below.
         */
        cancel_work_sync(&phydev->phy_queue);

        /*
         * If work indeed has been cancelled, disable_irq() will have
         * been left unbalanced from phy_interrupt() and enable_irq()
         * has to be called so that other devices on the line work.
         */
        while (atomic_dec_return(&phydev->irq_disable) >= 0)
                enable_irq(phydev->irq);

        return err;
}
EXPORT_SYMBOL(phy_stop_interrupts);
/**
 * phy_change - Scheduled by the phy_interrupt/timer to handle PHY changes
 * @work: work_struct that describes the work to be done
 */
void phy_change(struct work_struct *work)
{
        int err;
        struct phy_device *phydev =
                container_of(work, struct phy_device, phy_queue);

        if (phydev->drv->did_interrupt &&
            !phydev->drv->did_interrupt(phydev))
                goto ignore;

        err = phy_disable_interrupts(phydev);

        if (err)
                goto phy_err;

        mutex_lock(&phydev->lock);
        if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
                phydev->state = PHY_CHANGELINK;
        mutex_unlock(&phydev->lock);

        atomic_dec(&phydev->irq_disable);
        enable_irq(phydev->irq);

        /* Reenable interrupts */
        if (PHY_HALTED != phydev->state)
                err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);

        if (err)
                goto irq_enable_err;

        /* reschedule state queue work to run as soon as possible */
        cancel_delayed_work_sync(&phydev->state_queue);
        queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);

        return;

ignore:
        atomic_dec(&phydev->irq_disable);
        enable_irq(phydev->irq);
        return;

irq_enable_err:
        disable_irq(phydev->irq);
        atomic_inc(&phydev->irq_disable);
phy_err:
        phy_error(phydev);
}
/**
 * phy_stop - Bring down the PHY link, and stop checking the status
 * @phydev: target phy_device struct
 */
void phy_stop(struct phy_device *phydev)
{
        mutex_lock(&phydev->lock);

        if (PHY_HALTED == phydev->state)
                goto out_unlock;

        if (phy_interrupt_is_valid(phydev)) {
                /* Disable PHY Interrupts */
                phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);

                /* Clear any pending interrupts */
                phy_clear_interrupt(phydev);
        }

        phydev->state = PHY_HALTED;

out_unlock:
        mutex_unlock(&phydev->lock);

        /*
         * Cannot call flush_scheduled_work() here as desired because
         * of rtnl_lock(), but PHY_HALTED shall guarantee phy_change()
         * will not reenable interrupts.
         */
}
/**
 * phy_start - start or restart a PHY device
 * @phydev: target phy_device struct
 *
 * Description: Indicates the attached device's readiness to
 * handle PHY-related work. Used during startup to start the
 * PHY, and after a call to phy_stop() to resume operation.
 * Also used to indicate the MDIO bus has cleared an error
 * condition.
 */
void phy_start(struct phy_device *phydev)
{
        mutex_lock(&phydev->lock);

        switch (phydev->state) {
        case PHY_STARTING:
                phydev->state = PHY_PENDING;
                break;
        case PHY_READY:
                phydev->state = PHY_UP;
                break;
        case PHY_HALTED:
                phydev->state = PHY_RESUMING;
        default:
                break;
        }
        mutex_unlock(&phydev->lock);
}
EXPORT_SYMBOL(phy_stop);
EXPORT_SYMBOL(phy_start);
/**
 * phy_state_machine - Handle the state machine
 * @work: work_struct that describes the work to be done
 */
void phy_state_machine(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);
        struct phy_device *phydev =
                container_of(dwork, struct phy_device, state_queue);
        int needs_aneg = 0;
        int err = 0;

        mutex_lock(&phydev->lock);

        if (phydev->adjust_state)
                phydev->adjust_state(phydev->attached_dev);

        switch (phydev->state) {
        case PHY_DOWN:
        case PHY_STARTING:
        case PHY_READY:
        case PHY_PENDING:
                break;
        case PHY_UP:
                needs_aneg = 1;

                phydev->link_timeout = PHY_AN_TIMEOUT;

                break;
        case PHY_AN:
                err = phy_read_status(phydev);

                if (err < 0)
                        break;

                /* If the link is down, give up on
                 * negotiation for now */
                if (!phydev->link) {
                        phydev->state = PHY_NOLINK;
                        netif_carrier_off(phydev->attached_dev);
                        phydev->adjust_link(phydev->attached_dev);
                        break;
                }

                /* Check if negotiation is done. Break
                 * if there's an error */
                err = phy_aneg_done(phydev);
                if (err < 0)
                        break;

                /* If AN is done, we're running */
                if (err > 0) {
                        phydev->state = PHY_RUNNING;
                        netif_carrier_on(phydev->attached_dev);
                        phydev->adjust_link(phydev->attached_dev);

                } else if (0 == phydev->link_timeout--) {
                        needs_aneg = 1;
                        /* If we have the magic_aneg bit,
                         * we try again */
                        if (phydev->drv->flags & PHY_HAS_MAGICANEG)
                                break;
                }
                break;
        case PHY_NOLINK:
                err = phy_read_status(phydev);

                if (err)
                        break;

                if (phydev->link) {
                        phydev->state = PHY_RUNNING;
                        netif_carrier_on(phydev->attached_dev);
                        phydev->adjust_link(phydev->attached_dev);
                }
                break;
        case PHY_FORCING:
                err = genphy_update_link(phydev);

                if (err)
                        break;

                if (phydev->link) {
                        phydev->state = PHY_RUNNING;
                        netif_carrier_on(phydev->attached_dev);
                } else {
                        if (0 == phydev->link_timeout--)
                                needs_aneg = 1;
                }

                phydev->adjust_link(phydev->attached_dev);
                break;
        case PHY_RUNNING:
                /* Only register a CHANGE if we are
                 * polling or ignoring interrupts
                 */
                if (!phy_interrupt_is_valid(phydev))
                        phydev->state = PHY_CHANGELINK;
                break;
        case PHY_CHANGELINK:
                err = phy_read_status(phydev);

                if (err)
                        break;

                if (phydev->link) {
                        phydev->state = PHY_RUNNING;
                        netif_carrier_on(phydev->attached_dev);
                } else {
                        phydev->state = PHY_NOLINK;
                        netif_carrier_off(phydev->attached_dev);
                }

                phydev->adjust_link(phydev->attached_dev);

                if (phy_interrupt_is_valid(phydev))
                        err = phy_config_interrupt(phydev,
                                                   PHY_INTERRUPT_ENABLED);
                break;
        case PHY_HALTED:
                if (phydev->link) {
                        phydev->link = 0;
                        netif_carrier_off(phydev->attached_dev);
                        phydev->adjust_link(phydev->attached_dev);
                }
                break;
        case PHY_RESUMING:
                err = phy_clear_interrupt(phydev);

                if (err)
                        break;

                err = phy_config_interrupt(phydev,
                                           PHY_INTERRUPT_ENABLED);

                if (err)
                        break;

                if (AUTONEG_ENABLE == phydev->autoneg) {
                        err = phy_aneg_done(phydev);
                        if (err < 0)
                                break;

                        /* err > 0 if AN is done.
                         * Otherwise, it's 0, and we're
                         * still waiting for AN */
                        if (err > 0) {
                                err = phy_read_status(phydev);
                                if (err)
                                        break;

                                if (phydev->link) {
                                        phydev->state = PHY_RUNNING;
                                        netif_carrier_on(phydev->attached_dev);
                                } else
                                        phydev->state = PHY_NOLINK;
                                phydev->adjust_link(phydev->attached_dev);
                        } else {
                                phydev->state = PHY_AN;
                                phydev->link_timeout = PHY_AN_TIMEOUT;
                        }
                } else {
                        err = phy_read_status(phydev);
                        if (err)
                                break;

                        if (phydev->link) {
                                phydev->state = PHY_RUNNING;
                                netif_carrier_on(phydev->attached_dev);
                        } else
                                phydev->state = PHY_NOLINK;
                        phydev->adjust_link(phydev->attached_dev);
                }
                break;
        }

        mutex_unlock(&phydev->lock);

        if (needs_aneg)
                err = phy_start_aneg(phydev);

        if (err < 0)
                phy_error(phydev);

        queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
                           PHY_STATE_TIME * HZ);
}
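
/**
 * phy_mac_interrupt - MAC says the link has changed
 * @phydev: phy_device struct with the changed link
 * @new_link: the new link state (1 for up, 0 for down)
 *
 * Description: Cancels any work already pending on phydev->phy_queue,
 * records the link state reported by the MAC, and schedules the work
 * again so the change is processed.
 */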
void phy_mac_interrupt(struct phy_device *phydev, int new_link)
{
        cancel_work_sync(&phydev->phy_queue);
        phydev->link = new_link;
        schedule_work(&phydev->phy_queue);
}
EXPORT_SYMBOL(phy_mac_interrupt);
static inline void mmd_phy_indirect(struct mii_bus *bus, int prtad, int devad,
                                    int addr)
{
        /* Write the desired MMD Devad */
        bus->write(bus, addr, MII_MMD_CTRL, devad);

        /* Write the desired MMD register address */
        bus->write(bus, addr, MII_MMD_DATA, prtad);

        /* Select the Function : DATA with no post increment */
        bus->write(bus, addr, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR));
}

/**
 * phy_read_mmd_indirect - reads data from the MMD registers
 * @bus: the target MII bus
 * @prtad: MMD Address
 * @devad: MMD DEVAD
 * @addr: PHY address on the MII bus
 *
 * Description: Reads data from the MMD registers of the PHY at the
 * specified address, using the clause 22 indirect access registers to
 * reach the clause 45 register space. The access sequence is:
 * 1) Write reg 13 // DEVAD
 * 2) Write reg 14 // MMD Address
 * 3) Write reg 13 // MMD Data Command for MMD DEVAD
 * 4) Read reg 14  // Read MMD data
 */
static int phy_read_mmd_indirect(struct mii_bus *bus, int prtad, int devad,
                                 int addr)
{
        u32 ret;

        mmd_phy_indirect(bus, prtad, devad, addr);

        /* Read the content of the MMD's selected register */
        ret = bus->read(bus, addr, MII_MMD_DATA);

        return ret;
}

/**
 * phy_write_mmd_indirect - writes data to the MMD registers
 * @bus: the target MII bus
 * @prtad: MMD Address
 * @devad: MMD DEVAD
 * @addr: PHY address on the MII bus
 * @data: data to write in the MMD register
 *
 * Description: Writes @data to the MMD registers of the PHY at the
 * specified address, using the same clause 22 indirect access sequence:
 * 1) Write reg 13 // DEVAD
 * 2) Write reg 14 // MMD Address
 * 3) Write reg 13 // MMD Data Command for MMD DEVAD
 * 4) Write reg 14 // Write MMD data
 */
static void phy_write_mmd_indirect(struct mii_bus *bus, int prtad, int devad,
                                   int addr, u32 data)
{
        mmd_phy_indirect(bus, prtad, devad, addr);

        /* Write the data into MMD's selected register */
        bus->write(bus, addr, MII_MMD_DATA, data);
}
/**
 * phy_init_eee - init and check the EEE feature
 * @phydev: target phy_device struct
 * @clk_stop_enable: PHY may stop the clock during LPI
 *
 * Description: Checks whether Energy-Efficient Ethernet (EEE) is
 * supported by looking at the MMD registers 3.20 and 7.60/61, and
 * programs the MMD register 3.0 to set the "Clock stop enable" bit
 * if required.
 */
int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
{
        int ret = -EPROTONOSUPPORT;

        /* According to 802.3az, EEE is supported only in full duplex mode.
         * Also, the EEE feature is active only when the core is operating
         * with MII, GMII or RGMII.
         */
        if ((phydev->duplex == DUPLEX_FULL) &&
            ((phydev->interface == PHY_INTERFACE_MODE_MII) ||
             (phydev->interface == PHY_INTERFACE_MODE_GMII) ||
             (phydev->interface == PHY_INTERFACE_MODE_RGMII))) {
                int eee_lp, eee_cap, eee_adv;
                u32 lp, cap, adv;
                int idx, status;

                /* Read phy status to properly get the right settings */
                status = phy_read_status(phydev);
                if (status)
                        return status;

                /* First check if the EEE ability is supported */
                eee_cap = phy_read_mmd_indirect(phydev->bus, MDIO_PCS_EEE_ABLE,
                                                MDIO_MMD_PCS, phydev->addr);
                if (eee_cap < 0)
                        return eee_cap;

                cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
                if (!cap)
                        goto eee_exit;

                /* Check which link settings negotiated and verify it in
                 * the EEE advertising registers.
                 */
                eee_lp = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_LPABLE,
                                               MDIO_MMD_AN, phydev->addr);
                if (eee_lp < 0)
                        return eee_lp;

                eee_adv = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV,
                                                MDIO_MMD_AN, phydev->addr);
                if (eee_adv < 0)
                        return eee_adv;

                adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
                lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
                idx = phy_find_setting(phydev->speed, phydev->duplex);
                if (!(lp & adv & settings[idx].setting))
                        goto eee_exit;

                if (clk_stop_enable) {
                        /* Configure the PHY to stop receiving xMII
                         * clock while it is signaling LPI.
                         */
                        int val = phy_read_mmd_indirect(phydev->bus, MDIO_CTRL1,
                                                        MDIO_MMD_PCS,
                                                        phydev->addr);
                        if (val < 0)
                                return val;

                        val |= MDIO_PCS_CTRL1_CLKSTOP_EN;
                        phy_write_mmd_indirect(phydev->bus, MDIO_CTRL1,
                                               MDIO_MMD_PCS, phydev->addr, val);
                }

                ret = 0; /* EEE supported */
        }

eee_exit:
        return ret;
}
EXPORT_SYMBOL(phy_init_eee);
/**
 * phy_get_eee_err - report the EEE wake error count
 * @phydev: target phy_device struct
 *
 * Description: Reports the number of times the PHY failed to complete
 * its normal wake sequence.
 */
int phy_get_eee_err(struct phy_device *phydev)
{
        return phy_read_mmd_indirect(phydev->bus, MDIO_PCS_EEE_WK_ERR,
                                     MDIO_MMD_PCS, phydev->addr);
}
EXPORT_SYMBOL(phy_get_eee_err);
/**
 * phy_ethtool_get_eee - get EEE supported and status
 * @phydev: target phy_device struct
 * @data: ethtool_eee data
 *
 * Description: Reports the Supported/Advertisement/LP Advertisement
 * capabilities.
 */
int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
{
        int val;

        /* Get Supported EEE */
        val = phy_read_mmd_indirect(phydev->bus, MDIO_PCS_EEE_ABLE,
                                    MDIO_MMD_PCS, phydev->addr);
        if (val < 0)
                return val;
        data->supported = mmd_eee_cap_to_ethtool_sup_t(val);

        /* Get advertisement EEE */
        val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV,
                                    MDIO_MMD_AN, phydev->addr);
        if (val < 0)
                return val;
        data->advertised = mmd_eee_adv_to_ethtool_adv_t(val);

        /* Get LP advertisement EEE */
        val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_LPABLE,
                                    MDIO_MMD_AN, phydev->addr);
        if (val < 0)
                return val;
        data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);

        return 0;
}
EXPORT_SYMBOL(phy_ethtool_get_eee);
/**
 * phy_ethtool_set_eee - set EEE supported and status
 * @phydev: target phy_device struct
 * @data: ethtool_eee data
 *
 * Description: Programs the EEE advertisement register with the link
 * modes requested in @data.
 */
int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
{
        int val;

        val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
        phy_write_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV, MDIO_MMD_AN,
                               phydev->addr, val);

        return 0;
}
EXPORT_SYMBOL(phy_ethtool_set_eee);
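
/**
 * phy_ethtool_set_wol - configure Wake-on-LAN through the PHY
 * @phydev: target phy_device struct
 * @wol: the ethtool Wake-on-LAN parameters to apply
 *
 * Description: Hands the request off to the PHY driver's set_wol
 * callback, or returns -EOPNOTSUPP if the driver has none.
 */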
int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
{
        if (phydev->drv->set_wol)
                return phydev->drv->set_wol(phydev, wol);

        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(phy_ethtool_set_wol);
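
/**
 * phy_ethtool_get_wol - report the PHY's Wake-on-LAN settings
 * @phydev: target phy_device struct
 * @wol: ethtool Wake-on-LAN structure to fill in
 *
 * Description: Queries the PHY driver's get_wol callback, if any;
 * otherwise @wol is left untouched.
 */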
void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
{
        if (phydev->drv->get_wol)
                phydev->drv->get_wol(phydev, wol);
}
EXPORT_SYMBOL(phy_ethtool_get_wol);