/*
 * drivers/net/phy/phy.c
 *
 * Framework for configuring and reading PHY devices
 * Based on code in sungem_phy.c and gianfar_phy.c
 *
 * Author: Andy Fleming
 *
 * Copyright (c) 2004 Freescale Semiconductor, Inc.
 * Copyright (c) 2006, 2007 Maciej W. Rozycki
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

/**
 * phy_print_status - Convenience function to print out the current phy status
 * @phydev: the phy_device struct
 */
void phy_print_status(struct phy_device *phydev)
{
	pr_info("PHY: %s - Link is %s", dev_name(&phydev->dev),
			phydev->link ? "Up" : "Down");
	if (phydev->link)
		printk(" - %d/%s", phydev->speed,
				DUPLEX_FULL == phydev->duplex ?
				"Full" : "Half");

	printk("\n");
}
EXPORT_SYMBOL(phy_print_status);
/**
 * phy_clear_interrupt - Ack the phy device's interrupt
 * @phydev: the phy_device struct
 *
 * If the @phydev driver has an ack_interrupt function, call it to
 * ack and clear the phy device's interrupt.
 *
 * Returns 0 on success or < 0 on error.
 */
int phy_clear_interrupt(struct phy_device *phydev)
{
	int err = 0;

	if (phydev->drv->ack_interrupt)
		err = phydev->drv->ack_interrupt(phydev);

	return err;
}

/**
 * phy_config_interrupt - configure the PHY device for the requested interrupts
 * @phydev: the phy_device struct
 * @interrupts: interrupt flags to configure for this @phydev
 *
 * Returns 0 on success or < 0 on error.
 */
int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
{
	int err = 0;

	phydev->interrupts = interrupts;
	if (phydev->drv->config_intr)
		err = phydev->drv->config_intr(phydev);

	return err;
}

/**
 * phy_aneg_done - return auto-negotiation status
 * @phydev: target phy_device struct
 *
 * Description: Reads the status register and returns 0 either if
 *   auto-negotiation is incomplete, or if there was an error.
 *   Returns BMSR_ANEGCOMPLETE if auto-negotiation is done.
 */
static inline int phy_aneg_done(struct phy_device *phydev)
{
	int retval;

	retval = phy_read(phydev, MII_BMSR);

	return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
}

/* A structure for mapping a particular speed and duplex
 * combination to a particular SUPPORTED and ADVERTISED value */
struct phy_setting {
	int speed;
	int duplex;
	u32 setting;
};

/* A mapping of all SUPPORTED settings to speed/duplex */
static const struct phy_setting settings[] = {
	{
		.speed = 10000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10000baseT_Full,
	},
	{
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_1000baseT_Full,
	},
	{
		.speed = SPEED_1000,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_1000baseT_Half,
	},
	{
		.speed = SPEED_100,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_100baseT_Full,
	},
	{
		.speed = SPEED_100,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_100baseT_Half,
	},
	{
		.speed = SPEED_10,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10baseT_Full,
	},
	{
		.speed = SPEED_10,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_10baseT_Half,
	},
};

#define MAX_NUM_SETTINGS ARRAY_SIZE(settings)

/**
 * phy_find_setting - find a PHY settings array entry that matches speed & duplex
 * @speed: speed to match
 * @duplex: duplex to match
 *
 * Description: Searches the settings array for the setting which
 *   matches the desired speed and duplex, and returns the index
 *   of that setting.  Returns the index of the last setting if
 *   none of the others match.
 */
static inline int phy_find_setting(int speed, int duplex)
{
	int idx = 0;

	while (idx < ARRAY_SIZE(settings) &&
			(settings[idx].speed != speed ||
			settings[idx].duplex != duplex))
		idx++;

	return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
}

/**
 * phy_find_valid - find a PHY setting that matches the requested features mask
 * @idx: The first index in settings[] to search
 * @features: A mask of the valid settings
 *
 * Description: Returns the index of the first valid setting less
 *   than or equal to the one pointed to by idx, as determined by
 *   the mask in features.  Returns the index of the last setting
 *   if nothing else matches.
 */
static inline int phy_find_valid(int idx, u32 features)
{
	while (idx < MAX_NUM_SETTINGS && !(settings[idx].setting & features))
		idx++;

	return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
}

/**
 * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex
 * @phydev: the target phy_device struct
 *
 * Description: Make sure the PHY is set to supported speeds and
 *   duplexes.  Drop down by one in this order:  1000/FULL,
 *   1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF.
 */
void phy_sanitize_settings(struct phy_device *phydev)
{
	u32 features = phydev->supported;
	int idx;

	/* Sanitize settings based on PHY capabilities */
	if ((features & SUPPORTED_Autoneg) == 0)
		phydev->autoneg = AUTONEG_DISABLE;

	idx = phy_find_valid(phy_find_setting(phydev->speed, phydev->duplex),
			features);

	phydev->speed = settings[idx].speed;
	phydev->duplex = settings[idx].duplex;
}
EXPORT_SYMBOL(phy_sanitize_settings);

/**
 * phy_ethtool_sset - generic ethtool sset function, handles all the details
 * @phydev: target phy_device struct
 * @cmd: ethtool_cmd
 *
 * A few notes about parameter checking:
 * - We don't set port or transceiver, so we don't care what they
 *   were set to.
 * - phy_start_aneg() will make sure forced settings are sane, and
 *   choose the next best ones from the ones selected, so we don't
 *   care if ethtool tries to give us bad values.
 */
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
	if (cmd->phy_address != phydev->addr)
		return -EINVAL;

	/* We make sure that we don't pass unsupported
	 * values in to the PHY */
	cmd->advertising &= phydev->supported;

	/* Verify the settings we care about. */
	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    ((cmd->speed != SPEED_1000 &&
	      cmd->speed != SPEED_100 &&
	      cmd->speed != SPEED_10) ||
	     (cmd->duplex != DUPLEX_HALF &&
	      cmd->duplex != DUPLEX_FULL)))
		return -EINVAL;

	phydev->autoneg = cmd->autoneg;

	phydev->speed = cmd->speed;

	phydev->advertising = cmd->advertising;

	if (AUTONEG_ENABLE == cmd->autoneg)
		phydev->advertising |= ADVERTISED_Autoneg;
	else
		phydev->advertising &= ~ADVERTISED_Autoneg;

	phydev->duplex = cmd->duplex;

	/* Restart the PHY */
	phy_start_aneg(phydev);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_sset);

int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
	cmd->supported = phydev->supported;

	cmd->advertising = phydev->advertising;

	cmd->speed = phydev->speed;
	cmd->duplex = phydev->duplex;
	cmd->port = PORT_MII;
	cmd->phy_address = phydev->addr;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = phydev->autoneg;

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_gset);
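
/*
 * Example (not part of the original file): a minimal sketch of how a MAC
 * driver might delegate its ethtool get/set_settings operations to the two
 * helpers above.  The private struct "myeth_priv" and the function names are
 * hypothetical; only the phy_ethtool_gset()/phy_ethtool_sset() calls come
 * from this file.
 */
#if 0	/* illustrative only, not compiled */
struct myeth_priv {
	struct phy_device *phydev;	/* attached via phy_connect() elsewhere */
};

static int myeth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct myeth_priv *priv = netdev_priv(dev);

	if (!priv->phydev)
		return -ENODEV;

	/* Report the PHY's current supported/advertised/speed/duplex state */
	return phy_ethtool_gset(priv->phydev, cmd);
}

static int myeth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct myeth_priv *priv = netdev_priv(dev);

	if (!priv->phydev)
		return -ENODEV;

	/* phy_ethtool_sset() validates the request and restarts aneg */
	return phy_ethtool_sset(priv->phydev, cmd);
}
#endif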
/**
 * phy_mii_ioctl - generic PHY MII ioctl interface
 * @phydev: the phy_device struct
 * @mii_data: MII ioctl data
 * @cmd: ioctl cmd to execute
 *
 * Note that this function is currently incompatible with the
 * PHYCONTROL layer.  It changes registers without regard to
 * current state.  Use at own risk.
 */
int phy_mii_ioctl(struct phy_device *phydev,
		struct mii_ioctl_data *mii_data, int cmd)
{
	u16 val = mii_data->val_in;

	switch (cmd) {
	case SIOCGMIIPHY:
		mii_data->phy_id = phydev->addr;
		/* fall through */

	case SIOCGMIIREG:
		mii_data->val_out = phy_read(phydev, mii_data->reg_num);
		break;

	case SIOCSMIIREG:
		if (mii_data->phy_id == phydev->addr) {
			switch(mii_data->reg_num) {
			case MII_BMCR:
				if ((val & (BMCR_RESET|BMCR_ANENABLE)) == 0)
					phydev->autoneg = AUTONEG_DISABLE;
				else
					phydev->autoneg = AUTONEG_ENABLE;
				if ((!phydev->autoneg) && (val & BMCR_FULLDPLX))
					phydev->duplex = DUPLEX_FULL;
				else
					phydev->duplex = DUPLEX_HALF;
				if ((!phydev->autoneg) &&
						(val & BMCR_SPEED1000))
					phydev->speed = SPEED_1000;
				else if ((!phydev->autoneg) &&
						(val & BMCR_SPEED100))
					phydev->speed = SPEED_100;
				break;
			case MII_ADVERTISE:
				phydev->advertising = val;
				break;
			default:
				/* do nothing */
				break;
			}
		}

		phy_write(phydev, mii_data->reg_num, val);

		if (mii_data->reg_num == MII_BMCR &&
		    val & BMCR_RESET &&
		    phydev->drv->config_init) {
			phy_scan_fixups(phydev);
			phydev->drv->config_init(phydev);
		}
		break;

	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
EXPORT_SYMBOL(phy_mii_ioctl);
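
/*
 * Example (not part of the original file): a hypothetical net_device ioctl
 * handler forwarding SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG to phy_mii_ioctl().
 * "myeth_priv" is the assumed private struct sketched earlier.
 */
#if 0	/* illustrative only, not compiled */
static int myeth_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct myeth_priv *priv = netdev_priv(dev);

	if (!netif_running(dev) || !priv->phydev)
		return -EINVAL;

	/* if_mii() extracts the MII request embedded in the ifreq */
	return phy_mii_ioctl(priv->phydev, if_mii(ifr), cmd);
}
#endif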
/**
 * phy_start_aneg - start auto-negotiation for this PHY device
 * @phydev: the phy_device struct
 *
 * Description: Sanitizes the settings (if we're not autonegotiating
 *   them), and then calls the driver's config_aneg function.
 *   If the PHYCONTROL Layer is operating, we change the state to
 *   reflect the beginning of Auto-negotiation or forcing.
 */
int phy_start_aneg(struct phy_device *phydev)
{
	int err;

	mutex_lock(&phydev->lock);

	if (AUTONEG_DISABLE == phydev->autoneg)
		phy_sanitize_settings(phydev);

	err = phydev->drv->config_aneg(phydev);

	if (err < 0)
		goto out_unlock;

	if (phydev->state != PHY_HALTED) {
		if (AUTONEG_ENABLE == phydev->autoneg) {
			phydev->state = PHY_AN;
			phydev->link_timeout = PHY_AN_TIMEOUT;
		} else {
			phydev->state = PHY_FORCING;
			phydev->link_timeout = PHY_FORCE_TIMEOUT;
		}
	}

out_unlock:
	mutex_unlock(&phydev->lock);
	return err;
}
EXPORT_SYMBOL(phy_start_aneg);
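
/*
 * Example (not part of the original file): a sketch of how a driver might
 * narrow the advertised link modes and then restart auto-negotiation with
 * the helper above.  The specific mask and function name are illustrative
 * assumptions, not part of this file's API.
 */
#if 0	/* illustrative only, not compiled */
static int myeth_limit_to_100mbit(struct phy_device *phydev)
{
	/* Advertise only 10/100 modes that the PHY actually supports */
	phydev->advertising &= phydev->supported &
		(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
		 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
		 ADVERTISED_Autoneg);
	phydev->autoneg = AUTONEG_ENABLE;

	/* Push the new advertisement to the PHY and restart aneg */
	return phy_start_aneg(phydev);
}
#endif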
static void phy_change(struct work_struct *work);

/**
 * phy_start_machine - start PHY state machine tracking
 * @phydev: the phy_device struct
 * @handler: callback function for state change notifications
 *
 * Description: The PHY infrastructure can run a state machine
 *   which tracks whether the PHY is starting up, negotiating,
 *   etc.  This function starts the timer which tracks the state
 *   of the PHY.  If you want to be notified when the state changes,
 *   pass in the callback @handler, otherwise, pass NULL.  If you
 *   want to maintain your own state machine, do not call this
 *   function.
 */
void phy_start_machine(struct phy_device *phydev,
		void (*handler)(struct net_device *))
{
	phydev->adjust_state = handler;

	schedule_delayed_work(&phydev->state_queue, HZ);
}

/**
 * phy_stop_machine - stop the PHY state machine tracking
 * @phydev: target phy_device struct
 *
 * Description: Stops the state machine timer, sets the state to UP
 *   (unless it wasn't up yet). This function must be called BEFORE
 *   phy_detach.
 */
void phy_stop_machine(struct phy_device *phydev)
{
	cancel_delayed_work_sync(&phydev->state_queue);

	mutex_lock(&phydev->lock);
	if (phydev->state > PHY_UP)
		phydev->state = PHY_UP;
	mutex_unlock(&phydev->lock);

	phydev->adjust_state = NULL;
}
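
/*
 * Example (not part of the original file): a sketch of pairing
 * phy_start_machine()/phy_stop_machine() for a driver that manages the
 * attach/detach sequence itself rather than using the usual connect
 * helpers.  The function name is hypothetical.
 */
#if 0	/* illustrative only, not compiled */
static void myeth_toggle_state_machine(struct phy_device *phydev, int on)
{
	if (on)
		/* Poll the state machine; no extra notification hook */
		phy_start_machine(phydev, NULL);
	else
		/* Must be called before phy_detach() */
		phy_stop_machine(phydev);
}
#endif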
/**
 * phy_force_reduction - reduce PHY speed/duplex settings by one step
 * @phydev: target phy_device struct
 *
 * Description: Reduces the speed/duplex settings by one notch,
 *   in this order--
 *   1000/FULL, 1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF.
 *   The function bottoms out at 10/HALF.
 */
static void phy_force_reduction(struct phy_device *phydev)
{
	int idx;

	idx = phy_find_setting(phydev->speed, phydev->duplex);

	idx++;

	idx = phy_find_valid(idx, phydev->supported);

	phydev->speed = settings[idx].speed;
	phydev->duplex = settings[idx].duplex;

	pr_info("Trying %d/%s\n", phydev->speed,
			DUPLEX_FULL == phydev->duplex ?
			"FULL" : "HALF");
}

/**
 * phy_error - enter HALTED state for this PHY device
 * @phydev: target phy_device struct
 *
 * Moves the PHY to the HALTED state in response to a read
 * or write error, and tells the controller the link is down.
 * Must not be called from interrupt context, or while the
 * phydev->lock is held.
 */
static void phy_error(struct phy_device *phydev)
{
	mutex_lock(&phydev->lock);
	phydev->state = PHY_HALTED;
	mutex_unlock(&phydev->lock);
}

/**
 * phy_interrupt - PHY interrupt handler
 * @irq: interrupt line
 * @phy_dat: phy_device pointer
 *
 * Description: When a PHY interrupt occurs, the handler disables
 *   interrupts, and schedules a work task to clear the interrupt.
 */
static irqreturn_t phy_interrupt(int irq, void *phy_dat)
{
	struct phy_device *phydev = phy_dat;

	if (PHY_HALTED == phydev->state)
		return IRQ_NONE;		/* It can't be ours. */

	/* The MDIO bus is not allowed to be written in interrupt
	 * context, so we need to disable the irq here.  A work
	 * queue will write the PHY to disable and clear the
	 * interrupt, and then reenable the irq line. */
	disable_irq_nosync(irq);
	atomic_inc(&phydev->irq_disable);

	schedule_work(&phydev->phy_queue);

	return IRQ_HANDLED;
}

/**
 * phy_enable_interrupts - Enable the interrupts from the PHY side
 * @phydev: target phy_device struct
 */
int phy_enable_interrupts(struct phy_device *phydev)
{
	int err;

	err = phy_clear_interrupt(phydev);

	if (err < 0)
		return err;

	err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);

	return err;
}
EXPORT_SYMBOL(phy_enable_interrupts);

/**
 * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
 * @phydev: target phy_device struct
 */
int phy_disable_interrupts(struct phy_device *phydev)
{
	int err;

	/* Disable PHY interrupts */
	err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);

	if (err)
		goto phy_err;

	/* Clear the interrupt */
	err = phy_clear_interrupt(phydev);

	if (err)
		goto phy_err;

	return 0;

phy_err:
	phy_error(phydev);

	return err;
}
EXPORT_SYMBOL(phy_disable_interrupts);

/**
 * phy_start_interrupts - request and enable interrupts for a PHY device
 * @phydev: target phy_device struct
 *
 * Description: Request the interrupt for the given PHY.
 *   If this fails, then we set irq to PHY_POLL.
 *   Otherwise, we enable the interrupts in the PHY.
 *   This should only be called with a valid IRQ number.
 *   Returns 0 on success or < 0 on error.
 */
int phy_start_interrupts(struct phy_device *phydev)
{
	int err = 0;

	INIT_WORK(&phydev->phy_queue, phy_change);

	atomic_set(&phydev->irq_disable, 0);
	if (request_irq(phydev->irq, phy_interrupt,
				IRQF_SHARED,
				"phy_interrupt",
				phydev) < 0) {
		printk(KERN_WARNING "%s: Can't get IRQ %d (PHY)\n",
				phydev->bus->name,
				phydev->irq);
		phydev->irq = PHY_POLL;
		return 0;
	}

	err = phy_enable_interrupts(phydev);

	return err;
}
EXPORT_SYMBOL(phy_start_interrupts);
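
/*
 * Example (not part of the original file): a sketch of how a driver might
 * enable PHY interrupts when an IRQ line is wired up, relying on polling
 * otherwise.  The function name and warning text are assumptions.
 */
#if 0	/* illustrative only, not compiled */
static void myeth_setup_phy_irq(struct phy_device *phydev)
{
	/* PHY_POLL means no interrupt line: the state machine will poll */
	if (phydev->irq != PHY_POLL) {
		if (phy_start_interrupts(phydev))
			printk(KERN_WARNING
			       "myeth: could not enable PHY interrupts\n");
	}
}
#endif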
/**
 * phy_stop_interrupts - disable interrupts from a PHY device
 * @phydev: target phy_device struct
 */
int phy_stop_interrupts(struct phy_device *phydev)
{
	int err;

	err = phy_disable_interrupts(phydev);

	if (err)
		phy_error(phydev);

	free_irq(phydev->irq, phydev);

	/*
	 * Cannot call flush_scheduled_work() here as desired because
	 * of rtnl_lock(), but we do not really care about what would
	 * be done, except from enable_irq(), so cancel any work
	 * possibly pending and take care of the matter below.
	 */
	cancel_work_sync(&phydev->phy_queue);
	/*
	 * If work indeed has been cancelled, disable_irq() will have
	 * been left unbalanced from phy_interrupt() and enable_irq()
	 * has to be called so that other devices on the line work.
	 */
	while (atomic_dec_return(&phydev->irq_disable) >= 0)
		enable_irq(phydev->irq);

	return err;
}
EXPORT_SYMBOL(phy_stop_interrupts);

/**
 * phy_change - Scheduled by the phy_interrupt/timer to handle PHY changes
 * @work: work_struct that describes the work to be done
 */
static void phy_change(struct work_struct *work)
{
	int err;
	struct phy_device *phydev =
		container_of(work, struct phy_device, phy_queue);

	if (phydev->drv->did_interrupt &&
	    !phydev->drv->did_interrupt(phydev))
		goto ignore;

	err = phy_disable_interrupts(phydev);

	if (err)
		goto phy_err;

	mutex_lock(&phydev->lock);
	if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
		phydev->state = PHY_CHANGELINK;
	mutex_unlock(&phydev->lock);

	atomic_dec(&phydev->irq_disable);
	enable_irq(phydev->irq);

	/* Reenable interrupts */
	if (PHY_HALTED != phydev->state)
		err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);

	if (err)
		goto irq_enable_err;

	/* reschedule state queue work to run as soon as possible */
	cancel_delayed_work_sync(&phydev->state_queue);
	schedule_delayed_work(&phydev->state_queue, 0);

	return;

ignore:
	atomic_dec(&phydev->irq_disable);
	enable_irq(phydev->irq);
	return;

irq_enable_err:
	disable_irq(phydev->irq);
	atomic_inc(&phydev->irq_disable);
phy_err:
	phy_error(phydev);
}

/**
 * phy_stop - Bring down the PHY link, and stop checking the status
 * @phydev: target phy_device struct
 */
void phy_stop(struct phy_device *phydev)
{
	mutex_lock(&phydev->lock);

	if (PHY_HALTED == phydev->state)
		goto out_unlock;

	if (phydev->irq != PHY_POLL) {
		/* Disable PHY Interrupts */
		phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);

		/* Clear any pending interrupts */
		phy_clear_interrupt(phydev);
	}

	phydev->state = PHY_HALTED;

out_unlock:
	mutex_unlock(&phydev->lock);

	/*
	 * Cannot call flush_scheduled_work() here as desired because
	 * of rtnl_lock(), but PHY_HALTED shall guarantee phy_change()
	 * will not reenable interrupts.
	 */
}

/**
 * phy_start - start or restart a PHY device
 * @phydev: target phy_device struct
 *
 * Description: Indicates the attached device's readiness to
 *   handle PHY-related work.  Used during startup to start the
 *   PHY, and after a call to phy_stop() to resume operation.
 *   Also used to indicate the MDIO bus has cleared an error
 *   condition.
 */
void phy_start(struct phy_device *phydev)
{
	mutex_lock(&phydev->lock);

	switch (phydev->state) {
	case PHY_STARTING:
		phydev->state = PHY_PENDING;
		break;
	case PHY_READY:
		phydev->state = PHY_UP;
		break;
	case PHY_HALTED:
		phydev->state = PHY_RESUMING;
	default:
		break;
	}
	mutex_unlock(&phydev->lock);
}
EXPORT_SYMBOL(phy_stop);
EXPORT_SYMBOL(phy_start);
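
/*
 * Example (not part of the original file): a sketch of the usual open/close
 * pairing from a MAC driver's point of view.  phy_connect() and
 * phy_disconnect() live in phy_device.c, not here; the bus id string,
 * interface mode and adjust_link handler below are assumptions for
 * illustration only.
 */
#if 0	/* illustrative only, not compiled */
static void myeth_adjust_link(struct net_device *dev)
{
	struct myeth_priv *priv = netdev_priv(dev);

	/* Reprogram the MAC for the new speed/duplex here, then log it */
	phy_print_status(priv->phydev);
}

static int myeth_open(struct net_device *dev)
{
	struct myeth_priv *priv = netdev_priv(dev);

	priv->phydev = phy_connect(dev, "0:01", &myeth_adjust_link,
				   0, PHY_INTERFACE_MODE_MII);
	if (IS_ERR(priv->phydev))
		return PTR_ERR(priv->phydev);

	phy_start(priv->phydev);	/* kick the state machine */
	return 0;
}

static int myeth_close(struct net_device *dev)
{
	struct myeth_priv *priv = netdev_priv(dev);

	phy_stop(priv->phydev);		/* halt status checking first */
	phy_disconnect(priv->phydev);
	priv->phydev = NULL;
	return 0;
}
#endif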
/**
 * phy_state_machine - Handle the state machine
 * @work: work_struct that describes the work to be done
 */
void phy_state_machine(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct phy_device *phydev =
			container_of(dwork, struct phy_device, state_queue);
	int needs_aneg = 0;
	int err = 0;

	mutex_lock(&phydev->lock);

	if (phydev->adjust_state)
		phydev->adjust_state(phydev->attached_dev);

	switch(phydev->state) {
	case PHY_DOWN:
	case PHY_STARTING:
	case PHY_READY:
	case PHY_PENDING:
		break;
	case PHY_UP:
		needs_aneg = 1;

		phydev->link_timeout = PHY_AN_TIMEOUT;

		break;
	case PHY_AN:
		err = phy_read_status(phydev);

		if (err < 0)
			break;

		/* If the link is down, give up on
		 * negotiation for now */
		if (!phydev->link) {
			phydev->state = PHY_NOLINK;
			netif_carrier_off(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);
			break;
		}

		/* Check if negotiation is done.  Break
		 * if there's an error */
		err = phy_aneg_done(phydev);
		if (err < 0)
			break;

		/* If AN is done, we're running */
		if (err > 0) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);

		} else if (0 == phydev->link_timeout--) {
			int idx;

			needs_aneg = 1;
			/* If we have the magic_aneg bit,
			 * we try again */
			if (phydev->drv->flags & PHY_HAS_MAGICANEG)
				break;

			/* The timer expired, and we still
			 * don't have a setting, so we try
			 * forcing it until we find one that
			 * works, starting from the fastest speed,
			 * and working our way down */
			idx = phy_find_valid(0, phydev->supported);

			phydev->speed = settings[idx].speed;
			phydev->duplex = settings[idx].duplex;

			phydev->autoneg = AUTONEG_DISABLE;

			pr_info("Trying %d/%s\n", phydev->speed,
					DUPLEX_FULL ==
					phydev->duplex ?
					"FULL" : "HALF");
		}
		break;
	case PHY_NOLINK:
		err = phy_read_status(phydev);

		if (err)
			break;

		if (phydev->link) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);
		}
		break;
	case PHY_FORCING:
		err = genphy_update_link(phydev);

		if (err)
			break;

		if (phydev->link) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
		} else {
			if (0 == phydev->link_timeout--) {
				phy_force_reduction(phydev);
				needs_aneg = 1;
			}
		}

		phydev->adjust_link(phydev->attached_dev);
		break;
	case PHY_RUNNING:
		/* Only register a CHANGE if we are
		 * polling */
		if (PHY_POLL == phydev->irq)
			phydev->state = PHY_CHANGELINK;
		break;
	case PHY_CHANGELINK:
		err = phy_read_status(phydev);

		if (err)
			break;

		if (phydev->link) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
		} else {
			phydev->state = PHY_NOLINK;
			netif_carrier_off(phydev->attached_dev);
		}

		phydev->adjust_link(phydev->attached_dev);

		if (PHY_POLL != phydev->irq)
			err = phy_config_interrupt(phydev,
					PHY_INTERRUPT_ENABLED);
		break;
	case PHY_HALTED:
		if (phydev->link) {
			phydev->link = 0;
			netif_carrier_off(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);
		}
		break;
	case PHY_RESUMING:

		err = phy_clear_interrupt(phydev);

		if (err)
			break;

		err = phy_config_interrupt(phydev,
				PHY_INTERRUPT_ENABLED);

		if (err)
			break;

		if (AUTONEG_ENABLE == phydev->autoneg) {
			err = phy_aneg_done(phydev);
			if (err < 0)
				break;

			/* err > 0 if AN is done.
			 * Otherwise, it's 0, and we're
			 * still waiting for AN */
			if (err > 0) {
				err = phy_read_status(phydev);
				if (err)
					break;

				if (phydev->link) {
					phydev->state = PHY_RUNNING;
					netif_carrier_on(phydev->attached_dev);
				} else
					phydev->state = PHY_NOLINK;
				phydev->adjust_link(phydev->attached_dev);
			} else {
				phydev->state = PHY_AN;
				phydev->link_timeout = PHY_AN_TIMEOUT;
			}
		} else {
			err = phy_read_status(phydev);
			if (err)
				break;

			if (phydev->link) {
				phydev->state = PHY_RUNNING;
				netif_carrier_on(phydev->attached_dev);
			} else
				phydev->state = PHY_NOLINK;
			phydev->adjust_link(phydev->attached_dev);
		}
		break;
	}

	mutex_unlock(&phydev->lock);

	if (needs_aneg)
		err = phy_start_aneg(phydev);

	if (err < 0)
		phy_error(phydev);

	schedule_delayed_work(&phydev->state_queue, PHY_STATE_TIME * HZ);
}