phy.c 23 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962
/*
 * drivers/net/phy/phy.c
 *
 * Framework for configuring and reading PHY devices
 * Based on code in sungem_phy.c and gianfar_phy.c
 *
 * Author: Andy Fleming
 *
 * Copyright (c) 2004 Freescale Semiconductor, Inc.
 * Copyright (c) 2006, 2007 Maciej W. Rozycki
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */
  18. #include <linux/kernel.h>
  19. #include <linux/string.h>
  20. #include <linux/errno.h>
  21. #include <linux/unistd.h>
  22. #include <linux/slab.h>
  23. #include <linux/interrupt.h>
  24. #include <linux/init.h>
  25. #include <linux/delay.h>
  26. #include <linux/netdevice.h>
  27. #include <linux/etherdevice.h>
  28. #include <linux/skbuff.h>
  29. #include <linux/mm.h>
  30. #include <linux/module.h>
  31. #include <linux/mii.h>
  32. #include <linux/ethtool.h>
  33. #include <linux/phy.h>
  34. #include <linux/timer.h>
  35. #include <linux/workqueue.h>
  36. #include <asm/atomic.h>
  37. #include <asm/io.h>
  38. #include <asm/irq.h>
  39. #include <asm/uaccess.h>
  40. /**
  41. * phy_print_status - Convenience function to print out the current phy status
  42. * @phydev: the phy_device struct
  43. */
  44. void phy_print_status(struct phy_device *phydev)
  45. {
  46. pr_info("PHY: %s - Link is %s", dev_name(&phydev->dev),
  47. phydev->link ? "Up" : "Down");
  48. if (phydev->link)
  49. printk(" - %d/%s", phydev->speed,
  50. DUPLEX_FULL == phydev->duplex ?
  51. "Full" : "Half");
  52. printk("\n");
  53. }
  54. EXPORT_SYMBOL(phy_print_status);
  55. /**
  56. * phy_clear_interrupt - Ack the phy device's interrupt
  57. * @phydev: the phy_device struct
  58. *
  59. * If the @phydev driver has an ack_interrupt function, call it to
  60. * ack and clear the phy device's interrupt.
  61. *
  62. * Returns 0 on success on < 0 on error.
  63. */
  64. int phy_clear_interrupt(struct phy_device *phydev)
  65. {
  66. int err = 0;
  67. if (phydev->drv->ack_interrupt)
  68. err = phydev->drv->ack_interrupt(phydev);
  69. return err;
  70. }
  71. /**
  72. * phy_config_interrupt - configure the PHY device for the requested interrupts
  73. * @phydev: the phy_device struct
  74. * @interrupts: interrupt flags to configure for this @phydev
  75. *
  76. * Returns 0 on success on < 0 on error.
  77. */
  78. int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
  79. {
  80. int err = 0;
  81. phydev->interrupts = interrupts;
  82. if (phydev->drv->config_intr)
  83. err = phydev->drv->config_intr(phydev);
  84. return err;
  85. }
  86. /**
  87. * phy_aneg_done - return auto-negotiation status
  88. * @phydev: target phy_device struct
  89. *
  90. * Description: Reads the status register and returns 0 either if
  91. * auto-negotiation is incomplete, or if there was an error.
  92. * Returns BMSR_ANEGCOMPLETE if auto-negotiation is done.
  93. */
  94. static inline int phy_aneg_done(struct phy_device *phydev)
  95. {
  96. int retval;
  97. retval = phy_read(phydev, MII_BMSR);
  98. return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
  99. }
  100. /* A structure for mapping a particular speed and duplex
  101. * combination to a particular SUPPORTED and ADVERTISED value */
  102. struct phy_setting {
  103. int speed;
  104. int duplex;
  105. u32 setting;
  106. };
  107. /* A mapping of all SUPPORTED settings to speed/duplex */
  108. static const struct phy_setting settings[] = {
  109. {
  110. .speed = 10000,
  111. .duplex = DUPLEX_FULL,
  112. .setting = SUPPORTED_10000baseT_Full,
  113. },
  114. {
  115. .speed = SPEED_1000,
  116. .duplex = DUPLEX_FULL,
  117. .setting = SUPPORTED_1000baseT_Full,
  118. },
  119. {
  120. .speed = SPEED_1000,
  121. .duplex = DUPLEX_HALF,
  122. .setting = SUPPORTED_1000baseT_Half,
  123. },
  124. {
  125. .speed = SPEED_100,
  126. .duplex = DUPLEX_FULL,
  127. .setting = SUPPORTED_100baseT_Full,
  128. },
  129. {
  130. .speed = SPEED_100,
  131. .duplex = DUPLEX_HALF,
  132. .setting = SUPPORTED_100baseT_Half,
  133. },
  134. {
  135. .speed = SPEED_10,
  136. .duplex = DUPLEX_FULL,
  137. .setting = SUPPORTED_10baseT_Full,
  138. },
  139. {
  140. .speed = SPEED_10,
  141. .duplex = DUPLEX_HALF,
  142. .setting = SUPPORTED_10baseT_Half,
  143. },
  144. };
  145. #define MAX_NUM_SETTINGS ARRAY_SIZE(settings)
  146. /**
  147. * phy_find_setting - find a PHY settings array entry that matches speed & duplex
  148. * @speed: speed to match
  149. * @duplex: duplex to match
  150. *
  151. * Description: Searches the settings array for the setting which
  152. * matches the desired speed and duplex, and returns the index
  153. * of that setting. Returns the index of the last setting if
  154. * none of the others match.
  155. */
  156. static inline int phy_find_setting(int speed, int duplex)
  157. {
  158. int idx = 0;
  159. while (idx < ARRAY_SIZE(settings) &&
  160. (settings[idx].speed != speed ||
  161. settings[idx].duplex != duplex))
  162. idx++;
  163. return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
  164. }
  165. /**
  166. * phy_find_valid - find a PHY setting that matches the requested features mask
  167. * @idx: The first index in settings[] to search
  168. * @features: A mask of the valid settings
  169. *
  170. * Description: Returns the index of the first valid setting less
  171. * than or equal to the one pointed to by idx, as determined by
  172. * the mask in features. Returns the index of the last setting
  173. * if nothing else matches.
  174. */
  175. static inline int phy_find_valid(int idx, u32 features)
  176. {
  177. while (idx < MAX_NUM_SETTINGS && !(settings[idx].setting & features))
  178. idx++;
  179. return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
  180. }
  181. /**
  182. * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex
  183. * @phydev: the target phy_device struct
  184. *
  185. * Description: Make sure the PHY is set to supported speeds and
  186. * duplexes. Drop down by one in this order: 1000/FULL,
  187. * 1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF.
  188. */
  189. void phy_sanitize_settings(struct phy_device *phydev)
  190. {
  191. u32 features = phydev->supported;
  192. int idx;
  193. /* Sanitize settings based on PHY capabilities */
  194. if ((features & SUPPORTED_Autoneg) == 0)
  195. phydev->autoneg = AUTONEG_DISABLE;
  196. idx = phy_find_valid(phy_find_setting(phydev->speed, phydev->duplex),
  197. features);
  198. phydev->speed = settings[idx].speed;
  199. phydev->duplex = settings[idx].duplex;
  200. }
  201. EXPORT_SYMBOL(phy_sanitize_settings);
  202. /**
  203. * phy_ethtool_sset - generic ethtool sset function, handles all the details
  204. * @phydev: target phy_device struct
  205. * @cmd: ethtool_cmd
  206. *
  207. * A few notes about parameter checking:
  208. * - We don't set port or transceiver, so we don't care what they
  209. * were set to.
  210. * - phy_start_aneg() will make sure forced settings are sane, and
  211. * choose the next best ones from the ones selected, so we don't
  212. * care if ethtool tries to give us bad values.
  213. */
  214. int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
  215. {
  216. if (cmd->phy_address != phydev->addr)
  217. return -EINVAL;
  218. /* We make sure that we don't pass unsupported
  219. * values in to the PHY */
  220. cmd->advertising &= phydev->supported;
  221. /* Verify the settings we care about. */
  222. if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
  223. return -EINVAL;
  224. if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
  225. return -EINVAL;
  226. if (cmd->autoneg == AUTONEG_DISABLE
  227. && ((cmd->speed != SPEED_1000
  228. && cmd->speed != SPEED_100
  229. && cmd->speed != SPEED_10)
  230. || (cmd->duplex != DUPLEX_HALF
  231. && cmd->duplex != DUPLEX_FULL)))
  232. return -EINVAL;
  233. phydev->autoneg = cmd->autoneg;
  234. phydev->speed = cmd->speed;
  235. phydev->advertising = cmd->advertising;
  236. if (AUTONEG_ENABLE == cmd->autoneg)
  237. phydev->advertising |= ADVERTISED_Autoneg;
  238. else
  239. phydev->advertising &= ~ADVERTISED_Autoneg;
  240. phydev->duplex = cmd->duplex;
  241. /* Restart the PHY */
  242. phy_start_aneg(phydev);
  243. return 0;
  244. }
  245. EXPORT_SYMBOL(phy_ethtool_sset);
  246. int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
  247. {
  248. cmd->supported = phydev->supported;
  249. cmd->advertising = phydev->advertising;
  250. cmd->speed = phydev->speed;
  251. cmd->duplex = phydev->duplex;
  252. cmd->port = PORT_MII;
  253. cmd->phy_address = phydev->addr;
  254. cmd->transceiver = XCVR_EXTERNAL;
  255. cmd->autoneg = phydev->autoneg;
  256. return 0;
  257. }
  258. EXPORT_SYMBOL(phy_ethtool_gset);
/**
 * phy_mii_ioctl - generic PHY MII ioctl interface
 * @phydev: the phy_device struct
 * @mii_data: MII ioctl data
 * @cmd: ioctl cmd to execute
 *
 * Note that this function is currently incompatible with the
 * PHYCONTROL layer. It changes registers without regard to
 * current state. Use at own risk.
 */
int phy_mii_ioctl(struct phy_device *phydev,
		struct mii_ioctl_data *mii_data, int cmd)
{
	u16 val = mii_data->val_in;

	switch (cmd) {
	case SIOCGMIIPHY:
		/* Report our address, then read the register as well. */
		mii_data->phy_id = phydev->addr;
		/* fall through */
	case SIOCGMIIREG:
		mii_data->val_out = phy_read(phydev, mii_data->reg_num);
		break;
	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		/* Mirror writes aimed at our own PHY into the cached state
		 * so phylib's view stays consistent with the hardware. */
		if (mii_data->phy_id == phydev->addr) {
			switch(mii_data->reg_num) {
			case MII_BMCR:
				/* A reset or aneg-enable write re-enables
				 * autonegotiation; anything else forces. */
				if ((val & (BMCR_RESET|BMCR_ANENABLE)) == 0)
					phydev->autoneg = AUTONEG_DISABLE;
				else
					phydev->autoneg = AUTONEG_ENABLE;
				if ((!phydev->autoneg) && (val & BMCR_FULLDPLX))
					phydev->duplex = DUPLEX_FULL;
				else
					phydev->duplex = DUPLEX_HALF;
				/* Forced speed: 1000 beats 100; 10 is the
				 * implicit default (no cached update). */
				if ((!phydev->autoneg) &&
						(val & BMCR_SPEED1000))
					phydev->speed = SPEED_1000;
				else if ((!phydev->autoneg) &&
						(val & BMCR_SPEED100))
					phydev->speed = SPEED_100;
				break;
			case MII_ADVERTISE:
				phydev->advertising = val;
				break;
			default:
				/* do nothing */
				break;
			}
		}

		/* NOTE(review): the write always targets @phydev even when
		 * mii_data->phy_id names a different address on the bus. */
		phy_write(phydev, mii_data->reg_num, val);

		/* A BMCR reset wipes driver setup: re-apply fixups and the
		 * driver's config_init afterwards. */
		if (mii_data->reg_num == MII_BMCR
				&& val & BMCR_RESET
				&& phydev->drv->config_init) {
			phy_scan_fixups(phydev);
			phydev->drv->config_init(phydev);
		}
		break;

	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
EXPORT_SYMBOL(phy_mii_ioctl);
  323. /**
  324. * phy_start_aneg - start auto-negotiation for this PHY device
  325. * @phydev: the phy_device struct
  326. *
  327. * Description: Sanitizes the settings (if we're not autonegotiating
  328. * them), and then calls the driver's config_aneg function.
  329. * If the PHYCONTROL Layer is operating, we change the state to
  330. * reflect the beginning of Auto-negotiation or forcing.
  331. */
  332. int phy_start_aneg(struct phy_device *phydev)
  333. {
  334. int err;
  335. mutex_lock(&phydev->lock);
  336. if (AUTONEG_DISABLE == phydev->autoneg)
  337. phy_sanitize_settings(phydev);
  338. err = phydev->drv->config_aneg(phydev);
  339. if (err < 0)
  340. goto out_unlock;
  341. if (phydev->state != PHY_HALTED) {
  342. if (AUTONEG_ENABLE == phydev->autoneg) {
  343. phydev->state = PHY_AN;
  344. phydev->link_timeout = PHY_AN_TIMEOUT;
  345. } else {
  346. phydev->state = PHY_FORCING;
  347. phydev->link_timeout = PHY_FORCE_TIMEOUT;
  348. }
  349. }
  350. out_unlock:
  351. mutex_unlock(&phydev->lock);
  352. return err;
  353. }
  354. EXPORT_SYMBOL(phy_start_aneg);
/* Forward declarations for the interrupt/timer-driven state machinery
 * defined later in this file. */
static void phy_change(struct work_struct *work);
static void phy_state_machine(struct work_struct *work);
static void phy_timer(unsigned long data);
  358. /**
  359. * phy_start_machine - start PHY state machine tracking
  360. * @phydev: the phy_device struct
  361. * @handler: callback function for state change notifications
  362. *
  363. * Description: The PHY infrastructure can run a state machine
  364. * which tracks whether the PHY is starting up, negotiating,
  365. * etc. This function starts the timer which tracks the state
  366. * of the PHY. If you want to be notified when the state changes,
  367. * pass in the callback @handler, otherwise, pass NULL. If you
  368. * want to maintain your own state machine, do not call this
  369. * function.
  370. */
  371. void phy_start_machine(struct phy_device *phydev,
  372. void (*handler)(struct net_device *))
  373. {
  374. phydev->adjust_state = handler;
  375. INIT_WORK(&phydev->state_queue, phy_state_machine);
  376. init_timer(&phydev->phy_timer);
  377. phydev->phy_timer.function = &phy_timer;
  378. phydev->phy_timer.data = (unsigned long) phydev;
  379. mod_timer(&phydev->phy_timer, jiffies + HZ);
  380. }
/**
 * phy_stop_machine - stop the PHY state machine tracking
 * @phydev: target phy_device struct
 *
 * Description: Stops the state machine timer, sets the state to UP
 *   (unless it wasn't up yet). This function must be called BEFORE
 *   phy_detach.
 */
void phy_stop_machine(struct phy_device *phydev)
{
	/* Kill the timer first so it cannot requeue the work, then flush
	 * any state-machine work already in flight. */
	del_timer_sync(&phydev->phy_timer);
	cancel_work_sync(&phydev->state_queue);

	mutex_lock(&phydev->lock);
	if (phydev->state > PHY_UP)
		phydev->state = PHY_UP;
	mutex_unlock(&phydev->lock);

	/* Safe to clear only after the work above has been cancelled. */
	phydev->adjust_state = NULL;
}
  399. /**
  400. * phy_force_reduction - reduce PHY speed/duplex settings by one step
  401. * @phydev: target phy_device struct
  402. *
  403. * Description: Reduces the speed/duplex settings by one notch,
  404. * in this order--
  405. * 1000/FULL, 1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF.
  406. * The function bottoms out at 10/HALF.
  407. */
  408. static void phy_force_reduction(struct phy_device *phydev)
  409. {
  410. int idx;
  411. idx = phy_find_setting(phydev->speed, phydev->duplex);
  412. idx++;
  413. idx = phy_find_valid(idx, phydev->supported);
  414. phydev->speed = settings[idx].speed;
  415. phydev->duplex = settings[idx].duplex;
  416. pr_info("Trying %d/%s\n", phydev->speed,
  417. DUPLEX_FULL == phydev->duplex ?
  418. "FULL" : "HALF");
  419. }
/**
 * phy_error - enter HALTED state for this PHY device
 * @phydev: target phy_device struct
 *
 * Moves the PHY to the HALTED state in response to a read
 * or write error, and tells the controller the link is down.
 * Must not be called from interrupt context, or while the
 * phydev->lock is held.
 */
static void phy_error(struct phy_device *phydev)
{
	/* Take the lock so the state machine sees the transition
	 * consistently; HALTED stops further link processing. */
	mutex_lock(&phydev->lock);
	phydev->state = PHY_HALTED;
	mutex_unlock(&phydev->lock);
}
  435. /**
  436. * phy_interrupt - PHY interrupt handler
  437. * @irq: interrupt line
  438. * @phy_dat: phy_device pointer
  439. *
  440. * Description: When a PHY interrupt occurs, the handler disables
  441. * interrupts, and schedules a work task to clear the interrupt.
  442. */
  443. static irqreturn_t phy_interrupt(int irq, void *phy_dat)
  444. {
  445. struct phy_device *phydev = phy_dat;
  446. if (PHY_HALTED == phydev->state)
  447. return IRQ_NONE; /* It can't be ours. */
  448. /* The MDIO bus is not allowed to be written in interrupt
  449. * context, so we need to disable the irq here. A work
  450. * queue will write the PHY to disable and clear the
  451. * interrupt, and then reenable the irq line. */
  452. disable_irq_nosync(irq);
  453. atomic_inc(&phydev->irq_disable);
  454. schedule_work(&phydev->phy_queue);
  455. return IRQ_HANDLED;
  456. }
  457. /**
  458. * phy_enable_interrupts - Enable the interrupts from the PHY side
  459. * @phydev: target phy_device struct
  460. */
  461. int phy_enable_interrupts(struct phy_device *phydev)
  462. {
  463. int err;
  464. err = phy_clear_interrupt(phydev);
  465. if (err < 0)
  466. return err;
  467. err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
  468. return err;
  469. }
  470. EXPORT_SYMBOL(phy_enable_interrupts);
  471. /**
  472. * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
  473. * @phydev: target phy_device struct
  474. */
  475. int phy_disable_interrupts(struct phy_device *phydev)
  476. {
  477. int err;
  478. /* Disable PHY interrupts */
  479. err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
  480. if (err)
  481. goto phy_err;
  482. /* Clear the interrupt */
  483. err = phy_clear_interrupt(phydev);
  484. if (err)
  485. goto phy_err;
  486. return 0;
  487. phy_err:
  488. phy_error(phydev);
  489. return err;
  490. }
  491. EXPORT_SYMBOL(phy_disable_interrupts);
  492. /**
  493. * phy_start_interrupts - request and enable interrupts for a PHY device
  494. * @phydev: target phy_device struct
  495. *
  496. * Description: Request the interrupt for the given PHY.
  497. * If this fails, then we set irq to PHY_POLL.
  498. * Otherwise, we enable the interrupts in the PHY.
  499. * This should only be called with a valid IRQ number.
  500. * Returns 0 on success or < 0 on error.
  501. */
  502. int phy_start_interrupts(struct phy_device *phydev)
  503. {
  504. int err = 0;
  505. INIT_WORK(&phydev->phy_queue, phy_change);
  506. atomic_set(&phydev->irq_disable, 0);
  507. if (request_irq(phydev->irq, phy_interrupt,
  508. IRQF_SHARED,
  509. "phy_interrupt",
  510. phydev) < 0) {
  511. printk(KERN_WARNING "%s: Can't get IRQ %d (PHY)\n",
  512. phydev->bus->name,
  513. phydev->irq);
  514. phydev->irq = PHY_POLL;
  515. return 0;
  516. }
  517. err = phy_enable_interrupts(phydev);
  518. return err;
  519. }
  520. EXPORT_SYMBOL(phy_start_interrupts);
/**
 * phy_stop_interrupts - disable interrupts from a PHY device
 * @phydev: target phy_device struct
 *
 * Disables and clears PHY-side interrupts, releases the IRQ line,
 * and rebalances any disable_irq() calls left over from
 * phy_interrupt().  Returns the result of phy_disable_interrupts().
 */
int phy_stop_interrupts(struct phy_device *phydev)
{
	int err;

	err = phy_disable_interrupts(phydev);

	if (err)
		phy_error(phydev);

	free_irq(phydev->irq, phydev);

	/*
	 * Cannot call flush_scheduled_work() here as desired because
	 * of rtnl_lock(), but we do not really care about what would
	 * be done, except from enable_irq(), so cancel any work
	 * possibly pending and take care of the matter below.
	 */
	cancel_work_sync(&phydev->phy_queue);

	/*
	 * If work indeed has been cancelled, disable_irq() will have
	 * been left unbalanced from phy_interrupt() and enable_irq()
	 * has to be called so that other devices on the line work.
	 */
	while (atomic_dec_return(&phydev->irq_disable) >= 0)
		enable_irq(phydev->irq);

	return err;
}
EXPORT_SYMBOL(phy_stop_interrupts);
/**
 * phy_change - Scheduled by the phy_interrupt/timer to handle PHY changes
 * @work: work_struct that describes the work to be done
 */
static void phy_change(struct work_struct *work)
{
	int err;
	struct phy_device *phydev =
		container_of(work, struct phy_device, phy_queue);

	/* Ack/disable PHY interrupts; on failure this halts the PHY. */
	err = phy_disable_interrupts(phydev);

	if (err)
		goto phy_err;

	mutex_lock(&phydev->lock);
	if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
		phydev->state = PHY_CHANGELINK;
	mutex_unlock(&phydev->lock);

	/* Rebalance the disable_irq_nosync() done in phy_interrupt(). */
	atomic_dec(&phydev->irq_disable);
	enable_irq(phydev->irq);

	/* Reenable interrupts */
	if (PHY_HALTED != phydev->state)
		err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);

	/* err is 0 from phy_disable_interrupts() above when the PHY is
	 * HALTED and the reconfigure is skipped. */
	if (err)
		goto irq_enable_err;

	/* Stop timer and run the state queue now.  The work function for
	 * state_queue will start the timer up again.
	 */
	del_timer(&phydev->phy_timer);
	schedule_work(&phydev->state_queue);

	return;

irq_enable_err:
	/* Undo the enable above before halting the PHY. */
	disable_irq(phydev->irq);
	atomic_inc(&phydev->irq_disable);
phy_err:
	phy_error(phydev);
}
/**
 * phy_stop - Bring down the PHY link, and stop checking the status
 * @phydev: target phy_device struct
 */
void phy_stop(struct phy_device *phydev)
{
	mutex_lock(&phydev->lock);

	/* Already halted: nothing to do. */
	if (PHY_HALTED == phydev->state)
		goto out_unlock;

	if (phydev->irq != PHY_POLL) {
		/* Disable PHY Interrupts */
		phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);

		/* Clear any pending interrupts */
		phy_clear_interrupt(phydev);
	}

	phydev->state = PHY_HALTED;

out_unlock:
	mutex_unlock(&phydev->lock);

	/*
	 * Cannot call flush_scheduled_work() here as desired because
	 * of rtnl_lock(), but PHY_HALTED shall guarantee phy_change()
	 * will not reenable interrupts.
	 */
}
/**
 * phy_start - start or restart a PHY device
 * @phydev: target phy_device struct
 *
 * Description: Indicates the attached device's readiness to
 *   handle PHY-related work.  Used during startup to start the
 *   PHY, and after a call to phy_stop() to resume operation.
 *   Also used to indicate the MDIO bus has cleared an error
 *   condition.
 */
void phy_start(struct phy_device *phydev)
{
	mutex_lock(&phydev->lock);

	switch (phydev->state) {
	case PHY_STARTING:
		phydev->state = PHY_PENDING;
		break;
	case PHY_READY:
		phydev->state = PHY_UP;
		break;
	case PHY_HALTED:
		phydev->state = PHY_RESUMING;
		/* fall through - no further work needed here */
	default:
		break;
	}
	mutex_unlock(&phydev->lock);
}
EXPORT_SYMBOL(phy_stop);
EXPORT_SYMBOL(phy_start);
/**
 * phy_state_machine - Handle the state machine
 * @work: work_struct that describes the work to be done
 *
 * Description: Scheduled by the state_queue workqueue each time
 *   phy_timer is triggered.  Always re-arms phy_timer at the end,
 *   so the machine keeps polling every PHY_STATE_TIME seconds.
 */
static void phy_state_machine(struct work_struct *work)
{
	struct phy_device *phydev =
		container_of(work, struct phy_device, state_queue);
	int needs_aneg = 0;	/* restart aneg after dropping the lock */
	int err = 0;

	mutex_lock(&phydev->lock);

	/* Optional per-driver notification hook (see phy_start_machine). */
	if (phydev->adjust_state)
		phydev->adjust_state(phydev->attached_dev);

	switch(phydev->state) {
	case PHY_DOWN:
	case PHY_STARTING:
	case PHY_READY:
	case PHY_PENDING:
		/* Idle states: nothing to do until phy_start() moves us on. */
		break;
	case PHY_UP:
		needs_aneg = 1;

		phydev->link_timeout = PHY_AN_TIMEOUT;

		break;
	case PHY_AN:
		err = phy_read_status(phydev);

		if (err < 0)
			break;

		/* If the link is down, give up on
		 * negotiation for now */
		if (!phydev->link) {
			phydev->state = PHY_NOLINK;
			netif_carrier_off(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);
			break;
		}

		/* Check if negotiation is done.  Break
		 * if there's an error */
		err = phy_aneg_done(phydev);
		if (err < 0)
			break;

		/* If AN is done, we're running */
		if (err > 0) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);

		} else if (0 == phydev->link_timeout--) {
			int idx;

			needs_aneg = 1;
			/* If we have the magic_aneg bit,
			 * we try again */
			if (phydev->drv->flags & PHY_HAS_MAGICANEG)
				break;

			/* The timer expired, and we still
			 * don't have a setting, so we try
			 * forcing it until we find one that
			 * works, starting from the fastest speed,
			 * and working our way down */
			idx = phy_find_valid(0, phydev->supported);

			phydev->speed = settings[idx].speed;
			phydev->duplex = settings[idx].duplex;

			phydev->autoneg = AUTONEG_DISABLE;

			pr_info("Trying %d/%s\n", phydev->speed,
					DUPLEX_FULL ==
					phydev->duplex ?
					"FULL" : "HALF");
		}
		break;
	case PHY_NOLINK:
		/* Poll for the link coming back. */
		err = phy_read_status(phydev);

		if (err)
			break;

		if (phydev->link) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);
		}
		break;
	case PHY_FORCING:
		err = genphy_update_link(phydev);

		if (err)
			break;

		if (phydev->link) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
		} else {
			/* Forced setting did not bring the link up in time:
			 * step down one speed/duplex notch and retry. */
			if (0 == phydev->link_timeout--) {
				phy_force_reduction(phydev);
				needs_aneg = 1;
			}
		}

		phydev->adjust_link(phydev->attached_dev);
		break;
	case PHY_RUNNING:
		/* Only register a CHANGE if we are
		 * polling */
		if (PHY_POLL == phydev->irq)
			phydev->state = PHY_CHANGELINK;
		break;
	case PHY_CHANGELINK:
		err = phy_read_status(phydev);

		if (err)
			break;

		if (phydev->link) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
		} else {
			phydev->state = PHY_NOLINK;
			netif_carrier_off(phydev->attached_dev);
		}

		phydev->adjust_link(phydev->attached_dev);

		/* Interrupt-driven PHYs had interrupts disabled by
		 * phy_change(); re-enable them here. */
		if (PHY_POLL != phydev->irq)
			err = phy_config_interrupt(phydev,
					PHY_INTERRUPT_ENABLED);
		break;
	case PHY_HALTED:
		/* Report the link down exactly once after halting. */
		if (phydev->link) {
			phydev->link = 0;
			netif_carrier_off(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);
		}
		break;
	case PHY_RESUMING:

		err = phy_clear_interrupt(phydev);

		if (err)
			break;

		err = phy_config_interrupt(phydev,
				PHY_INTERRUPT_ENABLED);

		if (err)
			break;

		if (AUTONEG_ENABLE == phydev->autoneg) {
			err = phy_aneg_done(phydev);
			if (err < 0)
				break;

			/* err > 0 if AN is done.
			 * Otherwise, it's 0, and we're
			 * still waiting for AN */
			if (err > 0) {
				phydev->state = PHY_RUNNING;
			} else {
				phydev->state = PHY_AN;
				phydev->link_timeout = PHY_AN_TIMEOUT;
			}
		} else
			phydev->state = PHY_RUNNING;
		break;
	}

	mutex_unlock(&phydev->lock);

	/* Restart aneg outside the lock (phy_start_aneg takes it itself). */
	if (needs_aneg)
		err = phy_start_aneg(phydev);

	if (err < 0)
		phy_error(phydev);

	mod_timer(&phydev->phy_timer, jiffies + PHY_STATE_TIME * HZ);
}
  793. /* PHY timer which schedules the state machine work */
  794. static void phy_timer(unsigned long data)
  795. {
  796. struct phy_device *phydev = (struct phy_device *)data;
  797. /*
  798. * PHY I/O operations can potentially sleep so we ensure that
  799. * it's done from a process context
  800. */
  801. schedule_work(&phydev->state_queue);
  802. }