/*
 * drivers/net/phy/phy.c
 *
 * Framework for configuring and reading PHY devices
 * Based on code in sungem_phy.c and gianfar_phy.c
 *
 * Author: Andy Fleming
 *
 * Copyright (c) 2004 Freescale Semiconductor, Inc.
 * Copyright (c) 2006, 2007 Maciej W. Rozycki
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

/**
 * phy_print_status - Convenience function to print out the current phy status
 * @phydev: the phy_device struct
 */
void phy_print_status(struct phy_device *phydev)
{
	pr_info("PHY: %s - Link is %s", phydev->dev.bus_id,
		phydev->link ? "Up" : "Down");
	if (phydev->link)
		printk(" - %d/%s", phydev->speed,
			DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
	printk("\n");
}
EXPORT_SYMBOL(phy_print_status);

/**
 * phy_read - Convenience function for reading a given PHY register
 * @phydev: the phy_device struct
 * @regnum: register number to read
 *
 * NOTE: MUST NOT be called from interrupt context,
 * because the bus read/write functions may wait for an interrupt
 * to conclude the operation.
 */
int phy_read(struct phy_device *phydev, u16 regnum)
{
	int retval;
	struct mii_bus *bus = phydev->bus;

	BUG_ON(in_interrupt());

	mutex_lock(&bus->mdio_lock);
	retval = bus->read(bus, phydev->addr, regnum);
	mutex_unlock(&bus->mdio_lock);

	return retval;
}
EXPORT_SYMBOL(phy_read);

/**
 * phy_write - Convenience function for writing a given PHY register
 * @phydev: the phy_device struct
 * @regnum: register number to write
 * @val: value to write to @regnum
 *
 * NOTE: MUST NOT be called from interrupt context,
 * because the bus read/write functions may wait for an interrupt
 * to conclude the operation.
 */
int phy_write(struct phy_device *phydev, u16 regnum, u16 val)
{
	int err;
	struct mii_bus *bus = phydev->bus;

	BUG_ON(in_interrupt());

	mutex_lock(&bus->mdio_lock);
	err = bus->write(bus, phydev->addr, regnum, val);
	mutex_unlock(&bus->mdio_lock);

	return err;
}
EXPORT_SYMBOL(phy_write);

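/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): a minimal example of how a MAC driver might use the phy_read()
 * and phy_write() helpers above for a read-modify-write of a standard MII
 * register from process context.  The function name and the isolate-bit
 * use case are assumptions made for the example only.
 */
#if 0	/* usage sketch, not compiled */
static int example_phy_set_isolate(struct phy_device *phydev, bool isolate)
{
	/* phy_read() may sleep, so this must not run in interrupt context */
	int bmcr = phy_read(phydev, MII_BMCR);

	if (bmcr < 0)
		return bmcr;

	if (isolate)
		bmcr |= BMCR_ISOLATE;
	else
		bmcr &= ~BMCR_ISOLATE;

	return phy_write(phydev, MII_BMCR, bmcr);
}
#endif
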
/**
 * phy_clear_interrupt - Ack the phy device's interrupt
 * @phydev: the phy_device struct
 *
 * If the @phydev driver has an ack_interrupt function, call it to
 * ack and clear the phy device's interrupt.
 *
 * Returns 0 on success or < 0 on error.
 */
int phy_clear_interrupt(struct phy_device *phydev)
{
	int err = 0;

	if (phydev->drv->ack_interrupt)
		err = phydev->drv->ack_interrupt(phydev);

	return err;
}

/**
 * phy_config_interrupt - configure the PHY device for the requested interrupts
 * @phydev: the phy_device struct
 * @interrupts: interrupt flags to configure for this @phydev
 *
 * Returns 0 on success or < 0 on error.
 */
int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
{
	int err = 0;

	phydev->interrupts = interrupts;
	if (phydev->drv->config_intr)
		err = phydev->drv->config_intr(phydev);

	return err;
}

/**
 * phy_aneg_done - return auto-negotiation status
 * @phydev: target phy_device struct
 *
 * Description: Reads the status register and returns 0 if
 * auto-negotiation is incomplete, a negative errno if the read
 * fails, or BMSR_ANEGCOMPLETE if auto-negotiation is done.
 */
static inline int phy_aneg_done(struct phy_device *phydev)
{
	int retval;

	retval = phy_read(phydev, MII_BMSR);

	return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
}

/* A structure for mapping a particular speed and duplex
 * combination to a particular SUPPORTED and ADVERTISED value */
struct phy_setting {
	int speed;
	int duplex;
	u32 setting;
};

/* A mapping of all SUPPORTED settings to speed/duplex */
static const struct phy_setting settings[] = {
	{
		.speed = 10000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10000baseT_Full,
	},
	{
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_1000baseT_Full,
	},
	{
		.speed = SPEED_1000,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_1000baseT_Half,
	},
	{
		.speed = SPEED_100,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_100baseT_Full,
	},
	{
		.speed = SPEED_100,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_100baseT_Half,
	},
	{
		.speed = SPEED_10,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10baseT_Full,
	},
	{
		.speed = SPEED_10,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_10baseT_Half,
	},
};

#define MAX_NUM_SETTINGS ARRAY_SIZE(settings)

/**
 * phy_find_setting - find a PHY settings array entry that matches speed & duplex
 * @speed: speed to match
 * @duplex: duplex to match
 *
 * Description: Searches the settings array for the setting which
 * matches the desired speed and duplex, and returns the index
 * of that setting. Returns the index of the last setting if
 * none of the others match.
 */
static inline int phy_find_setting(int speed, int duplex)
{
	int idx = 0;

	while (idx < ARRAY_SIZE(settings) &&
			(settings[idx].speed != speed ||
			settings[idx].duplex != duplex))
		idx++;

	return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
}

/**
 * phy_find_valid - find a PHY setting that matches the requested features mask
 * @idx: The first index in settings[] to search
 * @features: A mask of the valid settings
 *
 * Description: Returns the index of the first valid setting less
 * than or equal to the one pointed to by idx, as determined by
 * the mask in features. Returns the index of the last setting
 * if nothing else matches.
 */
static inline int phy_find_valid(int idx, u32 features)
{
	while (idx < MAX_NUM_SETTINGS && !(settings[idx].setting & features))
		idx++;

	return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
}

/**
 * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex
 * @phydev: the target phy_device struct
 *
 * Description: Make sure the PHY is set to supported speeds and
 * duplexes. Drop down by one in this order: 1000/FULL,
 * 1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF.
 */
void phy_sanitize_settings(struct phy_device *phydev)
{
	u32 features = phydev->supported;
	int idx;

	/* Sanitize settings based on PHY capabilities */
	if ((features & SUPPORTED_Autoneg) == 0)
		phydev->autoneg = AUTONEG_DISABLE;

	idx = phy_find_valid(phy_find_setting(phydev->speed, phydev->duplex),
			features);

	phydev->speed = settings[idx].speed;
	phydev->duplex = settings[idx].duplex;
}
EXPORT_SYMBOL(phy_sanitize_settings);

/**
 * phy_ethtool_sset - generic ethtool sset function, handles all the details
 * @phydev: target phy_device struct
 * @cmd: ethtool_cmd
 *
 * A few notes about parameter checking:
 * - We don't set port or transceiver, so we don't care what they
 *   were set to.
 * - phy_start_aneg() will make sure forced settings are sane, and
 *   choose the next best ones from the ones selected, so we don't
 *   care if ethtool tries to give us bad values.
 */
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
	if (cmd->phy_address != phydev->addr)
		return -EINVAL;

	/* We make sure that we don't pass unsupported
	 * values in to the PHY */
	cmd->advertising &= phydev->supported;

	/* Verify the settings we care about. */
	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE
			&& ((cmd->speed != SPEED_1000
					&& cmd->speed != SPEED_100
					&& cmd->speed != SPEED_10)
				|| (cmd->duplex != DUPLEX_HALF
					&& cmd->duplex != DUPLEX_FULL)))
		return -EINVAL;

	phydev->autoneg = cmd->autoneg;

	phydev->speed = cmd->speed;

	phydev->advertising = cmd->advertising;

	if (AUTONEG_ENABLE == cmd->autoneg)
		phydev->advertising |= ADVERTISED_Autoneg;
	else
		phydev->advertising &= ~ADVERTISED_Autoneg;

	phydev->duplex = cmd->duplex;

	/* Restart the PHY */
	phy_start_aneg(phydev);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_sset);

int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
	cmd->supported = phydev->supported;

	cmd->advertising = phydev->advertising;

	cmd->speed = phydev->speed;
	cmd->duplex = phydev->duplex;
	cmd->port = PORT_MII;
	cmd->phy_address = phydev->addr;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = phydev->autoneg;

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_gset);

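/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): a MAC driver can forward its ethtool get_settings/set_settings
 * callbacks to the two helpers above.  The private-data layout (a struct
 * example_priv with a "phydev" member) is an assumption made for the
 * example only.
 */
#if 0	/* usage sketch, not compiled */
static int example_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct example_priv *priv = netdev_priv(dev);

	if (!priv->phydev)
		return -ENODEV;

	return phy_ethtool_gset(priv->phydev, cmd);
}

static int example_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct example_priv *priv = netdev_priv(dev);

	if (!priv->phydev)
		return -ENODEV;

	return phy_ethtool_sset(priv->phydev, cmd);
}

static const struct ethtool_ops example_ethtool_ops = {
	.get_settings	= example_get_settings,
	.set_settings	= example_set_settings,
	.get_link	= ethtool_op_get_link,
};
#endif
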
/**
 * phy_mii_ioctl - generic PHY MII ioctl interface
 * @phydev: the phy_device struct
 * @mii_data: MII ioctl data
 * @cmd: ioctl cmd to execute
 *
 * Note that this function is currently incompatible with the
 * PHYCONTROL layer. It changes registers without regard to
 * current state. Use at own risk.
 */
int phy_mii_ioctl(struct phy_device *phydev,
		struct mii_ioctl_data *mii_data, int cmd)
{
	u16 val = mii_data->val_in;

	switch (cmd) {
	case SIOCGMIIPHY:
		mii_data->phy_id = phydev->addr;
		break;

	case SIOCGMIIREG:
		mii_data->val_out = phy_read(phydev, mii_data->reg_num);
		break;

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (mii_data->phy_id == phydev->addr) {
			switch (mii_data->reg_num) {
			case MII_BMCR:
				if ((val & (BMCR_RESET|BMCR_ANENABLE)) == 0)
					phydev->autoneg = AUTONEG_DISABLE;
				else
					phydev->autoneg = AUTONEG_ENABLE;
				if ((!phydev->autoneg) && (val & BMCR_FULLDPLX))
					phydev->duplex = DUPLEX_FULL;
				else
					phydev->duplex = DUPLEX_HALF;
				if ((!phydev->autoneg) &&
						(val & BMCR_SPEED1000))
					phydev->speed = SPEED_1000;
				else if ((!phydev->autoneg) &&
						(val & BMCR_SPEED100))
					phydev->speed = SPEED_100;
				break;
			case MII_ADVERTISE:
				phydev->advertising = val;
				break;
			default:
				/* do nothing */
				break;
			}
		}

		phy_write(phydev, mii_data->reg_num, val);

		if (mii_data->reg_num == MII_BMCR
				&& val & BMCR_RESET
				&& phydev->drv->config_init) {
			phy_scan_fixups(phydev);
			phydev->drv->config_init(phydev);
		}
		break;

	default:
		return -ENOTTY;
	}

	return 0;
}
EXPORT_SYMBOL(phy_mii_ioctl);

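/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): wiring phy_mii_ioctl() into a driver's do_ioctl handler so that
 * userspace SIOCxMIIxxx requests (e.g. from mii-tool) reach the PHY.  The
 * private-data layout is again an assumption made for the example only.
 */
#if 0	/* usage sketch, not compiled */
static int example_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct example_priv *priv = netdev_priv(dev);

	if (!netif_running(dev) || !priv->phydev)
		return -EINVAL;

	return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
}
#endif
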
/**
 * phy_start_aneg - start auto-negotiation for this PHY device
 * @phydev: the phy_device struct
 *
 * Description: Sanitizes the settings (if we're not autonegotiating
 * them), and then calls the driver's config_aneg function.
 * If the PHYCONTROL Layer is operating, we change the state to
 * reflect the beginning of Auto-negotiation or forcing.
 */
int phy_start_aneg(struct phy_device *phydev)
{
	int err;

	mutex_lock(&phydev->lock);

	if (AUTONEG_DISABLE == phydev->autoneg)
		phy_sanitize_settings(phydev);

	err = phydev->drv->config_aneg(phydev);

	if (err < 0)
		goto out_unlock;

	if (phydev->state != PHY_HALTED) {
		if (AUTONEG_ENABLE == phydev->autoneg) {
			phydev->state = PHY_AN;
			phydev->link_timeout = PHY_AN_TIMEOUT;
		} else {
			phydev->state = PHY_FORCING;
			phydev->link_timeout = PHY_FORCE_TIMEOUT;
		}
	}

out_unlock:
	mutex_unlock(&phydev->lock);
	return err;
}
EXPORT_SYMBOL(phy_start_aneg);

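/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): forcing a fixed 100/Full link from driver code by disabling
 * autoneg and letting phy_start_aneg() sanitize and apply the settings.
 * The function name is hypothetical.
 */
#if 0	/* usage sketch, not compiled */
static int example_force_100_full(struct phy_device *phydev)
{
	phydev->autoneg = AUTONEG_DISABLE;
	phydev->speed = SPEED_100;
	phydev->duplex = DUPLEX_FULL;

	return phy_start_aneg(phydev);
}
#endif
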
static void phy_change(struct work_struct *work);
static void phy_state_machine(struct work_struct *work);
static void phy_timer(unsigned long data);

/**
 * phy_start_machine - start PHY state machine tracking
 * @phydev: the phy_device struct
 * @handler: callback function for state change notifications
 *
 * Description: The PHY infrastructure can run a state machine
 * which tracks whether the PHY is starting up, negotiating,
 * etc. This function starts the timer which tracks the state
 * of the PHY. If you want to be notified when the state changes,
 * pass in the callback @handler, otherwise, pass NULL. If you
 * want to maintain your own state machine, do not call this
 * function.
 */
void phy_start_machine(struct phy_device *phydev,
		void (*handler)(struct net_device *))
{
	phydev->adjust_state = handler;

	INIT_WORK(&phydev->state_queue, phy_state_machine);
	init_timer(&phydev->phy_timer);
	phydev->phy_timer.function = &phy_timer;
	phydev->phy_timer.data = (unsigned long) phydev;

	mod_timer(&phydev->phy_timer, jiffies + HZ);
}

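/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): once a driver has attached to a phy_device (attachment itself is
 * handled outside this file), it can hand link tracking to the PHYCONTROL
 * state machine and optionally receive state-change notifications through
 * a callback.  "example_adjust_state" is a hypothetical driver function.
 */
#if 0	/* usage sketch, not compiled */
static void example_adjust_state(struct net_device *dev)
{
	/* Called on every state machine pass; inspect phydev state here. */
}

static void example_enable_link_tracking(struct phy_device *phydev)
{
	/* Pass NULL instead of the callback if no notification is needed. */
	phy_start_machine(phydev, example_adjust_state);
}
#endif
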
/**
 * phy_stop_machine - stop the PHY state machine tracking
 * @phydev: target phy_device struct
 *
 * Description: Stops the state machine timer, sets the state to UP
 * (unless it wasn't up yet). This function must be called BEFORE
 * phy_detach.
 */
void phy_stop_machine(struct phy_device *phydev)
{
	del_timer_sync(&phydev->phy_timer);
	cancel_work_sync(&phydev->state_queue);

	mutex_lock(&phydev->lock);
	if (phydev->state > PHY_UP)
		phydev->state = PHY_UP;
	mutex_unlock(&phydev->lock);

	phydev->adjust_state = NULL;
}

/**
 * phy_force_reduction - reduce PHY speed/duplex settings by one step
 * @phydev: target phy_device struct
 *
 * Description: Reduces the speed/duplex settings by one notch,
 * in this order:
 * 1000/FULL, 1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF.
 * The function bottoms out at 10/HALF.
 */
static void phy_force_reduction(struct phy_device *phydev)
{
	int idx;

	idx = phy_find_setting(phydev->speed, phydev->duplex);

	idx++;

	idx = phy_find_valid(idx, phydev->supported);

	phydev->speed = settings[idx].speed;
	phydev->duplex = settings[idx].duplex;

	pr_info("Trying %d/%s\n", phydev->speed,
		DUPLEX_FULL == phydev->duplex ? "FULL" : "HALF");
}

/**
 * phy_error - enter HALTED state for this PHY device
 * @phydev: target phy_device struct
 *
 * Moves the PHY to the HALTED state in response to a read
 * or write error, and tells the controller the link is down.
 * Must not be called from interrupt context, or while the
 * phydev->lock is held.
 */
static void phy_error(struct phy_device *phydev)
{
	mutex_lock(&phydev->lock);
	phydev->state = PHY_HALTED;
	mutex_unlock(&phydev->lock);
}

/**
 * phy_interrupt - PHY interrupt handler
 * @irq: interrupt line
 * @phy_dat: phy_device pointer
 *
 * Description: When a PHY interrupt occurs, the handler disables
 * interrupts, and schedules a work task to clear the interrupt.
 */
static irqreturn_t phy_interrupt(int irq, void *phy_dat)
{
	struct phy_device *phydev = phy_dat;

	if (PHY_HALTED == phydev->state)
		return IRQ_NONE;	/* It can't be ours. */

	/* The MDIO bus is not allowed to be written in interrupt
	 * context, so we need to disable the irq here. A work
	 * queue will write the PHY to disable and clear the
	 * interrupt, and then reenable the irq line. */
	disable_irq_nosync(irq);
	atomic_inc(&phydev->irq_disable);

	schedule_work(&phydev->phy_queue);

	return IRQ_HANDLED;
}

/**
 * phy_enable_interrupts - Enable the interrupts from the PHY side
 * @phydev: target phy_device struct
 */
int phy_enable_interrupts(struct phy_device *phydev)
{
	int err;

	err = phy_clear_interrupt(phydev);

	if (err < 0)
		return err;

	err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);

	return err;
}
EXPORT_SYMBOL(phy_enable_interrupts);

/**
 * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
 * @phydev: target phy_device struct
 */
int phy_disable_interrupts(struct phy_device *phydev)
{
	int err;

	/* Disable PHY interrupts */
	err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);

	if (err)
		goto phy_err;

	/* Clear the interrupt */
	err = phy_clear_interrupt(phydev);

	if (err)
		goto phy_err;

	return 0;

phy_err:
	phy_error(phydev);

	return err;
}
EXPORT_SYMBOL(phy_disable_interrupts);

/**
 * phy_start_interrupts - request and enable interrupts for a PHY device
 * @phydev: target phy_device struct
 *
 * Description: Request the interrupt for the given PHY.
 * If this fails, then we set irq to PHY_POLL.
 * Otherwise, we enable the interrupts in the PHY.
 * This should only be called with a valid IRQ number.
 * Returns 0 on success or < 0 on error.
 */
int phy_start_interrupts(struct phy_device *phydev)
{
	int err = 0;

	INIT_WORK(&phydev->phy_queue, phy_change);

	atomic_set(&phydev->irq_disable, 0);
	if (request_irq(phydev->irq, phy_interrupt,
				IRQF_SHARED,
				"phy_interrupt",
				phydev) < 0) {
		printk(KERN_WARNING "%s: Can't get IRQ %d (PHY)\n",
				phydev->bus->name,
				phydev->irq);
		phydev->irq = PHY_POLL;
		return 0;
	}

	err = phy_enable_interrupts(phydev);

	return err;
}
EXPORT_SYMBOL(phy_start_interrupts);

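/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): a driver with a wired-up PHY interrupt line can request it here.
 * If phydev->irq is PHY_POLL, or the IRQ request fails (in which case the
 * helper above falls back to PHY_POLL itself), the state machine simply
 * polls the PHY instead.
 */
#if 0	/* usage sketch, not compiled */
static void example_setup_phy_irq(struct phy_device *phydev)
{
	if (phydev->irq != PHY_POLL && phy_start_interrupts(phydev))
		dev_warn(&phydev->dev, "failed to enable PHY interrupts\n");
}
#endif
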
/**
 * phy_stop_interrupts - disable interrupts from a PHY device
 * @phydev: target phy_device struct
 */
int phy_stop_interrupts(struct phy_device *phydev)
{
	int err;

	err = phy_disable_interrupts(phydev);

	if (err)
		phy_error(phydev);

	free_irq(phydev->irq, phydev);

	/*
	 * Cannot call flush_scheduled_work() here as desired because
	 * of rtnl_lock(), but we do not really care about what would
	 * be done, except from enable_irq(), so cancel any work
	 * possibly pending and take care of the matter below.
	 */
	cancel_work_sync(&phydev->phy_queue);

	/*
	 * If work indeed has been cancelled, disable_irq() will have
	 * been left unbalanced from phy_interrupt() and enable_irq()
	 * has to be called so that other devices on the line work.
	 */
	while (atomic_dec_return(&phydev->irq_disable) >= 0)
		enable_irq(phydev->irq);

	return err;
}
EXPORT_SYMBOL(phy_stop_interrupts);

/**
 * phy_change - Scheduled by the phy_interrupt/timer to handle PHY changes
 * @work: work_struct that describes the work to be done
 */
static void phy_change(struct work_struct *work)
{
	int err;
	struct phy_device *phydev =
		container_of(work, struct phy_device, phy_queue);

	err = phy_disable_interrupts(phydev);

	if (err)
		goto phy_err;

	mutex_lock(&phydev->lock);
	if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
		phydev->state = PHY_CHANGELINK;
	mutex_unlock(&phydev->lock);

	atomic_dec(&phydev->irq_disable);
	enable_irq(phydev->irq);

	/* Reenable interrupts */
	if (PHY_HALTED != phydev->state)
		err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);

	if (err)
		goto irq_enable_err;

	return;

irq_enable_err:
	disable_irq(phydev->irq);
	atomic_inc(&phydev->irq_disable);
phy_err:
	phy_error(phydev);
}

/**
 * phy_stop - Bring down the PHY link, and stop checking the status
 * @phydev: target phy_device struct
 */
void phy_stop(struct phy_device *phydev)
{
	mutex_lock(&phydev->lock);

	if (PHY_HALTED == phydev->state)
		goto out_unlock;

	if (phydev->irq != PHY_POLL) {
		/* Disable PHY Interrupts */
		phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);

		/* Clear any pending interrupts */
		phy_clear_interrupt(phydev);
	}

	phydev->state = PHY_HALTED;

out_unlock:
	mutex_unlock(&phydev->lock);

	/*
	 * Cannot call flush_scheduled_work() here as desired because
	 * of rtnl_lock(), but PHY_HALTED shall guarantee phy_change()
	 * will not reenable interrupts.
	 */
}

/**
 * phy_start - start or restart a PHY device
 * @phydev: target phy_device struct
 *
 * Description: Indicates the attached device's readiness to
 * handle PHY-related work. Used during startup to start the
 * PHY, and after a call to phy_stop() to resume operation.
 * Also used to indicate the MDIO bus has cleared an error
 * condition.
 */
void phy_start(struct phy_device *phydev)
{
	mutex_lock(&phydev->lock);

	switch (phydev->state) {
	case PHY_STARTING:
		phydev->state = PHY_PENDING;
		break;
	case PHY_READY:
		phydev->state = PHY_UP;
		break;
	case PHY_HALTED:
		phydev->state = PHY_RESUMING;
	default:
		break;
	}
	mutex_unlock(&phydev->lock);
}
EXPORT_SYMBOL(phy_stop);
EXPORT_SYMBOL(phy_start);

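/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): the usual open/close pairing in a MAC driver, with phy_start()
 * resuming link handling once the device is ready and phy_stop() halting
 * status checks on the way down.  The private-data layout and function
 * names are assumptions made for the example only.
 */
#if 0	/* usage sketch, not compiled */
static int example_open(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	/* ... MAC-side setup elided ... */
	phy_start(priv->phydev);
	netif_start_queue(dev);

	return 0;
}

static int example_close(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	netif_stop_queue(dev);
	phy_stop(priv->phydev);
	/* ... MAC-side teardown elided ... */

	return 0;
}
#endif
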
/**
 * phy_state_machine - Handle the state machine
 * @work: work_struct that describes the work to be done
 *
 * Description: Scheduled by the state_queue workqueue each time
 * phy_timer is triggered.
 */
static void phy_state_machine(struct work_struct *work)
{
	struct phy_device *phydev =
		container_of(work, struct phy_device, state_queue);
	int needs_aneg = 0;
	int err = 0;

	mutex_lock(&phydev->lock);

	if (phydev->adjust_state)
		phydev->adjust_state(phydev->attached_dev);

	switch (phydev->state) {
	case PHY_DOWN:
	case PHY_STARTING:
	case PHY_READY:
	case PHY_PENDING:
		break;
	case PHY_UP:
		needs_aneg = 1;

		phydev->link_timeout = PHY_AN_TIMEOUT;

		break;
	case PHY_AN:
		err = phy_read_status(phydev);

		if (err < 0)
			break;

		/* If the link is down, give up on
		 * negotiation for now */
		if (!phydev->link) {
			phydev->state = PHY_NOLINK;
			netif_carrier_off(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);
			break;
		}

		/* Check if negotiation is done. Break
		 * if there's an error */
		err = phy_aneg_done(phydev);
		if (err < 0)
			break;

		/* If AN is done, we're running */
		if (err > 0) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);

		} else if (0 == phydev->link_timeout--) {
			int idx;

			needs_aneg = 1;
			/* If we have the magic_aneg bit,
			 * we try again */
			if (phydev->drv->flags & PHY_HAS_MAGICANEG)
				break;

			/* The timer expired, and we still
			 * don't have a setting, so we try
			 * forcing it until we find one that
			 * works, starting from the fastest speed,
			 * and working our way down */
			idx = phy_find_valid(0, phydev->supported);

			phydev->speed = settings[idx].speed;
			phydev->duplex = settings[idx].duplex;

			phydev->autoneg = AUTONEG_DISABLE;

			pr_info("Trying %d/%s\n", phydev->speed,
					DUPLEX_FULL == phydev->duplex ?
					"FULL" : "HALF");
		}
		break;
	case PHY_NOLINK:
		err = phy_read_status(phydev);

		if (err)
			break;

		if (phydev->link) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);
		}
		break;
	case PHY_FORCING:
		err = genphy_update_link(phydev);

		if (err)
			break;

		if (phydev->link) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
		} else {
			if (0 == phydev->link_timeout--) {
				phy_force_reduction(phydev);
				needs_aneg = 1;
			}
		}

		phydev->adjust_link(phydev->attached_dev);
		break;
	case PHY_RUNNING:
		/* Only register a CHANGE if we are
		 * polling */
		if (PHY_POLL == phydev->irq)
			phydev->state = PHY_CHANGELINK;
		break;
	case PHY_CHANGELINK:
		err = phy_read_status(phydev);

		if (err)
			break;

		if (phydev->link) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
		} else {
			phydev->state = PHY_NOLINK;
			netif_carrier_off(phydev->attached_dev);
		}

		phydev->adjust_link(phydev->attached_dev);

		if (PHY_POLL != phydev->irq)
			err = phy_config_interrupt(phydev,
					PHY_INTERRUPT_ENABLED);
		break;
	case PHY_HALTED:
		if (phydev->link) {
			phydev->link = 0;
			netif_carrier_off(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);
		}
		break;
	case PHY_RESUMING:
		err = phy_clear_interrupt(phydev);

		if (err)
			break;

		err = phy_config_interrupt(phydev,
				PHY_INTERRUPT_ENABLED);

		if (err)
			break;

		if (AUTONEG_ENABLE == phydev->autoneg) {
			err = phy_aneg_done(phydev);
			if (err < 0)
				break;

			/* err > 0 if AN is done.
			 * Otherwise, it's 0, and we're
			 * still waiting for AN */
			if (err > 0) {
				phydev->state = PHY_RUNNING;
			} else {
				phydev->state = PHY_AN;
				phydev->link_timeout = PHY_AN_TIMEOUT;
			}
		} else
			phydev->state = PHY_RUNNING;
		break;
	}

	mutex_unlock(&phydev->lock);

	if (needs_aneg)
		err = phy_start_aneg(phydev);

	if (err < 0)
		phy_error(phydev);

	mod_timer(&phydev->phy_timer, jiffies + PHY_STATE_TIME * HZ);
}

/* PHY timer which schedules the state machine work */
static void phy_timer(unsigned long data)
{
	struct phy_device *phydev = (struct phy_device *)data;

	/*
	 * PHY I/O operations can potentially sleep so we ensure that
	 * it's done from a process context
	 */
	schedule_work(&phydev->state_queue);
}