davinci_mdio.c

/*
 * DaVinci MDIO Module driver
 *
 * Copyright (C) 2010 Texas Instruments.
 *
 * Shamelessly ripped out of davinci_emac.c, original copyrights follow:
 *
 * Copyright (C) 2009 Texas Instruments.
 *
 * ---------------------------------------------------------------------------
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 * ---------------------------------------------------------------------------
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/phy.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/davinci_emac.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>

/*
 * This timeout definition is a worst-case ultra defensive measure against
 * unexpected controller lock ups. Ideally, we should never ever hit this
 * scenario in practice.
 */
#define MDIO_TIMEOUT		100	/* msecs */

#define PHY_REG_MASK		0x1f
#define PHY_ID_MASK		0x1f

#define DEF_OUT_FREQ		2200000	/* 2.2 MHz */

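/*
 * Register layout of the MDIO controller.  The trailing zero-length
 * user[] array maps the per-channel access/physel register pairs that
 * follow the common registers; this driver only ever uses channel 0
 * (user[0]).
 */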
struct davinci_mdio_regs {
        u32     version;
        u32     control;
#define CONTROL_IDLE		BIT(31)
#define CONTROL_ENABLE		BIT(30)
#define CONTROL_MAX_DIV		(0xffff)

        u32     alive;
        u32     link;
        u32     linkintraw;
        u32     linkintmasked;
        u32     __reserved_0[2];
        u32     userintraw;
        u32     userintmasked;
        u32     userintmaskset;
        u32     userintmaskclr;
        u32     __reserved_1[20];

        struct {
                u32     access;
#define USERACCESS_GO		BIT(31)
#define USERACCESS_WRITE	BIT(30)
#define USERACCESS_ACK		BIT(29)
#define USERACCESS_READ		(0)
#define USERACCESS_DATA		(0xffff)

                u32     physel;
        }       user[0];
};

static struct mdio_platform_data default_pdata = {
        .bus_freq = DEF_OUT_FREQ,
};

struct davinci_mdio_data {
        struct mdio_platform_data pdata;
        struct davinci_mdio_regs __iomem *regs;
        spinlock_t      lock;
        struct clk      *clk;
        struct device   *dev;
        struct mii_bus  *bus;
        bool            suspended;
        unsigned long   access_time; /* jiffies */
};

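/*
 * Program the MDIO clock divider from the functional clock rate and the
 * requested bus frequency, then estimate the worst-case duration of a
 * single user access (roughly 88 bit times, padded by a factor of four)
 * in jiffies.
 */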
static void __davinci_mdio_reset(struct davinci_mdio_data *data)
{
        u32 mdio_in, div, mdio_out_khz, access_time;

        mdio_in = clk_get_rate(data->clk);
        div = (mdio_in / data->pdata.bus_freq) - 1;
        if (div > CONTROL_MAX_DIV)
                div = CONTROL_MAX_DIV;

        /* set enable and clock divider */
        __raw_writel(div | CONTROL_ENABLE, &data->regs->control);

        /*
         * One mdio transaction consists of:
         *	32 bits of preamble
         *	32 bits of transferred data
         *	24 bits of bus yield (not needed unless shared?)
         */
        mdio_out_khz = mdio_in / (1000 * (div + 1));
        access_time  = (88 * 1000) / mdio_out_khz;

        /*
         * In the worst case, we could be kicking off a user-access immediately
         * after the mdio bus scan state-machine triggered its own read.  If
         * so, our request could get deferred by one access cycle.  We
         * defensively allow for 4 access cycles.
         */
        data->access_time = usecs_to_jiffies(access_time * 4);
        if (!data->access_time)
                data->access_time = 1;
}

static int davinci_mdio_reset(struct mii_bus *bus)
{
        struct davinci_mdio_data *data = bus->priv;
        u32 phy_mask, ver;

        __davinci_mdio_reset(data);

        /* wait for scan logic to settle */
        msleep(PHY_MAX_ADDR * data->access_time);

        /* dump hardware version info */
        ver = __raw_readl(&data->regs->version);
        dev_info(data->dev, "davinci mdio revision %d.%d\n",
                 (ver >> 8) & 0xff, ver & 0xff);

        /* get phy mask from the alive register */
        phy_mask = __raw_readl(&data->regs->alive);
        if (phy_mask) {
                /* restrict mdio bus to live phys only */
                dev_info(data->dev, "detected phy mask %x\n", ~phy_mask);
                phy_mask = ~phy_mask;
        } else {
                /* desperately scan all phys */
                dev_warn(data->dev, "no live phy, scanning all\n");
                phy_mask = 0;
        }
        data->bus->phy_mask = phy_mask;

        return 0;
}

/* wait until hardware is ready for another user access */
static inline int wait_for_user_access(struct davinci_mdio_data *data)
{
        struct davinci_mdio_regs __iomem *regs = data->regs;
        unsigned long timeout = jiffies + msecs_to_jiffies(MDIO_TIMEOUT);
        u32 reg;

        while (time_after(timeout, jiffies)) {
                reg = __raw_readl(&regs->user[0].access);
                if ((reg & USERACCESS_GO) == 0)
                        return 0;

                reg = __raw_readl(&regs->control);
                if ((reg & CONTROL_IDLE) == 0)
                        continue;

                /*
                 * An emac soft_reset may have clobbered the mdio controller's
                 * state machine.  We need to reset and retry the current
                 * operation
                 */
                dev_warn(data->dev, "resetting idled controller\n");
                __davinci_mdio_reset(data);
                return -EAGAIN;
        }

        reg = __raw_readl(&regs->user[0].access);
        if ((reg & USERACCESS_GO) == 0)
                return 0;

        dev_err(data->dev, "timed out waiting for user access\n");
        return -ETIMEDOUT;
}

/* wait until hardware state machine is idle */
static inline int wait_for_idle(struct davinci_mdio_data *data)
{
        struct davinci_mdio_regs __iomem *regs = data->regs;
        unsigned long timeout = jiffies + msecs_to_jiffies(MDIO_TIMEOUT);

        while (time_after(timeout, jiffies)) {
                if (__raw_readl(&regs->control) & CONTROL_IDLE)
                        return 0;
        }
        dev_err(data->dev, "timed out waiting for idle\n");
        return -ETIMEDOUT;
}

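/*
 * MDIO read via the user access register: set GO together with the
 * register and phy address fields, wait for GO to clear, then check ACK
 * and extract the 16-bit data field.  A -EAGAIN from
 * wait_for_user_access() means the controller was reset underneath us
 * and the access must be retried.
 */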
static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
{
        struct davinci_mdio_data *data = bus->priv;
        u32 reg;
        int ret;

        if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
                return -EINVAL;

        spin_lock(&data->lock);
        if (data->suspended) {
                spin_unlock(&data->lock);
                return -ENODEV;
        }

        reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
               (phy_id << 16));

        while (1) {
                ret = wait_for_user_access(data);
                if (ret == -EAGAIN)
                        continue;
                if (ret < 0)
                        break;

                __raw_writel(reg, &data->regs->user[0].access);

                ret = wait_for_user_access(data);
                if (ret == -EAGAIN)
                        continue;
                if (ret < 0)
                        break;

                reg = __raw_readl(&data->regs->user[0].access);
                ret = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -EIO;
                break;
        }

        spin_unlock(&data->lock);

        return ret;
}

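/*
 * MDIO write: same user access handshake as the read path, except the
 * data field is supplied up front and completion is not checked for an
 * ACK from the phy.
 */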
static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
                              int phy_reg, u16 phy_data)
{
        struct davinci_mdio_data *data = bus->priv;
        u32 reg;
        int ret;

        if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
                return -EINVAL;

        spin_lock(&data->lock);
        if (data->suspended) {
                spin_unlock(&data->lock);
                return -ENODEV;
        }

        reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
               (phy_id << 16) | (phy_data & USERACCESS_DATA));

        while (1) {
                ret = wait_for_user_access(data);
                if (ret == -EAGAIN)
                        continue;
                if (ret < 0)
                        break;

                __raw_writel(reg, &data->regs->user[0].access);

                ret = wait_for_user_access(data);
                if (ret == -EAGAIN)
                        continue;
                break;
        }

        spin_unlock(&data->lock);

        return 0;
}

#if IS_ENABLED(CONFIG_OF)
static int davinci_mdio_probe_dt(struct mdio_platform_data *data,
                                 struct platform_device *pdev)
{
        struct device_node *node = pdev->dev.of_node;
        u32 prop;

        if (!node)
                return -EINVAL;

        if (of_property_read_u32(node, "bus_freq", &prop)) {
                pr_err("Missing bus_freq property in the DT.\n");
                return -EINVAL;
        }
        data->bus_freq = prop;

        return 0;
}
#endif

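/*
 * Probe: allocate the private data and mii bus, take configuration from
 * the device tree (falling back to default_pdata) or from platform data,
 * enable runtime PM and the "fck" functional clock, map the register
 * region, then register and scan the mii bus.
 */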
static int davinci_mdio_probe(struct platform_device *pdev)
{
        struct mdio_platform_data *pdata = dev_get_platdata(&pdev->dev);
        struct device *dev = &pdev->dev;
        struct davinci_mdio_data *data;
        struct resource *res;
        struct phy_device *phy;
        int ret, addr;

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        data->bus = mdiobus_alloc();
        if (!data->bus) {
                dev_err(dev, "failed to alloc mii bus\n");
                ret = -ENOMEM;
                goto bail_out;
        }

        if (dev->of_node) {
                if (davinci_mdio_probe_dt(&data->pdata, pdev))
                        data->pdata = default_pdata;
                snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s", pdev->name);
        } else {
                data->pdata = pdata ? (*pdata) : default_pdata;
                snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x",
                         pdev->name, pdev->id);
        }

        data->bus->name         = dev_name(dev);
        data->bus->read         = davinci_mdio_read,
        data->bus->write        = davinci_mdio_write,
        data->bus->reset        = davinci_mdio_reset,
        data->bus->parent       = dev;
        data->bus->priv         = data;

        /* Select default pin state */
        pinctrl_pm_select_default_state(&pdev->dev);

        pm_runtime_enable(&pdev->dev);
        pm_runtime_get_sync(&pdev->dev);
        data->clk = clk_get(&pdev->dev, "fck");
        if (IS_ERR(data->clk)) {
                dev_err(dev, "failed to get device clock\n");
                ret = PTR_ERR(data->clk);
                data->clk = NULL;
                goto bail_out;
        }

        dev_set_drvdata(dev, data);
        data->dev = dev;
        spin_lock_init(&data->lock);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(dev, "could not find register map resource\n");
                ret = -ENOENT;
                goto bail_out;
        }

        res = devm_request_mem_region(dev, res->start, resource_size(res),
                                      dev_name(dev));
        if (!res) {
                dev_err(dev, "could not allocate register map resource\n");
                ret = -ENXIO;
                goto bail_out;
        }

        data->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
        if (!data->regs) {
                dev_err(dev, "could not map mdio registers\n");
                ret = -ENOMEM;
                goto bail_out;
        }

        /* register the mii bus */
        ret = mdiobus_register(data->bus);
        if (ret)
                goto bail_out;

        /* scan and dump the bus */
        for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
                phy = data->bus->phy_map[addr];
                if (phy) {
                        dev_info(dev, "phy[%d]: device %s, driver %s\n",
                                 phy->addr, dev_name(&phy->dev),
                                 phy->drv ? phy->drv->name : "unknown");
                }
        }

        return 0;

bail_out:
        if (data->bus)
                mdiobus_free(data->bus);

        if (data->clk)
                clk_put(data->clk);
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);

        kfree(data);

        return ret;
}

static int davinci_mdio_remove(struct platform_device *pdev)
{
        struct davinci_mdio_data *data = platform_get_drvdata(pdev);

        if (data->bus) {
                mdiobus_unregister(data->bus);
                mdiobus_free(data->bus);
        }

        if (data->clk)
                clk_put(data->clk);
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);

        kfree(data);

        return 0;
}

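/*
 * On suspend the scan state machine is shut down (CONTROL_ENABLE is
 * cleared) and the bus is marked suspended so that subsequent read/write
 * calls fail with -ENODEV until resume re-runs __davinci_mdio_reset().
 */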
static int davinci_mdio_suspend(struct device *dev)
{
        struct davinci_mdio_data *data = dev_get_drvdata(dev);
        u32 ctrl;

        spin_lock(&data->lock);

        /* shutdown the scan state machine */
        ctrl = __raw_readl(&data->regs->control);
        ctrl &= ~CONTROL_ENABLE;
        __raw_writel(ctrl, &data->regs->control);
        wait_for_idle(data);

        data->suspended = true;
        spin_unlock(&data->lock);
        pm_runtime_put_sync(data->dev);

        /* Select sleep pin state */
        pinctrl_pm_select_sleep_state(dev);

        return 0;
}

static int davinci_mdio_resume(struct device *dev)
{
        struct davinci_mdio_data *data = dev_get_drvdata(dev);

        /* Select default pin state */
        pinctrl_pm_select_default_state(dev);

        pm_runtime_get_sync(data->dev);

        spin_lock(&data->lock);
        /* restart the scan state machine */
        __davinci_mdio_reset(data);

        data->suspended = false;
        spin_unlock(&data->lock);

        return 0;
}

static const struct dev_pm_ops davinci_mdio_pm_ops = {
        .suspend_late   = davinci_mdio_suspend,
        .resume_early   = davinci_mdio_resume,
};

#if IS_ENABLED(CONFIG_OF)
static const struct of_device_id davinci_mdio_of_mtable[] = {
        { .compatible = "ti,davinci_mdio", },
        { /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, davinci_mdio_of_mtable);
#endif

static struct platform_driver davinci_mdio_driver = {
        .driver = {
                .name    = "davinci_mdio",
                .owner   = THIS_MODULE,
                .pm      = &davinci_mdio_pm_ops,
                .of_match_table = of_match_ptr(davinci_mdio_of_mtable),
        },
        .probe = davinci_mdio_probe,
        .remove = davinci_mdio_remove,
};

static int __init davinci_mdio_init(void)
{
        return platform_driver_register(&davinci_mdio_driver);
}
device_initcall(davinci_mdio_init);

static void __exit davinci_mdio_exit(void)
{
        platform_driver_unregister(&davinci_mdio_driver);
}
module_exit(davinci_mdio_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("DaVinci MDIO driver");