am35x.c

/*
 * Texas Instruments AM35x "glue layer"
 *
 * Copyright (c) 2010, by Texas Instruments
 *
 * Based on the DA8xx "glue layer" code.
 * Copyright (c) 2008-2009, MontaVista Software, Inc. <source@mvista.com>
 *
 * This file is part of the Inventra Controller Driver for Linux.
 *
 * The Inventra Controller Driver for Linux is free software; you
 * can redistribute it and/or modify it under the terms of the GNU
 * General Public License version 2 as published by the Free Software
 * Foundation.
 *
 * The Inventra Controller Driver for Linux is distributed in
 * the hope that it will be useful, but WITHOUT ANY WARRANTY;
 * without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
 * License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with The Inventra Controller Driver for Linux; if not,
 * write to the Free Software Foundation, Inc., 59 Temple Place,
 * Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>

#include <plat/usb.h>

#include "musb_core.h"

/*
 * AM35x specific definitions
 */

/* USB 2.0 OTG module registers */
#define USB_REVISION_REG 0x00
#define USB_CTRL_REG 0x04
#define USB_STAT_REG 0x08
#define USB_EMULATION_REG 0x0c
/* 0x10 Reserved */
#define USB_AUTOREQ_REG 0x14
#define USB_SRP_FIX_TIME_REG 0x18
#define USB_TEARDOWN_REG 0x1c
#define EP_INTR_SRC_REG 0x20
#define EP_INTR_SRC_SET_REG 0x24
#define EP_INTR_SRC_CLEAR_REG 0x28
#define EP_INTR_MASK_REG 0x2c
#define EP_INTR_MASK_SET_REG 0x30
#define EP_INTR_MASK_CLEAR_REG 0x34
#define EP_INTR_SRC_MASKED_REG 0x38
#define CORE_INTR_SRC_REG 0x40
#define CORE_INTR_SRC_SET_REG 0x44
#define CORE_INTR_SRC_CLEAR_REG 0x48
#define CORE_INTR_MASK_REG 0x4c
#define CORE_INTR_MASK_SET_REG 0x50
#define CORE_INTR_MASK_CLEAR_REG 0x54
#define CORE_INTR_SRC_MASKED_REG 0x58
/* 0x5c Reserved */
#define USB_END_OF_INTR_REG 0x60

/* Control register bits */
#define AM35X_SOFT_RESET_MASK 1

/* USB interrupt register bits */
#define AM35X_INTR_USB_SHIFT 16
#define AM35X_INTR_USB_MASK (0x1ff << AM35X_INTR_USB_SHIFT)
#define AM35X_INTR_DRVVBUS 0x100
#define AM35X_INTR_RX_SHIFT 16
#define AM35X_INTR_TX_SHIFT 0
#define AM35X_TX_EP_MASK 0xffff /* EP0 + 15 Tx EPs */
#define AM35X_RX_EP_MASK 0xfffe /* 15 Rx EPs */
#define AM35X_TX_INTR_MASK (AM35X_TX_EP_MASK << AM35X_INTR_TX_SHIFT)
#define AM35X_RX_INTR_MASK (AM35X_RX_EP_MASK << AM35X_INTR_RX_SHIFT)

#define USB_MENTOR_CORE_OFFSET 0x400
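
/*
 * Per-instance glue state: the parent device, the child "musb-hdrc"
 * platform device, and the AM35x PHY ("fck") and interface ("ick") clocks.
 */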
struct am35x_glue {
        struct device *dev;
        struct platform_device *musb;
        struct clk *phy_clk;
        struct clk *clk;
};

#define glue_to_musb(g) platform_get_drvdata(g->musb)

/*
 * am35x_musb_enable - enable interrupts
 */
static void am35x_musb_enable(struct musb *musb)
{
        void __iomem *reg_base = musb->ctrl_base;
        u32 epmask;

        /* Workaround: setup IRQs through both register sets. */
        epmask = ((musb->epmask & AM35X_TX_EP_MASK) << AM35X_INTR_TX_SHIFT) |
                 ((musb->epmask & AM35X_RX_EP_MASK) << AM35X_INTR_RX_SHIFT);

        musb_writel(reg_base, EP_INTR_MASK_SET_REG, epmask);
        musb_writel(reg_base, CORE_INTR_MASK_SET_REG, AM35X_INTR_USB_MASK);

        /* Force the DRVVBUS IRQ so we can start polling for ID change. */
        if (is_otg_enabled(musb))
                musb_writel(reg_base, CORE_INTR_SRC_SET_REG,
                            AM35X_INTR_DRVVBUS << AM35X_INTR_USB_SHIFT);
}

/*
 * am35x_musb_disable - disable HDRC and flush interrupts
 */
static void am35x_musb_disable(struct musb *musb)
{
        void __iomem *reg_base = musb->ctrl_base;

        musb_writel(reg_base, CORE_INTR_MASK_CLEAR_REG, AM35X_INTR_USB_MASK);
        musb_writel(reg_base, EP_INTR_MASK_CLEAR_REG,
                    AM35X_TX_INTR_MASK | AM35X_RX_INTR_MASK);
        musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
        musb_writel(reg_base, USB_END_OF_INTR_REG, 0);
}

#define portstate(stmt) stmt
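
/*
 * Nothing to switch here: this callback only warns if someone asks us
 * to drive VBUS while the controller is active in peripheral mode.
 */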
static void am35x_musb_set_vbus(struct musb *musb, int is_on)
{
        WARN_ON(is_on && is_peripheral_active(musb));
}
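
/*
 * Workaround timer: the AM35x does not raise interrupts for several
 * OTG-critical transceiver events (notably ID changes), so DEVCTL is
 * polled periodically instead.
 */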
#define POLL_SECONDS 2

static struct timer_list otg_workaround;

static void otg_timer(unsigned long _musb)
{
        struct musb *musb = (void *)_musb;
        void __iomem *mregs = musb->mregs;
        u8 devctl;
        unsigned long flags;

        /*
         * We poll because AM35x's won't expose several OTG-critical
         * status change events (from the transceiver) otherwise.
         */
        devctl = musb_readb(mregs, MUSB_DEVCTL);
        dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl,
                otg_state_string(musb->xceiv->state));

        spin_lock_irqsave(&musb->lock, flags);
        switch (musb->xceiv->state) {
        case OTG_STATE_A_WAIT_BCON:
                devctl &= ~MUSB_DEVCTL_SESSION;
                musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);

                devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
                if (devctl & MUSB_DEVCTL_BDEVICE) {
                        musb->xceiv->state = OTG_STATE_B_IDLE;
                        MUSB_DEV_MODE(musb);
                } else {
                        musb->xceiv->state = OTG_STATE_A_IDLE;
                        MUSB_HST_MODE(musb);
                }
                break;
        case OTG_STATE_A_WAIT_VFALL:
                musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
                musb_writel(musb->ctrl_base, CORE_INTR_SRC_SET_REG,
                            MUSB_INTR_VBUSERROR << AM35X_INTR_USB_SHIFT);
                break;
        case OTG_STATE_B_IDLE:
                if (!is_peripheral_enabled(musb))
                        break;

                devctl = musb_readb(mregs, MUSB_DEVCTL);
                if (devctl & MUSB_DEVCTL_BDEVICE)
                        mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
                else
                        musb->xceiv->state = OTG_STATE_A_IDLE;
                break;
        default:
                break;
        }
        spin_unlock_irqrestore(&musb->lock, flags);
}
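
/*
 * Reschedule or cancel the OTG workaround poll: never idle while the
 * controller is active, and do not shorten an already pending, longer
 * idle timeout.
 */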
static void am35x_musb_try_idle(struct musb *musb, unsigned long timeout)
{
        static unsigned long last_timer;

        if (!is_otg_enabled(musb))
                return;

        if (timeout == 0)
                timeout = jiffies + msecs_to_jiffies(3);

        /* Never idle if active, or when VBUS timeout is not set as host */
        if (musb->is_active || (musb->a_wait_bcon == 0 &&
                                musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) {
                dev_dbg(musb->controller, "%s active, deleting timer\n",
                        otg_state_string(musb->xceiv->state));
                del_timer(&otg_workaround);
                last_timer = jiffies;
                return;
        }

        if (time_after(last_timer, timeout) && timer_pending(&otg_workaround)) {
                dev_dbg(musb->controller, "Longer idle timer already pending, ignoring...\n");
                return;
        }
        last_timer = timeout;

        dev_dbg(musb->controller, "%s inactive, starting idle timer for %u ms\n",
                otg_state_string(musb->xceiv->state),
                jiffies_to_msecs(timeout - jiffies));
        mod_timer(&otg_workaround, timeout);
}
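
/*
 * Top-level interrupt handler: decode the wrapper's endpoint and core
 * interrupt sources into int_rx/int_tx/int_usb, treat DRVVBUS as a
 * stand-in for the missing ID change IRQ, hand off to musb_interrupt()
 * and finally write EOI.
 */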
static irqreturn_t am35x_musb_interrupt(int irq, void *hci)
{
        struct musb *musb = hci;
        void __iomem *reg_base = musb->ctrl_base;
        struct device *dev = musb->controller;
        struct musb_hdrc_platform_data *plat = dev->platform_data;
        struct omap_musb_board_data *data = plat->board_data;
        struct usb_otg *otg = musb->xceiv->otg;
        unsigned long flags;
        irqreturn_t ret = IRQ_NONE;
        u32 epintr, usbintr;

        spin_lock_irqsave(&musb->lock, flags);

        /* Get endpoint interrupts */
        epintr = musb_readl(reg_base, EP_INTR_SRC_MASKED_REG);
        if (epintr) {
                musb_writel(reg_base, EP_INTR_SRC_CLEAR_REG, epintr);

                musb->int_rx =
                        (epintr & AM35X_RX_INTR_MASK) >> AM35X_INTR_RX_SHIFT;
                musb->int_tx =
                        (epintr & AM35X_TX_INTR_MASK) >> AM35X_INTR_TX_SHIFT;
        }

        /* Get usb core interrupts */
        usbintr = musb_readl(reg_base, CORE_INTR_SRC_MASKED_REG);
        if (!usbintr && !epintr)
                goto eoi;

        if (usbintr) {
                musb_writel(reg_base, CORE_INTR_SRC_CLEAR_REG, usbintr);

                musb->int_usb =
                        (usbintr & AM35X_INTR_USB_MASK) >> AM35X_INTR_USB_SHIFT;
        }

        /*
         * DRVVBUS IRQs are the only proxy we have (a very poor one!) for
         * AM35x's missing ID change IRQ. We need an ID change IRQ to
         * switch appropriately between halves of the OTG state machine.
         * Managing DEVCTL.SESSION per Mentor docs requires that we know its
         * value but DEVCTL.BDEVICE is invalid without DEVCTL.SESSION set.
         * Also, DRVVBUS pulses for SRP (but not at 5V) ...
         */
        if (usbintr & (AM35X_INTR_DRVVBUS << AM35X_INTR_USB_SHIFT)) {
                int drvvbus = musb_readl(reg_base, USB_STAT_REG);
                void __iomem *mregs = musb->mregs;
                u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
                int err;

                err = is_host_enabled(musb) && (musb->int_usb &
                                                MUSB_INTR_VBUSERROR);
                if (err) {
                        /*
                         * The Mentor core doesn't debounce VBUS as needed
                         * to cope with device connect current spikes. This
                         * means it's not uncommon for bus-powered devices
                         * to get VBUS errors during enumeration.
                         *
                         * This is a workaround, but newer RTL from Mentor
                         * seems to allow a better one: "re"-starting sessions
                         * without waiting for VBUS to stop registering in
                         * devctl.
                         */
                        musb->int_usb &= ~MUSB_INTR_VBUSERROR;
                        musb->xceiv->state = OTG_STATE_A_WAIT_VFALL;
                        mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
                        WARNING("VBUS error workaround (delay coming)\n");
                } else if (is_host_enabled(musb) && drvvbus) {
                        MUSB_HST_MODE(musb);
                        otg->default_a = 1;
                        musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
                        portstate(musb->port1_status |= USB_PORT_STAT_POWER);
                        del_timer(&otg_workaround);
                } else {
                        musb->is_active = 0;
                        MUSB_DEV_MODE(musb);
                        otg->default_a = 0;
                        musb->xceiv->state = OTG_STATE_B_IDLE;
                        portstate(musb->port1_status &= ~USB_PORT_STAT_POWER);
                }

                /* NOTE: this must complete power-on within 100 ms. */
                dev_dbg(musb->controller, "VBUS %s (%s)%s, devctl %02x\n",
                        drvvbus ? "on" : "off",
                        otg_state_string(musb->xceiv->state),
                        err ? " ERROR" : "",
                        devctl);
                ret = IRQ_HANDLED;
        }

        if (musb->int_tx || musb->int_rx || musb->int_usb)
                ret |= musb_interrupt(musb);

eoi:
        /* EOI needs to be written for the IRQ to be re-asserted. */
        if (ret == IRQ_HANDLED || epintr || usbintr) {
                /* clear level interrupt */
                if (data->clear_irq)
                        data->clear_irq();
                /* write EOI */
                musb_writel(reg_base, USB_END_OF_INTR_REG, 0);
        }

        /* Poll for ID change */
        if (is_otg_enabled(musb) && musb->xceiv->state == OTG_STATE_B_IDLE)
                mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);

        spin_unlock_irqrestore(&musb->lock, flags);

        return ret;
}
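
/*
 * Host/peripheral mode selection is board specific on AM35x, so defer
 * to the board hook; report -EIO if none was provided.
 */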
static int am35x_musb_set_mode(struct musb *musb, u8 musb_mode)
{
        struct device *dev = musb->controller;
        struct musb_hdrc_platform_data *plat = dev->platform_data;
        struct omap_musb_board_data *data = plat->board_data;
        int retval = 0;

        if (data->set_mode)
                data->set_mode(musb_mode);
        else
                retval = -EIO;

        return retval;
}
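
/*
 * One-time controller setup: locate the Mentor core registers, verify
 * the module is clocked, grab the (NOP) transceiver, reset the wrapper
 * and power up the on-chip PHY.
 */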
static int am35x_musb_init(struct musb *musb)
{
        struct device *dev = musb->controller;
        struct musb_hdrc_platform_data *plat = dev->platform_data;
        struct omap_musb_board_data *data = plat->board_data;
        void __iomem *reg_base = musb->ctrl_base;
        u32 rev;

        musb->mregs += USB_MENTOR_CORE_OFFSET;

        /* Returns zero if e.g. not clocked */
        rev = musb_readl(reg_base, USB_REVISION_REG);
        if (!rev)
                return -ENODEV;

        usb_nop_xceiv_register();
        musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
        if (IS_ERR_OR_NULL(musb->xceiv))
                return -ENODEV;

        if (is_host_enabled(musb))
                setup_timer(&otg_workaround, otg_timer, (unsigned long) musb);

        /* Reset the musb */
        if (data->reset)
                data->reset();

        /* Reset the controller */
        musb_writel(reg_base, USB_CTRL_REG, AM35X_SOFT_RESET_MASK);

        /* Start the on-chip PHY and its PLL. */
        if (data->set_phy_power)
                data->set_phy_power(1);

        msleep(5);

        musb->isr = am35x_musb_interrupt;

        /* clear level interrupt */
        if (data->clear_irq)
                data->clear_irq();

        return 0;
}

static int am35x_musb_exit(struct musb *musb)
{
        struct device *dev = musb->controller;
        struct musb_hdrc_platform_data *plat = dev->platform_data;
        struct omap_musb_board_data *data = plat->board_data;

        if (is_host_enabled(musb))
                del_timer_sync(&otg_workaround);

        /* Shutdown the on-chip PHY and its PLL. */
        if (data->set_phy_power)
                data->set_phy_power(0);

        usb_put_phy(musb->xceiv);
        usb_nop_xceiv_unregister();

        return 0;
}

/* AM35x supports only 32bit read operation */
void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
{
        void __iomem *fifo = hw_ep->fifo;
        u32 val;
        int i;

        /* Read for 32bit-aligned destination address */
        if (likely((0x03 & (unsigned long) dst) == 0) && len >= 4) {
                readsl(fifo, dst, len >> 2);
                dst += len & ~0x03;
                len &= 0x03;
        }
        /*
         * Now read the remaining 1 to 3 bytes or the complete length if
         * the address is unaligned.
         */
        if (len > 4) {
                for (i = 0; i < (len >> 2); i++) {
                        *(u32 *) dst = musb_readl(fifo, 0);
                        dst += 4;
                }
                len &= 0x03;
        }
        if (len > 0) {
                val = musb_readl(fifo, 0);
                memcpy(dst, &val, len);
        }
}
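
/* Platform callbacks handed to the MUSB core via pdata->platform_ops. */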
static const struct musb_platform_ops am35x_ops = {
        .init = am35x_musb_init,
        .exit = am35x_musb_exit,
        .enable = am35x_musb_enable,
        .disable = am35x_musb_disable,
        .set_mode = am35x_musb_set_mode,
        .try_idle = am35x_musb_try_idle,
        .set_vbus = am35x_musb_set_vbus,
};

static u64 am35x_dmamask = DMA_BIT_MASK(32);
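
/*
 * Probe: allocate the glue state and a child "musb-hdrc" device, get
 * and enable the PHY ("fck") and interface ("ick") clocks, then hand
 * the platform data (with our ops) and resources to the MUSB core.
 */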
static int __devinit am35x_probe(struct platform_device *pdev)
{
        struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data;
        struct platform_device *musb;
        struct am35x_glue *glue;
        struct clk *phy_clk;
        struct clk *clk;

        int ret = -ENOMEM;

        glue = kzalloc(sizeof(*glue), GFP_KERNEL);
        if (!glue) {
                dev_err(&pdev->dev, "failed to allocate glue context\n");
                goto err0;
        }

        musb = platform_device_alloc("musb-hdrc", -1);
        if (!musb) {
                dev_err(&pdev->dev, "failed to allocate musb device\n");
                goto err1;
        }

        phy_clk = clk_get(&pdev->dev, "fck");
        if (IS_ERR(phy_clk)) {
                dev_err(&pdev->dev, "failed to get PHY clock\n");
                ret = PTR_ERR(phy_clk);
                goto err2;
        }

        clk = clk_get(&pdev->dev, "ick");
        if (IS_ERR(clk)) {
                dev_err(&pdev->dev, "failed to get clock\n");
                ret = PTR_ERR(clk);
                goto err3;
        }

        ret = clk_enable(phy_clk);
        if (ret) {
                dev_err(&pdev->dev, "failed to enable PHY clock\n");
                goto err4;
        }

        ret = clk_enable(clk);
        if (ret) {
                dev_err(&pdev->dev, "failed to enable clock\n");
                goto err5;
        }

        musb->dev.parent = &pdev->dev;
        musb->dev.dma_mask = &am35x_dmamask;
        musb->dev.coherent_dma_mask = am35x_dmamask;

        glue->dev = &pdev->dev;
        glue->musb = musb;
        glue->phy_clk = phy_clk;
        glue->clk = clk;

        pdata->platform_ops = &am35x_ops;

        platform_set_drvdata(pdev, glue);

        ret = platform_device_add_resources(musb, pdev->resource,
                        pdev->num_resources);
        if (ret) {
                dev_err(&pdev->dev, "failed to add resources\n");
                goto err6;
        }

        ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
        if (ret) {
                dev_err(&pdev->dev, "failed to add platform_data\n");
                goto err6;
        }

        ret = platform_device_add(musb);
        if (ret) {
                dev_err(&pdev->dev, "failed to register musb device\n");
                goto err6;
        }

        return 0;

err6:
        clk_disable(clk);

err5:
        clk_disable(phy_clk);

err4:
        clk_put(clk);

err3:
        clk_put(phy_clk);

err2:
        platform_device_put(musb);

err1:
        kfree(glue);

err0:
        return ret;
}

static int __devexit am35x_remove(struct platform_device *pdev)
{
        struct am35x_glue *glue = platform_get_drvdata(pdev);

        platform_device_del(glue->musb);
        platform_device_put(glue->musb);
        clk_disable(glue->clk);
        clk_disable(glue->phy_clk);
        clk_put(glue->clk);
        clk_put(glue->phy_clk);
        kfree(glue);

        return 0;
}

#ifdef CONFIG_PM
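/*
 * System sleep support: suspend powers the PHY down and gates both
 * clocks; resume reverses the sequence.
 */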
static int am35x_suspend(struct device *dev)
{
        struct am35x_glue *glue = dev_get_drvdata(dev);
        struct musb_hdrc_platform_data *plat = dev->platform_data;
        struct omap_musb_board_data *data = plat->board_data;

        /* Shutdown the on-chip PHY and its PLL. */
        if (data->set_phy_power)
                data->set_phy_power(0);

        clk_disable(glue->phy_clk);
        clk_disable(glue->clk);

        return 0;
}

static int am35x_resume(struct device *dev)
{
        struct am35x_glue *glue = dev_get_drvdata(dev);
        struct musb_hdrc_platform_data *plat = dev->platform_data;
        struct omap_musb_board_data *data = plat->board_data;
        int ret;

        /* Start the on-chip PHY and its PLL. */
        if (data->set_phy_power)
                data->set_phy_power(1);

        ret = clk_enable(glue->phy_clk);
        if (ret) {
                dev_err(dev, "failed to enable PHY clock\n");
                return ret;
        }

        ret = clk_enable(glue->clk);
        if (ret) {
                dev_err(dev, "failed to enable clock\n");
                return ret;
        }

        return 0;
}

static struct dev_pm_ops am35x_pm_ops = {
        .suspend = am35x_suspend,
        .resume = am35x_resume,
};

#define DEV_PM_OPS &am35x_pm_ops
#else
#define DEV_PM_OPS NULL
#endif

static struct platform_driver am35x_driver = {
        .probe = am35x_probe,
        .remove = __devexit_p(am35x_remove),
        .driver = {
                .name = "musb-am35x",
                .pm = DEV_PM_OPS,
        },
};

MODULE_DESCRIPTION("AM35x MUSB Glue Layer");
MODULE_AUTHOR("Ajay Kumar Gupta <ajay.gupta@ti.com>");
MODULE_LICENSE("GPL v2");

static int __init am35x_init(void)
{
        return platform_driver_register(&am35x_driver);
}
module_init(am35x_init);

static void __exit am35x_exit(void)
{
        platform_driver_unregister(&am35x_driver);
}
module_exit(am35x_exit);