/*
 * Texas Instruments AM35x "glue layer"
 *
 * Copyright (c) 2010, by Texas Instruments
 *
 * Based on the DA8xx "glue layer" code.
 * Copyright (c) 2008-2009, MontaVista Software, Inc. <source@mvista.com>
 *
 * This file is part of the Inventra Controller Driver for Linux.
 *
 * The Inventra Controller Driver for Linux is free software; you
 * can redistribute it and/or modify it under the terms of the GNU
 * General Public License version 2 as published by the Free Software
 * Foundation.
 *
 * The Inventra Controller Driver for Linux is distributed in
 * the hope that it will be useful, but WITHOUT ANY WARRANTY;
 * without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
 * License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with The Inventra Controller Driver for Linux; if not,
 * write to the Free Software Foundation, Inc., 59 Temple Place,
 * Suite 330, Boston, MA 02111-1307 USA
 *
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>

#include <plat/usb.h>

#include "musb_core.h"
/*
 * AM35x specific definitions
 */

/* USB 2.0 OTG module registers */
#define USB_REVISION_REG        0x00
#define USB_CTRL_REG            0x04
#define USB_STAT_REG            0x08
#define USB_EMULATION_REG       0x0c
/* 0x10 Reserved */
#define USB_AUTOREQ_REG         0x14
#define USB_SRP_FIX_TIME_REG    0x18
#define USB_TEARDOWN_REG        0x1c
#define EP_INTR_SRC_REG         0x20
#define EP_INTR_SRC_SET_REG     0x24
#define EP_INTR_SRC_CLEAR_REG   0x28
#define EP_INTR_MASK_REG        0x2c
#define EP_INTR_MASK_SET_REG    0x30
#define EP_INTR_MASK_CLEAR_REG  0x34
#define EP_INTR_SRC_MASKED_REG  0x38
#define CORE_INTR_SRC_REG       0x40
#define CORE_INTR_SRC_SET_REG   0x44
#define CORE_INTR_SRC_CLEAR_REG 0x48
#define CORE_INTR_MASK_REG      0x4c
#define CORE_INTR_MASK_SET_REG  0x50
#define CORE_INTR_MASK_CLEAR_REG 0x54
#define CORE_INTR_SRC_MASKED_REG 0x58
/* 0x5c Reserved */
#define USB_END_OF_INTR_REG     0x60

/* Control register bits */
#define AM35X_SOFT_RESET_MASK   1

/* USB interrupt register bits */
#define AM35X_INTR_USB_SHIFT    16
#define AM35X_INTR_USB_MASK     (0x1ff << AM35X_INTR_USB_SHIFT)
#define AM35X_INTR_DRVVBUS      0x100
#define AM35X_INTR_RX_SHIFT     16
#define AM35X_INTR_TX_SHIFT     0
#define AM35X_TX_EP_MASK        0xffff  /* EP0 + 15 Tx EPs */
#define AM35X_RX_EP_MASK        0xfffe  /* 15 Rx EPs */
#define AM35X_TX_INTR_MASK      (AM35X_TX_EP_MASK << AM35X_INTR_TX_SHIFT)
#define AM35X_RX_INTR_MASK      (AM35X_RX_EP_MASK << AM35X_INTR_RX_SHIFT)

#define USB_MENTOR_CORE_OFFSET  0x400
struct am35x_glue {
        struct device           *dev;
        struct platform_device  *musb;
        struct clk              *phy_clk;
        struct clk              *clk;
};
#define glue_to_musb(g)         platform_get_drvdata(g->musb)
/*
 * am35x_musb_enable - enable interrupts
 */
static void am35x_musb_enable(struct musb *musb)
{
        void __iomem *reg_base = musb->ctrl_base;
        u32 epmask;

        /* Workaround: setup IRQs through both register sets. */
        epmask = ((musb->epmask & AM35X_TX_EP_MASK) << AM35X_INTR_TX_SHIFT) |
                 ((musb->epmask & AM35X_RX_EP_MASK) << AM35X_INTR_RX_SHIFT);

        musb_writel(reg_base, EP_INTR_MASK_SET_REG, epmask);
        musb_writel(reg_base, CORE_INTR_MASK_SET_REG, AM35X_INTR_USB_MASK);

        /* Force the DRVVBUS IRQ so we can start polling for ID change. */
        musb_writel(reg_base, CORE_INTR_SRC_SET_REG,
                    AM35X_INTR_DRVVBUS << AM35X_INTR_USB_SHIFT);
}
/*
 * am35x_musb_disable - disable HDRC and flush interrupts
 */
static void am35x_musb_disable(struct musb *musb)
{
        void __iomem *reg_base = musb->ctrl_base;

        musb_writel(reg_base, CORE_INTR_MASK_CLEAR_REG, AM35X_INTR_USB_MASK);
        musb_writel(reg_base, EP_INTR_MASK_CLEAR_REG,
                    AM35X_TX_INTR_MASK | AM35X_RX_INTR_MASK);
        musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
        musb_writel(reg_base, USB_END_OF_INTR_REG, 0);
}
#define portstate(stmt)         stmt
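
/*
 * am35x_musb_set_vbus - the glue layer does not switch VBUS itself; just
 * warn if we are asked to enable VBUS while active as a peripheral.
 */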
static void am35x_musb_set_vbus(struct musb *musb, int is_on)
{
        WARN_ON(is_on && is_peripheral_active(musb));
}
#define POLL_SECONDS    2

static struct timer_list otg_workaround;
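
/*
 * otg_timer - workaround timer that periodically samples DEVCTL and
 * advances the OTG state machine, since AM35x does not report ID change
 * events from the transceiver on its own.
 */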
static void otg_timer(unsigned long _musb)
{
        struct musb *musb = (void *)_musb;
        void __iomem *mregs = musb->mregs;
        u8 devctl;
        unsigned long flags;

        /*
         * We poll because AM35x's won't expose several OTG-critical
         * status change events (from the transceiver) otherwise.
         */
        devctl = musb_readb(mregs, MUSB_DEVCTL);
        dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl,
                otg_state_string(musb->xceiv->state));

        spin_lock_irqsave(&musb->lock, flags);
        switch (musb->xceiv->state) {
        case OTG_STATE_A_WAIT_BCON:
                devctl &= ~MUSB_DEVCTL_SESSION;
                musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);

                devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
                if (devctl & MUSB_DEVCTL_BDEVICE) {
                        musb->xceiv->state = OTG_STATE_B_IDLE;
                        MUSB_DEV_MODE(musb);
                } else {
                        musb->xceiv->state = OTG_STATE_A_IDLE;
                        MUSB_HST_MODE(musb);
                }
                break;
        case OTG_STATE_A_WAIT_VFALL:
                musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
                musb_writel(musb->ctrl_base, CORE_INTR_SRC_SET_REG,
                            MUSB_INTR_VBUSERROR << AM35X_INTR_USB_SHIFT);
                break;
        case OTG_STATE_B_IDLE:
                devctl = musb_readb(mregs, MUSB_DEVCTL);
                if (devctl & MUSB_DEVCTL_BDEVICE)
                        mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
                else
                        musb->xceiv->state = OTG_STATE_A_IDLE;
                break;
        default:
                break;
        }
        spin_unlock_irqrestore(&musb->lock, flags);
}
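
/*
 * am35x_musb_try_idle - (re)arm or cancel the OTG workaround timer
 * depending on whether the controller is currently active.
 */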
static void am35x_musb_try_idle(struct musb *musb, unsigned long timeout)
{
        static unsigned long last_timer;

        if (timeout == 0)
                timeout = jiffies + msecs_to_jiffies(3);

        /* Never idle if active, or when VBUS timeout is not set as host */
        if (musb->is_active || (musb->a_wait_bcon == 0 &&
                                musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) {
                dev_dbg(musb->controller, "%s active, deleting timer\n",
                        otg_state_string(musb->xceiv->state));
                del_timer(&otg_workaround);
                last_timer = jiffies;
                return;
        }

        if (time_after(last_timer, timeout) && timer_pending(&otg_workaround)) {
                dev_dbg(musb->controller, "Longer idle timer already pending, ignoring...\n");
                return;
        }
        last_timer = timeout;

        dev_dbg(musb->controller, "%s inactive, starting idle timer for %u ms\n",
                otg_state_string(musb->xceiv->state),
                jiffies_to_msecs(timeout - jiffies));
        mod_timer(&otg_workaround, timeout);
}
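
/*
 * am35x_musb_interrupt - top-level AM35x interrupt handler: decode the
 * endpoint and core interrupt sources, emulate the missing ID change
 * event from DRVVBUS, then hand off to the generic musb_interrupt().
 */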
static irqreturn_t am35x_musb_interrupt(int irq, void *hci)
{
        struct musb *musb = hci;
        void __iomem *reg_base = musb->ctrl_base;
        struct device *dev = musb->controller;
        struct musb_hdrc_platform_data *plat = dev->platform_data;
        struct omap_musb_board_data *data = plat->board_data;
        struct usb_otg *otg = musb->xceiv->otg;
        unsigned long flags;
        irqreturn_t ret = IRQ_NONE;
        u32 epintr, usbintr;

        spin_lock_irqsave(&musb->lock, flags);

        /* Get endpoint interrupts */
        epintr = musb_readl(reg_base, EP_INTR_SRC_MASKED_REG);
        if (epintr) {
                musb_writel(reg_base, EP_INTR_SRC_CLEAR_REG, epintr);

                musb->int_rx =
                        (epintr & AM35X_RX_INTR_MASK) >> AM35X_INTR_RX_SHIFT;
                musb->int_tx =
                        (epintr & AM35X_TX_INTR_MASK) >> AM35X_INTR_TX_SHIFT;
        }

        /* Get usb core interrupts */
        usbintr = musb_readl(reg_base, CORE_INTR_SRC_MASKED_REG);
        if (!usbintr && !epintr)
                goto eoi;

        if (usbintr) {
                musb_writel(reg_base, CORE_INTR_SRC_CLEAR_REG, usbintr);

                musb->int_usb =
                        (usbintr & AM35X_INTR_USB_MASK) >> AM35X_INTR_USB_SHIFT;
        }

        /*
         * DRVVBUS IRQs are the only proxy we have (a very poor one!) for
         * AM35x's missing ID change IRQ.  We need an ID change IRQ to
         * switch appropriately between halves of the OTG state machine.
         * Managing DEVCTL.SESSION per Mentor docs requires that we know its
         * value but DEVCTL.BDEVICE is invalid without DEVCTL.SESSION set.
         * Also, DRVVBUS pulses for SRP (but not at 5V) ...
         */
        if (usbintr & (AM35X_INTR_DRVVBUS << AM35X_INTR_USB_SHIFT)) {
                int drvvbus = musb_readl(reg_base, USB_STAT_REG);
                void __iomem *mregs = musb->mregs;
                u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
                int err;

                err = musb->int_usb & MUSB_INTR_VBUSERROR;
                if (err) {
                        /*
                         * The Mentor core doesn't debounce VBUS as needed
                         * to cope with device connect current spikes. This
                         * means it's not uncommon for bus-powered devices
                         * to get VBUS errors during enumeration.
                         *
                         * This is a workaround, but newer RTL from Mentor
                         * seems to allow a better one: "re"-starting sessions
                         * without waiting for VBUS to stop registering in
                         * devctl.
                         */
                        musb->int_usb &= ~MUSB_INTR_VBUSERROR;
                        musb->xceiv->state = OTG_STATE_A_WAIT_VFALL;
                        mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
                        WARNING("VBUS error workaround (delay coming)\n");
                } else if (drvvbus) {
                        MUSB_HST_MODE(musb);
                        otg->default_a = 1;
                        musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
                        portstate(musb->port1_status |= USB_PORT_STAT_POWER);
                        del_timer(&otg_workaround);
                } else {
                        musb->is_active = 0;
                        MUSB_DEV_MODE(musb);
                        otg->default_a = 0;
                        musb->xceiv->state = OTG_STATE_B_IDLE;
                        portstate(musb->port1_status &= ~USB_PORT_STAT_POWER);
                }

                /* NOTE: this must complete power-on within 100 ms. */
                dev_dbg(musb->controller, "VBUS %s (%s)%s, devctl %02x\n",
                        drvvbus ? "on" : "off",
                        otg_state_string(musb->xceiv->state),
                        err ? " ERROR" : "",
                        devctl);
                ret = IRQ_HANDLED;
        }

        if (musb->int_tx || musb->int_rx || musb->int_usb)
                ret |= musb_interrupt(musb);

eoi:
        /* EOI needs to be written for the IRQ to be re-asserted. */
        if (ret == IRQ_HANDLED || epintr || usbintr) {
                /* clear level interrupt */
                if (data->clear_irq)
                        data->clear_irq();
                /* write EOI */
                musb_writel(reg_base, USB_END_OF_INTR_REG, 0);
        }

        /* Poll for ID change */
        if (musb->xceiv->state == OTG_STATE_B_IDLE)
                mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);

        spin_unlock_irqrestore(&musb->lock, flags);

        return ret;
}
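
/*
 * am35x_musb_set_mode - delegate host/peripheral/OTG mode selection to the
 * board-provided callback; fail with -EIO if none is registered.
 */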
static int am35x_musb_set_mode(struct musb *musb, u8 musb_mode)
{
        struct device *dev = musb->controller;
        struct musb_hdrc_platform_data *plat = dev->platform_data;
        struct omap_musb_board_data *data = plat->board_data;
        int retval = 0;

        if (data->set_mode)
                data->set_mode(musb_mode);
        else
                retval = -EIO;

        return retval;
}
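
/*
 * am35x_musb_init - one-time controller setup: locate the Mentor core
 * registers, claim the PHY, reset the controller and power up the PHY.
 */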
static int am35x_musb_init(struct musb *musb)
{
        struct device *dev = musb->controller;
        struct musb_hdrc_platform_data *plat = dev->platform_data;
        struct omap_musb_board_data *data = plat->board_data;
        void __iomem *reg_base = musb->ctrl_base;
        u32 rev;

        musb->mregs += USB_MENTOR_CORE_OFFSET;

        /* Returns zero if e.g. not clocked */
        rev = musb_readl(reg_base, USB_REVISION_REG);
        if (!rev)
                return -ENODEV;

        usb_nop_xceiv_register();
        musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
        if (IS_ERR_OR_NULL(musb->xceiv))
                return -ENODEV;

        setup_timer(&otg_workaround, otg_timer, (unsigned long) musb);

        /* Reset the musb */
        if (data->reset)
                data->reset();

        /* Reset the controller */
        musb_writel(reg_base, USB_CTRL_REG, AM35X_SOFT_RESET_MASK);

        /* Start the on-chip PHY and its PLL. */
        if (data->set_phy_power)
                data->set_phy_power(1);

        msleep(5);

        musb->isr = am35x_musb_interrupt;

        /* clear level interrupt */
        if (data->clear_irq)
                data->clear_irq();

        return 0;
}
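
/*
 * am35x_musb_exit - undo am35x_musb_init: stop the workaround timer,
 * power down the PHY and release it.
 */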
static int am35x_musb_exit(struct musb *musb)
{
        struct device *dev = musb->controller;
        struct musb_hdrc_platform_data *plat = dev->platform_data;
        struct omap_musb_board_data *data = plat->board_data;

        del_timer_sync(&otg_workaround);

        /* Shutdown the on-chip PHY and its PLL. */
        if (data->set_phy_power)
                data->set_phy_power(0);

        usb_put_phy(musb->xceiv);
        usb_nop_xceiv_unregister();

        return 0;
}
/* AM35x supports only 32bit read operation */
void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
{
        void __iomem *fifo = hw_ep->fifo;
        u32 val;
        int i;

        /* Read for 32bit-aligned destination address */
        if (likely((0x03 & (unsigned long) dst) == 0) && len >= 4) {
                readsl(fifo, dst, len >> 2);
                dst += len & ~0x03;
                len &= 0x03;
        }
        /*
         * Now read the remaining 1 to 3 bytes or complete length if
         * unaligned address.
         */
        if (len > 4) {
                for (i = 0; i < (len >> 2); i++) {
                        *(u32 *) dst = musb_readl(fifo, 0);
                        dst += 4;
                }
                len &= 0x03;
        }
        if (len > 0) {
                val = musb_readl(fifo, 0);
                memcpy(dst, &val, len);
        }
}
static const struct musb_platform_ops am35x_ops = {
        .init           = am35x_musb_init,
        .exit           = am35x_musb_exit,

        .enable         = am35x_musb_enable,
        .disable        = am35x_musb_disable,

        .set_mode       = am35x_musb_set_mode,
        .try_idle       = am35x_musb_try_idle,

        .set_vbus       = am35x_musb_set_vbus,
};

static u64 am35x_dmamask = DMA_BIT_MASK(32);
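
/*
 * am35x_probe - allocate the glue context, grab the functional and PHY
 * clocks, then create and register the "musb-hdrc" child device that the
 * core driver binds to.
 */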
static int __devinit am35x_probe(struct platform_device *pdev)
{
        struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data;
        struct platform_device *musb;
        struct am35x_glue *glue;
        struct clk *phy_clk;
        struct clk *clk;
        int ret = -ENOMEM;
        int musbid;

        glue = kzalloc(sizeof(*glue), GFP_KERNEL);
        if (!glue) {
                dev_err(&pdev->dev, "failed to allocate glue context\n");
                goto err0;
        }

        /* get the musb id */
        musbid = musb_get_id(&pdev->dev, GFP_KERNEL);
        if (musbid < 0) {
                dev_err(&pdev->dev, "failed to allocate musb id\n");
                ret = -ENOMEM;
                goto err1;
        }

        musb = platform_device_alloc("musb-hdrc", musbid);
        if (!musb) {
                dev_err(&pdev->dev, "failed to allocate musb device\n");
                goto err2;
        }

        phy_clk = clk_get(&pdev->dev, "fck");
        if (IS_ERR(phy_clk)) {
                dev_err(&pdev->dev, "failed to get PHY clock\n");
                ret = PTR_ERR(phy_clk);
                goto err3;
        }

        clk = clk_get(&pdev->dev, "ick");
        if (IS_ERR(clk)) {
                dev_err(&pdev->dev, "failed to get clock\n");
                ret = PTR_ERR(clk);
                goto err4;
        }

        ret = clk_enable(phy_clk);
        if (ret) {
                dev_err(&pdev->dev, "failed to enable PHY clock\n");
                goto err5;
        }

        ret = clk_enable(clk);
        if (ret) {
                dev_err(&pdev->dev, "failed to enable clock\n");
                goto err6;
        }

        musb->id                        = musbid;
        musb->dev.parent                = &pdev->dev;
        musb->dev.dma_mask              = &am35x_dmamask;
        musb->dev.coherent_dma_mask     = am35x_dmamask;

        glue->dev                       = &pdev->dev;
        glue->musb                      = musb;
        glue->phy_clk                   = phy_clk;
        glue->clk                       = clk;

        pdata->platform_ops             = &am35x_ops;

        platform_set_drvdata(pdev, glue);

        ret = platform_device_add_resources(musb, pdev->resource,
                                            pdev->num_resources);
        if (ret) {
                dev_err(&pdev->dev, "failed to add resources\n");
                goto err7;
        }

        ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
        if (ret) {
                dev_err(&pdev->dev, "failed to add platform_data\n");
                goto err7;
        }

        ret = platform_device_add(musb);
        if (ret) {
                dev_err(&pdev->dev, "failed to register musb device\n");
                goto err7;
        }

        return 0;

err7:
        clk_disable(clk);

err6:
        clk_disable(phy_clk);

err5:
        clk_put(clk);

err4:
        clk_put(phy_clk);

err3:
        platform_device_put(musb);

err2:
        musb_put_id(&pdev->dev, musbid);

err1:
        kfree(glue);

err0:
        return ret;
}
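
/*
 * am35x_remove - tear down in the reverse order of am35x_probe.
 */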
static int __devexit am35x_remove(struct platform_device *pdev)
{
        struct am35x_glue *glue = platform_get_drvdata(pdev);

        musb_put_id(&pdev->dev, glue->musb->id);
        platform_device_del(glue->musb);
        platform_device_put(glue->musb);
        clk_disable(glue->clk);
        clk_disable(glue->phy_clk);
        clk_put(glue->clk);
        clk_put(glue->phy_clk);
        kfree(glue);

        return 0;
}
#ifdef CONFIG_PM
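/*
 * System suspend/resume: power the on-chip PHY down/up and gate/ungate
 * both clocks around the sleep state.
 */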
static int am35x_suspend(struct device *dev)
{
        struct am35x_glue *glue = dev_get_drvdata(dev);
        struct musb_hdrc_platform_data *plat = dev->platform_data;
        struct omap_musb_board_data *data = plat->board_data;

        /* Shutdown the on-chip PHY and its PLL. */
        if (data->set_phy_power)
                data->set_phy_power(0);

        clk_disable(glue->phy_clk);
        clk_disable(glue->clk);

        return 0;
}

static int am35x_resume(struct device *dev)
{
        struct am35x_glue *glue = dev_get_drvdata(dev);
        struct musb_hdrc_platform_data *plat = dev->platform_data;
        struct omap_musb_board_data *data = plat->board_data;
        int ret;

        /* Start the on-chip PHY and its PLL. */
        if (data->set_phy_power)
                data->set_phy_power(1);

        ret = clk_enable(glue->phy_clk);
        if (ret) {
                dev_err(dev, "failed to enable PHY clock\n");
                return ret;
        }

        ret = clk_enable(glue->clk);
        if (ret) {
                dev_err(dev, "failed to enable clock\n");
                return ret;
        }

        return 0;
}

static struct dev_pm_ops am35x_pm_ops = {
        .suspend        = am35x_suspend,
        .resume         = am35x_resume,
};

#define DEV_PM_OPS      &am35x_pm_ops
#else
#define DEV_PM_OPS      NULL
#endif
static struct platform_driver am35x_driver = {
        .probe          = am35x_probe,
        .remove         = __devexit_p(am35x_remove),
        .driver         = {
                .name   = "musb-am35x",
                .pm     = DEV_PM_OPS,
        },
};

MODULE_DESCRIPTION("AM35x MUSB Glue Layer");
MODULE_AUTHOR("Ajay Kumar Gupta <ajay.gupta@ti.com>");
MODULE_LICENSE("GPL v2");

static int __init am35x_init(void)
{
        return platform_driver_register(&am35x_driver);
}
module_init(am35x_init);

static void __exit am35x_exit(void)
{
        platform_driver_unregister(&am35x_driver);
}
module_exit(am35x_exit);