mv_udc_core.c

/*
 * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
 * Author: Chao Xie <chao.xie@marvell.com>
 *	   Neil Zhang <zhangwm@marvell.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#include <linux/pm.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/platform_data/mv_usb.h>
#include <asm/system.h>
#include <asm/unaligned.h>

#include "mv_udc.h"
#define DRIVER_DESC		"Marvell PXA USB Device Controller driver"
#define DRIVER_VERSION		"8 Nov 2010"

#define ep_dir(ep)	(((ep)->ep_num == 0) ? \
				((ep)->udc->ep0_dir) : ((ep)->direction))

/* timeout value -- usec */
#define RESET_TIMEOUT		10000
#define FLUSH_TIMEOUT		10000
#define EPSTATUS_TIMEOUT	10000
#define PRIME_TIMEOUT		10000
#define READSAFE_TIMEOUT	1000
#define DTD_TIMEOUT		1000

#define LOOPS_USEC_SHIFT	4
#define LOOPS_USEC		(1 << LOOPS_USEC_SHIFT)
#define LOOPS(timeout)		((timeout) >> LOOPS_USEC_SHIFT)
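
/*
 * Each polling loop below delays LOOPS_USEC (16) microseconds per
 * iteration, so LOOPS() converts a microsecond timeout into an
 * iteration count, e.g. LOOPS(10000) = 10000 >> 4 = 625 iterations.
 */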
static DECLARE_COMPLETION(release_done);

static const char driver_name[] = "mv_udc";
static const char driver_desc[] = DRIVER_DESC;

/* controller device global variable */
static struct mv_udc *the_controller;
int mv_usb_otgsc;

static void nuke(struct mv_ep *ep, int status);
static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver);
/* for endpoint 0 operations */
static const struct usb_endpoint_descriptor mv_ep0_desc = {
	.bLength		= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType	= USB_DT_ENDPOINT,
	.bEndpointAddress	= 0,
	.bmAttributes		= USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize		= EP0_MAX_PKT_SIZE,
};
static void ep0_reset(struct mv_udc *udc)
{
	struct mv_ep *ep;
	u32 epctrlx;
	int i = 0;

	/* ep0 in and out */
	for (i = 0; i < 2; i++) {
		ep = &udc->eps[i];
		ep->udc = udc;

		/* ep0 dQH */
		ep->dqh = &udc->ep_dqh[i];

		/* configure ep0 endpoint capabilities in dQH */
		ep->dqh->max_packet_length =
			(EP0_MAX_PKT_SIZE << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
			| EP_QUEUE_HEAD_IOS;

		ep->dqh->next_dtd_ptr = EP_QUEUE_HEAD_NEXT_TERMINATE;

		epctrlx = readl(&udc->op_regs->epctrlx[0]);
		if (i) {	/* TX */
			epctrlx |= EPCTRL_TX_ENABLE
				| (USB_ENDPOINT_XFER_CONTROL
					<< EPCTRL_TX_EP_TYPE_SHIFT);
		} else {	/* RX */
			epctrlx |= EPCTRL_RX_ENABLE
				| (USB_ENDPOINT_XFER_CONTROL
					<< EPCTRL_RX_EP_TYPE_SHIFT);
		}

		writel(epctrlx, &udc->op_regs->epctrlx[0]);
	}
}
/* protocol ep0 stall, will automatically be cleared on new transaction */
static void ep0_stall(struct mv_udc *udc)
{
	u32 epctrlx;

	/* set TX and RX to stall */
	epctrlx = readl(&udc->op_regs->epctrlx[0]);
	epctrlx |= EPCTRL_RX_EP_STALL | EPCTRL_TX_EP_STALL;
	writel(epctrlx, &udc->op_regs->epctrlx[0]);

	/* update ep0 state */
	udc->ep0_state = WAIT_FOR_SETUP;
	udc->ep0_dir = EP_DIR_OUT;
}
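
/*
 * process_ep_req(): walk the dTD chain of a request after a transfer
 * completion interrupt, accumulate the actual byte count and map the
 * controller's error bits onto errno values.  Returns 1 if a dTD is
 * still active, a negative errno on error, and 0 when the request is
 * fully retired.
 */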
static int process_ep_req(struct mv_udc *udc, int index,
	struct mv_req *curr_req)
{
	struct mv_dtd *curr_dtd;
	struct mv_dqh *curr_dqh;
	int td_complete, actual, remaining_length;
	int i, direction;
	int retval = 0;
	u32 errors;
	u32 bit_pos;

	curr_dqh = &udc->ep_dqh[index];
	direction = index % 2;

	curr_dtd = curr_req->head;
	td_complete = 0;
	actual = curr_req->req.length;

	for (i = 0; i < curr_req->dtd_count; i++) {
		if (curr_dtd->size_ioc_sts & DTD_STATUS_ACTIVE) {
			dev_dbg(&udc->dev->dev, "%s, dTD not completed\n",
				udc->eps[index].name);
			return 1;
		}

		errors = curr_dtd->size_ioc_sts & DTD_ERROR_MASK;
		if (!errors) {
			remaining_length =
				(curr_dtd->size_ioc_sts & DTD_PACKET_SIZE)
					>> DTD_LENGTH_BIT_POS;
			actual -= remaining_length;

			if (remaining_length) {
				if (direction) {
					dev_dbg(&udc->dev->dev,
						"TX dTD has remaining data\n");
					retval = -EPROTO;
					break;
				} else
					break;
			}
		} else {
			dev_info(&udc->dev->dev,
				"complete_tr error: ep=%d %s: error = 0x%x\n",
				index >> 1, direction ? "SEND" : "RECV",
				errors);
			if (errors & DTD_STATUS_HALTED) {
				/* Clear the errors and Halt condition */
				curr_dqh->size_ioc_int_sts &= ~errors;
				retval = -EPIPE;
			} else if (errors & DTD_STATUS_DATA_BUFF_ERR) {
				retval = -EPROTO;
			} else if (errors & DTD_STATUS_TRANSACTION_ERR) {
				retval = -EILSEQ;
			}
		}

		if (i != curr_req->dtd_count - 1)
			curr_dtd = (struct mv_dtd *)curr_dtd->next_dtd_virt;
	}

	if (retval)
		return retval;

	if (direction == EP_DIR_OUT)
		bit_pos = 1 << curr_req->ep->ep_num;
	else
		bit_pos = 1 << (16 + curr_req->ep->ep_num);

	while (curr_dqh->curr_dtd_ptr == curr_dtd->td_dma) {
		if (curr_dtd->dtd_next == EP_QUEUE_HEAD_NEXT_TERMINATE) {
			while (readl(&udc->op_regs->epstatus) & bit_pos)
				udelay(1);
			break;
		}
		udelay(1);
	}

	curr_req->req.actual = actual;

	return 0;
}
/*
 * done() - retire a request; caller must hold the lock with irqs blocked
 * @status : request status to be set, only works when
 * request is still in progress.
 */
static void done(struct mv_ep *ep, struct mv_req *req, int status)
{
	struct mv_udc *udc = NULL;
	unsigned char stopped = ep->stopped;
	struct mv_dtd *curr_td, *next_td;
	int j;

	udc = (struct mv_udc *)ep->udc;
	/* Remove the req from the endpoint queue */
	list_del_init(&req->queue);

	/* req.status should be set to -EINPROGRESS in ep_queue() */
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;

	/* Free dtd for the request */
	next_td = req->head;
	for (j = 0; j < req->dtd_count; j++) {
		curr_td = next_td;
		if (j != req->dtd_count - 1)
			next_td = curr_td->next_dtd_virt;
		dma_pool_free(udc->dtd_pool, curr_td, curr_td->td_dma);
	}

	if (req->mapped) {
		dma_unmap_single(ep->udc->gadget.dev.parent,
			req->req.dma, req->req.length,
			((ep_dir(ep) == EP_DIR_IN) ?
				DMA_TO_DEVICE : DMA_FROM_DEVICE));
		req->req.dma = DMA_ADDR_INVALID;
		req->mapped = 0;
	} else
		dma_sync_single_for_cpu(ep->udc->gadget.dev.parent,
			req->req.dma, req->req.length,
			((ep_dir(ep) == EP_DIR_IN) ?
				DMA_TO_DEVICE : DMA_FROM_DEVICE));

	if (status && (status != -ESHUTDOWN))
		dev_info(&udc->dev->dev, "complete %s req %p stat %d len %u/%u",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);

	ep->stopped = 1;

	spin_unlock(&ep->udc->lock);
	/*
	 * complete() is from gadget layer,
	 * eg fsg->bulk_in_complete()
	 */
	if (req->req.complete)
		req->req.complete(&ep->ep, &req->req);

	spin_lock(&ep->udc->lock);
	ep->stopped = stopped;
}
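
/*
 * queue_dtd(): hand a request's dTD chain to the hardware.  If other
 * requests are already queued, the new chain is linked after the last
 * dTD and the ATDTW tripwire protocol decides whether re-priming is
 * needed; on an empty queue the dQH is pointed at the chain and the
 * endpoint is primed directly.
 */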
static int queue_dtd(struct mv_ep *ep, struct mv_req *req)
{
	struct mv_udc *udc;
	struct mv_dqh *dqh;
	u32 bit_pos, direction;
	u32 usbcmd, epstatus;
	unsigned int loops;
	int retval = 0;

	udc = ep->udc;
	direction = ep_dir(ep);
	dqh = &(udc->ep_dqh[ep->ep_num * 2 + direction]);
	bit_pos = 1 << (((direction == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);

	/* check if the pipe is empty */
	if (!(list_empty(&ep->queue))) {
		struct mv_req *lastreq;
		lastreq = list_entry(ep->queue.prev, struct mv_req, queue);
		lastreq->tail->dtd_next =
			req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;

		wmb();

		if (readl(&udc->op_regs->epprime) & bit_pos)
			goto done;

		loops = LOOPS(READSAFE_TIMEOUT);
		while (1) {
			/* start with setting the semaphores */
			usbcmd = readl(&udc->op_regs->usbcmd);
			usbcmd |= USBCMD_ATDTW_TRIPWIRE_SET;
			writel(usbcmd, &udc->op_regs->usbcmd);

			/* read the endpoint status */
			epstatus = readl(&udc->op_regs->epstatus) & bit_pos;

			/*
			 * Reread the ATDTW semaphore bit to check if it is
			 * cleared.  When the hardware sees a hazard, it will
			 * clear the bit; otherwise it remains set and we can
			 * proceed with priming the endpoint if it is not
			 * already primed.
			 */
			if (readl(&udc->op_regs->usbcmd)
				& USBCMD_ATDTW_TRIPWIRE_SET)
				break;

			loops--;
			if (loops == 0) {
				dev_err(&udc->dev->dev,
					"Timeout for ATDTW_TRIPWIRE...\n");
				retval = -ETIME;
				goto done;
			}
			udelay(LOOPS_USEC);
		}

		/* Clear the semaphore */
		usbcmd = readl(&udc->op_regs->usbcmd);
		usbcmd &= USBCMD_ATDTW_TRIPWIRE_CLEAR;
		writel(usbcmd, &udc->op_regs->usbcmd);

		if (epstatus)
			goto done;
	}

	/* Write dQH next pointer and terminate bit to 0 */
	dqh->next_dtd_ptr = req->head->td_dma
		& EP_QUEUE_HEAD_NEXT_POINTER_MASK;

	/* clear active and halt bit, in case set from a previous error */
	dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);

	/* Ensure that updates to the QH will occur before priming. */
	wmb();

	/* Prime the Endpoint */
	writel(bit_pos, &udc->op_regs->epprime);

done:
	return retval;
}
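
/*
 * build_dtd(): allocate one dTD from the DMA pool and describe the next
 * chunk (up to EP_MAX_LENGTH_TRANSFER bytes) of the request.  The
 * controller addresses the buffer through five page pointers, so
 * buff_ptr1..4 point at the successive 4 KB pages that follow the
 * (possibly unaligned) start address in buff_ptr0.
 */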
static struct mv_dtd *build_dtd(struct mv_req *req, unsigned *length,
		dma_addr_t *dma, int *is_last)
{
	u32 temp;
	struct mv_dtd *dtd;
	struct mv_udc *udc;

	/* how big will this transfer be? */
	*length = min(req->req.length - req->req.actual,
			(unsigned)EP_MAX_LENGTH_TRANSFER);

	udc = req->ep->udc;

	/*
	 * Be careful that no _GFP_HIGHMEM is set,
	 * or we cannot use dma_to_virt
	 */
	dtd = dma_pool_alloc(udc->dtd_pool, GFP_KERNEL, dma);
	if (dtd == NULL)
		return dtd;

	dtd->td_dma = *dma;
	/* initialize buffer page pointers */
	temp = (u32)(req->req.dma + req->req.actual);
	dtd->buff_ptr0 = cpu_to_le32(temp);
	temp &= ~0xFFF;
	dtd->buff_ptr1 = cpu_to_le32(temp + 0x1000);
	dtd->buff_ptr2 = cpu_to_le32(temp + 0x2000);
	dtd->buff_ptr3 = cpu_to_le32(temp + 0x3000);
	dtd->buff_ptr4 = cpu_to_le32(temp + 0x4000);

	req->req.actual += *length;

	/* zlp is needed if req->req.zero is set */
	if (req->req.zero) {
		if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
			*is_last = 1;
		else
			*is_last = 0;
	} else if (req->req.length == req->req.actual)
		*is_last = 1;
	else
		*is_last = 0;

	/* Fill in the transfer size; set active bit */
	temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE);

	/* Enable interrupt for the last dtd of a request */
	if (*is_last && !req->req.no_interrupt)
		temp |= DTD_IOC;

	dtd->size_ioc_sts = temp;

	mb();

	return dtd;
}
/* generate dTD linked list for a request */
static int req_to_dtd(struct mv_req *req)
{
	unsigned count;
	int is_last, is_first = 1;
	struct mv_dtd *dtd, *last_dtd = NULL;
	struct mv_udc *udc;
	dma_addr_t dma;

	udc = req->ep->udc;

	do {
		dtd = build_dtd(req, &count, &dma, &is_last);
		if (dtd == NULL)
			return -ENOMEM;

		if (is_first) {
			is_first = 0;
			req->head = dtd;
		} else {
			last_dtd->dtd_next = dma;
			last_dtd->next_dtd_virt = dtd;
		}
		last_dtd = dtd;
		req->dtd_count++;
	} while (!is_last);

	/* set terminate bit to 1 for the last dTD */
	dtd->dtd_next = DTD_NEXT_TERMINATE;

	req->tail = dtd;

	return 0;
}
static int mv_ep_enable(struct usb_ep *_ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct mv_udc *udc;
	struct mv_ep *ep;
	struct mv_dqh *dqh;
	u16 max = 0;
	u32 bit_pos, epctrlx, direction;
	unsigned char zlt = 0, ios = 0, mult = 0;
	unsigned long flags;

	/* validate the arguments before dereferencing _ep */
	if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;

	ep = container_of(_ep, struct mv_ep, ep);
	udc = ep->udc;

	if (ep->desc)
		return -EINVAL;

	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	direction = ep_dir(ep);
	max = usb_endpoint_maxp(desc);

	/*
	 * disable HW zero length termination select
	 * driver handles zero length packet through req->req.zero
	 */
	zlt = 1;

	bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);

	/* Check if the Endpoint is Primed */
	if ((readl(&udc->op_regs->epprime) & bit_pos)
		|| (readl(&udc->op_regs->epstatus) & bit_pos)) {
		dev_info(&udc->dev->dev,
			"ep=%d %s: Init ERROR: ENDPTPRIME=0x%x,"
			" ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
			(unsigned)ep->ep_num, direction ? "SEND" : "RECV",
			(unsigned)readl(&udc->op_regs->epprime),
			(unsigned)readl(&udc->op_regs->epstatus),
			(unsigned)bit_pos);
		goto en_done;
	}

	/* Set the max packet length, interrupt on Setup and Mult fields */
	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
	case USB_ENDPOINT_XFER_BULK:
		zlt = 1;
		mult = 0;
		break;
	case USB_ENDPOINT_XFER_CONTROL:
		ios = 1;
		/* fall through */
	case USB_ENDPOINT_XFER_INT:
		mult = 0;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		/* Calculate transactions needed for high bandwidth iso */
		mult = (unsigned char)(1 + ((max >> 11) & 0x03));
		max = max & 0x7ff;	/* bits 0~10 */
		/* 3 transactions at most */
		if (mult > 3)
			goto en_done;
		break;
	default:
		goto en_done;
	}

	spin_lock_irqsave(&udc->lock, flags);
	/* Get the endpoint queue head address */
	dqh = ep->dqh;
	dqh->max_packet_length = (max << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
		| (mult << EP_QUEUE_HEAD_MULT_POS)
		| (zlt ? EP_QUEUE_HEAD_ZLT_SEL : 0)
		| (ios ? EP_QUEUE_HEAD_IOS : 0);
	dqh->next_dtd_ptr = 1;
	dqh->size_ioc_int_sts = 0;

	ep->ep.maxpacket = max;
	ep->desc = desc;
	ep->stopped = 0;

	/* Enable the endpoint for Rx or Tx and set the endpoint type */
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if (direction == EP_DIR_IN) {
		epctrlx &= ~EPCTRL_TX_ALL_MASK;
		epctrlx |= EPCTRL_TX_ENABLE | EPCTRL_TX_DATA_TOGGLE_RST
			| ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
				<< EPCTRL_TX_EP_TYPE_SHIFT);
	} else {
		epctrlx &= ~EPCTRL_RX_ALL_MASK;
		epctrlx |= EPCTRL_RX_ENABLE | EPCTRL_RX_DATA_TOGGLE_RST
			| ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
				<< EPCTRL_RX_EP_TYPE_SHIFT);
	}
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);

	/*
	 * Implement Guideline (GL# USB-7): the unused endpoint type must
	 * be programmed to bulk.
	 */
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if ((epctrlx & EPCTRL_RX_ENABLE) == 0) {
		epctrlx |= (USB_ENDPOINT_XFER_BULK
			<< EPCTRL_RX_EP_TYPE_SHIFT);
		writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
	}

	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if ((epctrlx & EPCTRL_TX_ENABLE) == 0) {
		epctrlx |= (USB_ENDPOINT_XFER_BULK
			<< EPCTRL_TX_EP_TYPE_SHIFT);
		writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
	}

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
en_done:
	return -EINVAL;
}
static int mv_ep_disable(struct usb_ep *_ep)
{
	struct mv_udc *udc;
	struct mv_ep *ep;
	struct mv_dqh *dqh;
	u32 bit_pos, epctrlx, direction;
	unsigned long flags;

	ep = container_of(_ep, struct mv_ep, ep);
	if ((_ep == NULL) || !ep->desc)
		return -EINVAL;

	udc = ep->udc;

	/* Get the endpoint queue head address */
	dqh = ep->dqh;

	spin_lock_irqsave(&udc->lock, flags);

	direction = ep_dir(ep);
	bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);

	/* Reset the max packet length and the interrupt on Setup */
	dqh->max_packet_length = 0;

	/* Disable the endpoint for Rx or Tx and reset the endpoint type */
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	epctrlx &= ~((direction == EP_DIR_IN)
			? (EPCTRL_TX_ENABLE | EPCTRL_TX_TYPE)
			: (EPCTRL_RX_ENABLE | EPCTRL_RX_TYPE));
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);

	/* nuke all pending requests (does flush) */
	nuke(ep, -ESHUTDOWN);

	ep->desc = NULL;
	ep->stopped = 1;

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
static struct usb_request *
mv_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct mv_req *req = NULL;

	req = kzalloc(sizeof *req, gfp_flags);
	if (!req)
		return NULL;

	req->req.dma = DMA_ADDR_INVALID;
	INIT_LIST_HEAD(&req->queue);

	return &req->req;
}

static void mv_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct mv_req *req = NULL;

	req = container_of(_req, struct mv_req, req);

	if (_req)
		kfree(req);
}
static void mv_ep_fifo_flush(struct usb_ep *_ep)
{
	struct mv_udc *udc;
	u32 bit_pos, direction;
	struct mv_ep *ep;
	unsigned int loops;

	if (!_ep)
		return;

	ep = container_of(_ep, struct mv_ep, ep);
	if (!ep->desc)
		return;

	udc = ep->udc;
	direction = ep_dir(ep);

	if (ep->ep_num == 0)
		bit_pos = (1 << 16) | 1;
	else if (direction == EP_DIR_OUT)
		bit_pos = 1 << ep->ep_num;
	else
		bit_pos = 1 << (16 + ep->ep_num);

	loops = LOOPS(EPSTATUS_TIMEOUT);
	do {
		unsigned int inter_loops;

		if (loops == 0) {
			dev_err(&udc->dev->dev,
				"TIMEOUT for ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
				(unsigned)readl(&udc->op_regs->epstatus),
				(unsigned)bit_pos);
			return;
		}
		/* Write 1 to the Flush register */
		writel(bit_pos, &udc->op_regs->epflush);

		/* Wait until flushing completed */
		inter_loops = LOOPS(FLUSH_TIMEOUT);
		while (readl(&udc->op_regs->epflush)) {
			/*
			 * ENDPTFLUSH bit should be cleared to indicate this
			 * operation is complete
			 */
			if (inter_loops == 0) {
				dev_err(&udc->dev->dev,
					"TIMEOUT for ENDPTFLUSH=0x%x, "
					"bit_pos=0x%x\n",
					(unsigned)readl(&udc->op_regs->epflush),
					(unsigned)bit_pos);
				return;
			}
			inter_loops--;
			udelay(LOOPS_USEC);
		}
		loops--;
	} while (readl(&udc->op_regs->epstatus) & bit_pos);
}
/* queues (submits) an I/O request to an endpoint */
static int
mv_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct mv_ep *ep;
	struct mv_req *req;
	struct mv_udc *udc;
	unsigned long flags;
	int retval;

	/* validate the handles before dereferencing them */
	if (unlikely(!_ep || !_req))
		return -EINVAL;

	ep = container_of(_ep, struct mv_ep, ep);
	req = container_of(_req, struct mv_req, req);
	udc = ep->udc;

	/* catch various bogus parameters */
	if (!req->req.complete || !req->req.buf
			|| !list_empty(&req->queue)) {
		dev_err(&udc->dev->dev, "%s, bad params", __func__);
		return -EINVAL;
	}
	if (unlikely(!ep->desc)) {
		dev_err(&udc->dev->dev, "%s, bad ep", __func__);
		return -EINVAL;
	}
	if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
		if (req->req.length > ep->ep.maxpacket)
			return -EMSGSIZE;
	}

	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	req->ep = ep;

	/* map virtual address to hardware */
	if (req->req.dma == DMA_ADDR_INVALID) {
		req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
					req->req.buf,
					req->req.length, ep_dir(ep)
						? DMA_TO_DEVICE
						: DMA_FROM_DEVICE);
		req->mapped = 1;
	} else {
		dma_sync_single_for_device(ep->udc->gadget.dev.parent,
					req->req.dma, req->req.length,
					ep_dir(ep)
						? DMA_TO_DEVICE
						: DMA_FROM_DEVICE);
		req->mapped = 0;
	}

	req->req.status = -EINPROGRESS;
	req->req.actual = 0;
	req->dtd_count = 0;

	spin_lock_irqsave(&udc->lock, flags);

	/* build dtds and push them to device queue */
	if (!req_to_dtd(req)) {
		retval = queue_dtd(ep, req);
		if (retval) {
			spin_unlock_irqrestore(&udc->lock, flags);
			return retval;
		}
	} else {
		spin_unlock_irqrestore(&udc->lock, flags);
		return -ENOMEM;
	}

	/* Update ep0 state */
	if (ep->ep_num == 0)
		udc->ep0_state = DATA_STATE_XMIT;

	/* irq handler advances the queue */
	list_add_tail(&req->queue, &ep->queue);
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
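
/*
 * mv_prime_ep(): point the queue head straight at a request's first dTD
 * and prime the endpoint.  Used by mv_ep_dequeue() to restart the queue
 * after the current transfer has been flushed.
 */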
static void mv_prime_ep(struct mv_ep *ep, struct mv_req *req)
{
	struct mv_dqh *dqh = ep->dqh;
	u32 bit_pos;

	/* Write dQH next pointer and terminate bit to 0 */
	dqh->next_dtd_ptr = req->head->td_dma
		& EP_QUEUE_HEAD_NEXT_POINTER_MASK;

	/* clear active and halt bit, in case set from a previous error */
	dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);

	/* Ensure that updates to the QH will occur before priming. */
	wmb();

	bit_pos = 1 << (((ep_dir(ep) == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);

	/* Prime the Endpoint */
	writel(bit_pos, &ep->udc->op_regs->epprime);
}
/* dequeues (cancels, unlinks) an I/O request from an endpoint */
static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct mv_ep *ep;
	struct mv_req *req;
	struct mv_udc *udc;
	unsigned long flags;
	int stopped, ret = 0;
	u32 epctrlx;

	if (!_ep || !_req)
		return -EINVAL;

	ep = container_of(_ep, struct mv_ep, ep);
	udc = ep->udc;

	spin_lock_irqsave(&ep->udc->lock, flags);
	stopped = ep->stopped;

	/* Stop the ep before we deal with the queue */
	ep->stopped = 1;
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if (ep_dir(ep) == EP_DIR_IN)
		epctrlx &= ~EPCTRL_TX_ENABLE;
	else
		epctrlx &= ~EPCTRL_RX_ENABLE;
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);

	/* make sure it's actually queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	if (&req->req != _req) {
		ret = -EINVAL;
		goto out;
	}

	/* The request is in progress, or completed but not dequeued */
	if (ep->queue.next == &req->queue) {
		_req->status = -ECONNRESET;
		mv_ep_fifo_flush(_ep);	/* flush current transfer */

		/* The request isn't the last request in this ep queue */
		if (req->queue.next != &ep->queue) {
			struct mv_req *next_req;

			next_req = list_entry(req->queue.next,
					struct mv_req, queue);

			/* Point the QH to the first TD of next request */
			mv_prime_ep(ep, next_req);
		} else {
			struct mv_dqh *qh;

			qh = ep->dqh;
			qh->next_dtd_ptr = 1;
			qh->size_ioc_int_sts = 0;
		}
	} else {
		/* The request hasn't been processed, patch up the TD chain */
		struct mv_req *prev_req;

		prev_req = list_entry(req->queue.prev, struct mv_req, queue);
		writel(readl(&req->tail->dtd_next),
			&prev_req->tail->dtd_next);
	}

	done(ep, req, -ECONNRESET);

out:
	/* Enable EP */
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if (ep_dir(ep) == EP_DIR_IN)
		epctrlx |= EPCTRL_TX_ENABLE;
	else
		epctrlx |= EPCTRL_RX_ENABLE;
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
	ep->stopped = stopped;

	spin_unlock_irqrestore(&ep->udc->lock, flags);

	return ret;
}
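
/*
 * ep_set_stall()/ep_is_stall(): set, clear or query the STALL bit of
 * one endpoint direction.  Clearing a stall also resets the data
 * toggle, as required after a ClearFeature(ENDPOINT_HALT).
 */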
static void ep_set_stall(struct mv_udc *udc, u8 ep_num, u8 direction, int stall)
{
	u32 epctrlx;

	epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);

	if (stall) {
		if (direction == EP_DIR_IN)
			epctrlx |= EPCTRL_TX_EP_STALL;
		else
			epctrlx |= EPCTRL_RX_EP_STALL;
	} else {
		if (direction == EP_DIR_IN) {
			epctrlx &= ~EPCTRL_TX_EP_STALL;
			epctrlx |= EPCTRL_TX_DATA_TOGGLE_RST;
		} else {
			epctrlx &= ~EPCTRL_RX_EP_STALL;
			epctrlx |= EPCTRL_RX_DATA_TOGGLE_RST;
		}
	}
	writel(epctrlx, &udc->op_regs->epctrlx[ep_num]);
}

static int ep_is_stall(struct mv_udc *udc, u8 ep_num, u8 direction)
{
	u32 epctrlx;

	epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);

	if (direction == EP_DIR_OUT)
		return (epctrlx & EPCTRL_RX_EP_STALL) ? 1 : 0;
	else
		return (epctrlx & EPCTRL_TX_EP_STALL) ? 1 : 0;
}
static int mv_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
{
	struct mv_ep *ep;
	unsigned long flags = 0;
	int status = 0;
	struct mv_udc *udc;

	/* validate _ep before dereferencing it */
	if (!_ep)
		return -EINVAL;

	ep = container_of(_ep, struct mv_ep, ep);
	udc = ep->udc;

	if (!ep->desc) {
		status = -EINVAL;
		goto out;
	}

	if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
		status = -EOPNOTSUPP;
		goto out;
	}

	/*
	 * An attempt to halt an IN ep will fail if any transfer
	 * requests are still queued
	 */
	if (halt && (ep_dir(ep) == EP_DIR_IN) && !list_empty(&ep->queue)) {
		status = -EAGAIN;
		goto out;
	}

	spin_lock_irqsave(&ep->udc->lock, flags);
	ep_set_stall(udc, ep->ep_num, ep_dir(ep), halt);
	if (halt && wedge)
		ep->wedge = 1;
	else if (!halt)
		ep->wedge = 0;
	spin_unlock_irqrestore(&ep->udc->lock, flags);

	if (ep->ep_num == 0) {
		udc->ep0_state = WAIT_FOR_SETUP;
		udc->ep0_dir = EP_DIR_OUT;
	}
out:
	return status;
}

static int mv_ep_set_halt(struct usb_ep *_ep, int halt)
{
	return mv_ep_set_halt_wedge(_ep, halt, 0);
}

static int mv_ep_set_wedge(struct usb_ep *_ep)
{
	return mv_ep_set_halt_wedge(_ep, 1, 1);
}
static struct usb_ep_ops mv_ep_ops = {
	.enable		= mv_ep_enable,
	.disable	= mv_ep_disable,

	.alloc_request	= mv_alloc_request,
	.free_request	= mv_free_request,

	.queue		= mv_ep_queue,
	.dequeue	= mv_ep_dequeue,

	.set_wedge	= mv_ep_set_wedge,
	.set_halt	= mv_ep_set_halt,
	.fifo_flush	= mv_ep_fifo_flush,	/* flush fifo */
};
static void udc_clock_enable(struct mv_udc *udc)
{
	unsigned int i;

	for (i = 0; i < udc->clknum; i++)
		clk_enable(udc->clk[i]);
}

static void udc_clock_disable(struct mv_udc *udc)
{
	unsigned int i;

	for (i = 0; i < udc->clknum; i++)
		clk_disable(udc->clk[i]);
}
static void udc_stop(struct mv_udc *udc)
{
	u32 tmp;

	/* Disable interrupts */
	tmp = readl(&udc->op_regs->usbintr);
	tmp &= ~(USBINTR_INT_EN | USBINTR_ERR_INT_EN |
		USBINTR_PORT_CHANGE_DETECT_EN | USBINTR_RESET_EN);
	writel(tmp, &udc->op_regs->usbintr);

	udc->stopped = 1;

	/* Clear the Run bit in the command register to stop the controller */
	tmp = readl(&udc->op_regs->usbcmd);
	tmp &= ~USBCMD_RUN_STOP;
	writel(tmp, &udc->op_regs->usbcmd);
}

static void udc_start(struct mv_udc *udc)
{
	u32 usbintr;

	usbintr = USBINTR_INT_EN | USBINTR_ERR_INT_EN
		| USBINTR_PORT_CHANGE_DETECT_EN
		| USBINTR_RESET_EN | USBINTR_DEVICE_SUSPEND;

	/* Enable interrupts */
	writel(usbintr, &udc->op_regs->usbintr);

	udc->stopped = 0;

	/* Set the Run bit in the command register */
	writel(USBCMD_RUN_STOP, &udc->op_regs->usbcmd);
}
static int udc_reset(struct mv_udc *udc)
{
	unsigned int loops;
	u32 tmp, portsc;

	/* Stop the controller */
	tmp = readl(&udc->op_regs->usbcmd);
	tmp &= ~USBCMD_RUN_STOP;
	writel(tmp, &udc->op_regs->usbcmd);

	/* Reset the controller to get default values */
	writel(USBCMD_CTRL_RESET, &udc->op_regs->usbcmd);

	/* wait for reset to complete */
	loops = LOOPS(RESET_TIMEOUT);
	while (readl(&udc->op_regs->usbcmd) & USBCMD_CTRL_RESET) {
		if (loops == 0) {
			dev_err(&udc->dev->dev,
				"Timeout waiting for RESET to complete\n");
			return -ETIMEDOUT;
		}
		loops--;
		udelay(LOOPS_USEC);
	}

	/* set controller to device mode */
	tmp = readl(&udc->op_regs->usbmode);
	tmp |= USBMODE_CTRL_MODE_DEVICE;

	/* turn setup lockout off, require setup tripwire in usbcmd */
	tmp |= USBMODE_SETUP_LOCK_OFF | USBMODE_STREAM_DISABLE;

	writel(tmp, &udc->op_regs->usbmode);

	writel(0x0, &udc->op_regs->epsetupstat);

	/* Configure the Endpoint List Address */
	writel(udc->ep_dqh_dma & USB_EP_LIST_ADDRESS_MASK,
		&udc->op_regs->eplistaddr);

	portsc = readl(&udc->op_regs->portsc[0]);
	if (readl(&udc->cap_regs->hcsparams) & HCSPARAMS_PPC)
		/* avoid write-one-to-clear bits and drop port power */
		portsc &= ~(PORTSCX_W1C_BITS | PORTSCX_PORT_POWER);

	if (udc->force_fs)
		portsc |= PORTSCX_FORCE_FULL_SPEED_CONNECT;
	else
		portsc &= (~PORTSCX_FORCE_FULL_SPEED_CONNECT);

	writel(portsc, &udc->op_regs->portsc[0]);

	tmp = readl(&udc->op_regs->epctrlx[0]);
	tmp &= ~(EPCTRL_TX_EP_STALL | EPCTRL_RX_EP_STALL);
	writel(tmp, &udc->op_regs->epctrlx[0]);

	return 0;
}
static int mv_udc_enable_internal(struct mv_udc *udc)
{
	int retval;

	if (udc->active)
		return 0;

	dev_dbg(&udc->dev->dev, "enable udc\n");
	udc_clock_enable(udc);
	if (udc->pdata->phy_init) {
		retval = udc->pdata->phy_init(udc->phy_regs);
		if (retval) {
			dev_err(&udc->dev->dev,
				"init phy error %d\n", retval);
			udc_clock_disable(udc);
			return retval;
		}
	}
	udc->active = 1;

	return 0;
}

static int mv_udc_enable(struct mv_udc *udc)
{
	if (udc->clock_gating)
		return mv_udc_enable_internal(udc);

	return 0;
}

static void mv_udc_disable_internal(struct mv_udc *udc)
{
	if (udc->active) {
		dev_dbg(&udc->dev->dev, "disable udc\n");
		if (udc->pdata->phy_deinit)
			udc->pdata->phy_deinit(udc->phy_regs);
		udc_clock_disable(udc);
		udc->active = 0;
	}
}

static void mv_udc_disable(struct mv_udc *udc)
{
	if (udc->clock_gating)
		mv_udc_disable_internal(udc);
}
static int mv_udc_get_frame(struct usb_gadget *gadget)
{
	struct mv_udc *udc;
	u16 retval;

	if (!gadget)
		return -ENODEV;

	udc = container_of(gadget, struct mv_udc, gadget);

	retval = readl(&udc->op_regs->frindex) & USB_FRINDEX_MASKS;

	return retval;
}

/* Tries to wake up the host connected to this gadget */
static int mv_udc_wakeup(struct usb_gadget *gadget)
{
	struct mv_udc *udc = container_of(gadget, struct mv_udc, gadget);
	u32 portsc;

	/* Remote wakeup feature not enabled by host */
	if (!udc->remote_wakeup)
		return -ENOTSUPP;

	portsc = readl(&udc->op_regs->portsc[0]);

	/* not suspended? */
	if (!(portsc & PORTSCX_PORT_SUSPEND))
		return 0;

	/* trigger force resume */
	portsc |= PORTSCX_PORT_FORCE_RESUME;
	writel(portsc, &udc->op_regs->portsc[0]);

	return 0;
}
static int mv_udc_vbus_session(struct usb_gadget *gadget, int is_active)
{
	struct mv_udc *udc;
	unsigned long flags;
	int retval = 0;

	udc = container_of(gadget, struct mv_udc, gadget);

	spin_lock_irqsave(&udc->lock, flags);

	udc->vbus_active = (is_active != 0);

	dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
		__func__, udc->softconnect, udc->vbus_active);

	if (udc->driver && udc->softconnect && udc->vbus_active) {
		retval = mv_udc_enable(udc);
		if (retval == 0) {
			/* Clock is disabled, need to re-init registers */
			udc_reset(udc);
			ep0_reset(udc);
			udc_start(udc);
		}
	} else if (udc->driver && udc->softconnect) {
		/* stop all transfers in the queue */
		stop_activity(udc, udc->driver);
		udc_stop(udc);
		mv_udc_disable(udc);
	}

	spin_unlock_irqrestore(&udc->lock, flags);

	return retval;
}

static int mv_udc_pullup(struct usb_gadget *gadget, int is_on)
{
	struct mv_udc *udc;
	unsigned long flags;
	int retval = 0;

	udc = container_of(gadget, struct mv_udc, gadget);

	spin_lock_irqsave(&udc->lock, flags);

	udc->softconnect = (is_on != 0);

	dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
		__func__, udc->softconnect, udc->vbus_active);

	if (udc->driver && udc->softconnect && udc->vbus_active) {
		retval = mv_udc_enable(udc);
		if (retval == 0) {
			/* Clock is disabled, need to re-init registers */
			udc_reset(udc);
			ep0_reset(udc);
			udc_start(udc);
		}
	} else if (udc->driver && udc->vbus_active) {
		/* stop all transfers in the queue */
		stop_activity(udc, udc->driver);
		udc_stop(udc);
		mv_udc_disable(udc);
	}

	spin_unlock_irqrestore(&udc->lock, flags);

	return retval;
}
static int mv_udc_start(struct usb_gadget_driver *driver,
		int (*bind)(struct usb_gadget *));
static int mv_udc_stop(struct usb_gadget_driver *driver);

/* device controller usb_gadget_ops structure */
static const struct usb_gadget_ops mv_ops = {
	/* returns the current frame number */
	.get_frame	= mv_udc_get_frame,

	/* tries to wake up the host connected to this gadget */
	.wakeup		= mv_udc_wakeup,

	/* notify controller that VBUS is powered or not */
	.vbus_session	= mv_udc_vbus_session,

	/* D+ pullup, software-controlled connect/disconnect to USB host */
	.pullup		= mv_udc_pullup,
	.start		= mv_udc_start,
	.stop		= mv_udc_stop,
};
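
/*
 * eps_init(): eps[] and ep_dqh[] are indexed ep_num * 2 + direction, so
 * even entries are OUT and odd entries are IN.  Indices 0/1 belong to
 * ep0 (their queue heads are set up in ep0_reset()); index 0 is
 * registered as the gadget's ep0, and indices 2.. are the regular
 * endpoints exposed on the gadget's ep_list.
 */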
static int eps_init(struct mv_udc *udc)
{
	struct mv_ep *ep;
	char name[14];
	int i;

	/* initialize ep0 */
	ep = &udc->eps[0];
	ep->udc = udc;
	strncpy(ep->name, "ep0", sizeof(ep->name));
	ep->ep.name = ep->name;
	ep->ep.ops = &mv_ep_ops;
	ep->wedge = 0;
	ep->stopped = 0;
	ep->ep.maxpacket = EP0_MAX_PKT_SIZE;
	ep->ep_num = 0;
	ep->desc = &mv_ep0_desc;
	INIT_LIST_HEAD(&ep->queue);

	ep->ep_type = USB_ENDPOINT_XFER_CONTROL;

	/* initialize other endpoints */
	for (i = 2; i < udc->max_eps * 2; i++) {
		ep = &udc->eps[i];
		if (i % 2) {
			snprintf(name, sizeof(name), "ep%din", i / 2);
			ep->direction = EP_DIR_IN;
		} else {
			snprintf(name, sizeof(name), "ep%dout", i / 2);
			ep->direction = EP_DIR_OUT;
		}
		ep->udc = udc;
		strncpy(ep->name, name, sizeof(ep->name));
		ep->ep.name = ep->name;

		ep->ep.ops = &mv_ep_ops;
		ep->stopped = 0;
		ep->ep.maxpacket = (unsigned short) ~0;
		ep->ep_num = i / 2;

		INIT_LIST_HEAD(&ep->queue);
		list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);

		ep->dqh = &udc->ep_dqh[i];
	}

	return 0;
}
/* delete all endpoint requests, called with spinlock held */
static void nuke(struct mv_ep *ep, int status)
{
	ep->stopped = 1;

	/* endpoint fifo flush */
	mv_ep_fifo_flush(&ep->ep);

	while (!list_empty(&ep->queue)) {
		struct mv_req *req = NULL;
		req = list_entry(ep->queue.next, struct mv_req, queue);
		done(ep, req, status);
	}
}

/* stop all USB activities */
static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver)
{
	struct mv_ep *ep;

	nuke(&udc->eps[0], -ESHUTDOWN);

	list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
		nuke(ep, -ESHUTDOWN);
	}

	/* report disconnect; the driver is already quiesced */
	if (driver) {
		spin_unlock(&udc->lock);
		driver->disconnect(&udc->gadget);
		spin_lock(&udc->lock);
	}
}
static int mv_udc_start(struct usb_gadget_driver *driver,
		int (*bind)(struct usb_gadget *))
{
	struct mv_udc *udc = the_controller;
	int retval = 0;
	unsigned long flags;

	if (!udc)
		return -ENODEV;

	if (udc->driver)
		return -EBUSY;

	spin_lock_irqsave(&udc->lock, flags);

	/* hook up the driver ... */
	driver->driver.bus = NULL;
	udc->driver = driver;
	udc->gadget.dev.driver = &driver->driver;

	udc->usb_state = USB_STATE_ATTACHED;
	udc->ep0_state = WAIT_FOR_SETUP;
	udc->ep0_dir = EP_DIR_OUT;

	spin_unlock_irqrestore(&udc->lock, flags);

	retval = bind(&udc->gadget);
	if (retval) {
		dev_err(&udc->dev->dev, "bind to driver %s --> %d\n",
			driver->driver.name, retval);
		udc->driver = NULL;
		udc->gadget.dev.driver = NULL;
		return retval;
	}

	if (udc->transceiver) {
		retval = otg_set_peripheral(udc->transceiver, &udc->gadget);
		if (retval) {
			dev_err(&udc->dev->dev,
				"unable to register peripheral to otg\n");
			if (driver->unbind) {
				driver->unbind(&udc->gadget);
				udc->gadget.dev.driver = NULL;
				udc->driver = NULL;
			}
			return retval;
		}
	}

	/* pullup is always on */
	mv_udc_pullup(&udc->gadget, 1);

	/*
	 * When we boot with a cable attached, no VBUS IRQ will occur,
	 * so kick the VBUS worker once here.
	 */
	if (udc->qwork)
		queue_work(udc->qwork, &udc->vbus_work);

	return 0;
}
static int mv_udc_stop(struct usb_gadget_driver *driver)
{
	struct mv_udc *udc = the_controller;
	unsigned long flags;

	if (!udc)
		return -ENODEV;

	spin_lock_irqsave(&udc->lock, flags);

	mv_udc_enable(udc);
	udc_stop(udc);

	/* stop all usb activities */
	udc->gadget.speed = USB_SPEED_UNKNOWN;
	stop_activity(udc, driver);
	mv_udc_disable(udc);

	spin_unlock_irqrestore(&udc->lock, flags);

	/* unbind gadget driver */
	driver->unbind(&udc->gadget);
	udc->gadget.dev.driver = NULL;
	udc->driver = NULL;

	return 0;
}
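
/*
 * mv_set_ptc(): write the Port Test Control field (PORTSCx bits 16..19)
 * to drive one of the USB 2.0 electrical test modes.
 */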
static void mv_set_ptc(struct mv_udc *udc, u32 mode)
{
	u32 portsc;

	portsc = readl(&udc->op_regs->portsc[0]);
	portsc |= mode << 16;
	writel(portsc, &udc->op_regs->portsc[0]);
}

static void prime_status_complete(struct usb_ep *ep, struct usb_request *_req)
{
	struct mv_udc *udc = the_controller;
	struct mv_req *req = container_of(_req, struct mv_req, req);
	unsigned long flags;

	dev_info(&udc->dev->dev, "switch to test mode %d\n", req->test_mode);

	spin_lock_irqsave(&udc->lock, flags);
	if (req->test_mode) {
		mv_set_ptc(udc, req->test_mode);
		req->test_mode = 0;
	}
	spin_unlock_irqrestore(&udc->lock, flags);
}
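
/*
 * udc_prime_status(): queue the ep0 status stage.  "empty" selects a
 * zero-length packet; otherwise the 16-bit status value is sent back
 * to the host (GET_STATUS).
 */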
static int
udc_prime_status(struct mv_udc *udc, u8 direction, u16 status, bool empty)
{
	int retval = 0;
	struct mv_req *req;
	struct mv_ep *ep;

	ep = &udc->eps[0];
	udc->ep0_dir = direction;
	udc->ep0_state = WAIT_FOR_OUT_STATUS;

	req = udc->status_req;

	/* fill in the request structure */
	if (empty == false) {
		*((u16 *) req->req.buf) = cpu_to_le16(status);
		req->req.length = 2;
	} else
		req->req.length = 0;

	req->ep = ep;
	req->req.status = -EINPROGRESS;
	req->req.actual = 0;
	if (udc->test_mode) {
		req->req.complete = prime_status_complete;
		req->test_mode = udc->test_mode;
		udc->test_mode = 0;
	} else
		req->req.complete = NULL;
	req->dtd_count = 0;

	if (req->req.dma == DMA_ADDR_INVALID) {
		req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
				req->req.buf, req->req.length,
				ep_dir(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		req->mapped = 1;
	}

	/* prime the data phase */
	if (!req_to_dtd(req))
		retval = queue_dtd(ep, req);
	else {	/* no mem */
		retval = -ENOMEM;
		goto out;
	}

	if (retval) {
		dev_err(&udc->dev->dev, "response error on GET_STATUS request\n");
		goto out;
	}

	list_add_tail(&req->queue, &ep->queue);

	return 0;
out:
	return retval;
}
static void mv_udc_testmode(struct mv_udc *udc, u16 index)
{
	if (index <= TEST_FORCE_EN) {
		udc->test_mode = index;
		if (udc_prime_status(udc, EP_DIR_IN, 0, true))
			ep0_stall(udc);
	} else
		dev_err(&udc->dev->dev,
			"This test mode (%d) is not supported\n", index);
}

static void ch9setaddress(struct mv_udc *udc, struct usb_ctrlrequest *setup)
{
	udc->dev_addr = (u8)setup->wValue;

	/* update usb state */
	udc->usb_state = USB_STATE_ADDRESS;

	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
		ep0_stall(udc);
}
static void ch9getstatus(struct mv_udc *udc, u8 ep_num,
	struct usb_ctrlrequest *setup)
{
	u16 status = 0;
	int retval;

	if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
		!= (USB_DIR_IN | USB_TYPE_STANDARD))
		return;

	if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
		status = 1 << USB_DEVICE_SELF_POWERED;
		status |= udc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
	} else if ((setup->bRequestType & USB_RECIP_MASK)
			== USB_RECIP_INTERFACE) {
		/* get interface status */
		status = 0;
	} else if ((setup->bRequestType & USB_RECIP_MASK)
			== USB_RECIP_ENDPOINT) {
		u8 num, direction;	/* don't shadow the ep_num argument */

		num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
		direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
				? EP_DIR_IN : EP_DIR_OUT;
		status = ep_is_stall(udc, num, direction)
				<< USB_ENDPOINT_HALT;
	}

	retval = udc_prime_status(udc, EP_DIR_IN, status, false);
	if (retval)
		ep0_stall(udc);
	else
		udc->ep0_state = DATA_STATE_XMIT;
}
static void ch9clearfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
{
	u8 ep_num;
	u8 direction;
	struct mv_ep *ep;

	if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== (USB_TYPE_STANDARD | USB_RECIP_DEVICE)) {
		switch (setup->wValue) {
		case USB_DEVICE_REMOTE_WAKEUP:
			udc->remote_wakeup = 0;
			break;
		default:
			goto out;
		}
	} else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== (USB_TYPE_STANDARD | USB_RECIP_ENDPOINT)) {
		switch (setup->wValue) {
		case USB_ENDPOINT_HALT:
			ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
			direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
				? EP_DIR_IN : EP_DIR_OUT;
			if (setup->wValue != 0 || setup->wLength != 0
				|| ep_num > udc->max_eps)
				goto out;
			ep = &udc->eps[ep_num * 2 + direction];
			if (ep->wedge == 1)
				break;
			spin_unlock(&udc->lock);
			ep_set_stall(udc, ep_num, direction, 0);
			spin_lock(&udc->lock);
			break;
		default:
			goto out;
		}
	} else
		goto out;

	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
		ep0_stall(udc);
out:
	return;
}
static void ch9setfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
{
	u8 ep_num;
	u8 direction;

	if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== (USB_TYPE_STANDARD | USB_RECIP_DEVICE)) {
		switch (setup->wValue) {
		case USB_DEVICE_REMOTE_WAKEUP:
			udc->remote_wakeup = 1;
			break;
		case USB_DEVICE_TEST_MODE:
			if (setup->wIndex & 0xFF
				|| udc->gadget.speed != USB_SPEED_HIGH)
				ep0_stall(udc);

			if (udc->usb_state != USB_STATE_CONFIGURED
				&& udc->usb_state != USB_STATE_ADDRESS
				&& udc->usb_state != USB_STATE_DEFAULT)
				ep0_stall(udc);

			mv_udc_testmode(udc, (setup->wIndex >> 8));
			goto out;
		default:
			goto out;
		}
	} else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== (USB_TYPE_STANDARD | USB_RECIP_ENDPOINT)) {
		switch (setup->wValue) {
		case USB_ENDPOINT_HALT:
			ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
			direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
				? EP_DIR_IN : EP_DIR_OUT;
			if (setup->wValue != 0 || setup->wLength != 0
				|| ep_num > udc->max_eps)
				goto out;
			spin_unlock(&udc->lock);
			ep_set_stall(udc, ep_num, direction, 1);
			spin_lock(&udc->lock);
			break;
		default:
			goto out;
		}
	} else
		goto out;

	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
		ep0_stall(udc);
out:
	return;
}
static void handle_setup_packet(struct mv_udc *udc, u8 ep_num,
	struct usb_ctrlrequest *setup)
{
	bool delegate = false;

	/* flush any leftover ep0 OUT transfers before the new SETUP */
	nuke(&udc->eps[ep_num * 2 + EP_DIR_OUT], -ESHUTDOWN);

	dev_dbg(&udc->dev->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
			setup->bRequestType, setup->bRequest,
			setup->wValue, setup->wIndex, setup->wLength);

	/* We process some standard setup requests here */
	if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (setup->bRequest) {
		case USB_REQ_GET_STATUS:
			ch9getstatus(udc, ep_num, setup);
			break;

		case USB_REQ_SET_ADDRESS:
			ch9setaddress(udc, setup);
			break;

		case USB_REQ_CLEAR_FEATURE:
			ch9clearfeature(udc, setup);
			break;

		case USB_REQ_SET_FEATURE:
			ch9setfeature(udc, setup);
			break;

		default:
			delegate = true;
		}
	} else
		delegate = true;

	/* delegate all other requests to the gadget driver */
	if (delegate) {
		/* USB requests handled by gadget */
		if (setup->wLength) {
			/* DATA phase from gadget, STATUS phase from udc */
			udc->ep0_dir = (setup->bRequestType & USB_DIR_IN)
					? EP_DIR_IN : EP_DIR_OUT;
			spin_unlock(&udc->lock);
			if (udc->driver->setup(&udc->gadget,
				&udc->local_setup_buff) < 0)
				ep0_stall(udc);
			spin_lock(&udc->lock);
			udc->ep0_state = (setup->bRequestType & USB_DIR_IN)
					? DATA_STATE_XMIT : DATA_STATE_RECV;
		} else {
			/* no DATA phase, IN STATUS phase from gadget */
			udc->ep0_dir = EP_DIR_IN;
			spin_unlock(&udc->lock);
			if (udc->driver->setup(&udc->gadget,
				&udc->local_setup_buff) < 0)
				ep0_stall(udc);
			spin_lock(&udc->lock);
			udc->ep0_state = WAIT_FOR_OUT_STATUS;
		}
	}
}
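
/*
 * ep0 state machine, driven by transfer-complete events:
 *
 *   DATA_STATE_XMIT      IN data was sent      -> prime OUT status stage
 *   DATA_STATE_RECV      OUT data was received -> prime IN status stage
 *   WAIT_FOR_OUT_STATUS  status stage finished -> back to WAIT_FOR_SETUP
 *   WAIT_FOR_SETUP       a completion here is an error; nothing should
 *                        be in flight while waiting for a SETUP packet
 *
 * A pending SET_ADDRESS is committed to DEVICEADDR only at this point,
 * after the request completes, because per the USB spec the status
 * transaction of SET_ADDRESS is still addressed to the old address.
 */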
/* complete the DATA or STATUS phase of ep0; prime the next phase if needed */
static void ep0_req_complete(struct mv_udc *udc,
	struct mv_ep *ep0, struct mv_req *req)
{
	u32 new_addr;

	if (udc->usb_state == USB_STATE_ADDRESS) {
		/* set the new address */
		new_addr = (u32)udc->dev_addr;
		writel(new_addr << USB_DEVICE_ADDRESS_BIT_SHIFT,
			&udc->op_regs->deviceaddr);
	}

	done(ep0, req, 0);

	switch (udc->ep0_state) {
	case DATA_STATE_XMIT:
		/* receive status phase */
		if (udc_prime_status(udc, EP_DIR_OUT, 0, true))
			ep0_stall(udc);
		break;
	case DATA_STATE_RECV:
		/* send status phase */
		if (udc_prime_status(udc, EP_DIR_IN, 0, true))
			ep0_stall(udc);
		break;
	case WAIT_FOR_OUT_STATUS:
		udc->ep0_state = WAIT_FOR_SETUP;
		break;
	case WAIT_FOR_SETUP:
		dev_err(&udc->dev->dev, "unexpected ep0 packet\n");
		break;
	default:
		ep0_stall(udc);
		break;
	}
}
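
/*
 * Copying the 8-byte SETUP packet out of the queue head races with the
 * hardware: a newer SETUP packet may overwrite dqh->setup_buffer while
 * software is still reading it. The controller provides a "setup
 * tripwire" semaphore in USBCMD for this: set the tripwire, copy the
 * buffer, then check whether the bit is still set. Hardware clears the
 * bit whenever it writes a new SETUP packet, so a surviving tripwire
 * means the copy is consistent; otherwise the copy is retried.
 */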
static void get_setup_data(struct mv_udc *udc, u8 ep_num, u8 *buffer_ptr)
{
	u32 temp;
	struct mv_dqh *dqh;

	dqh = &udc->ep_dqh[ep_num * 2 + EP_DIR_OUT];

	/* Clear bit in ENDPTSETUPSTAT */
	writel((1 << ep_num), &udc->op_regs->epsetupstat);

	/* a hazard exists while a setup packet may still be arriving */
	do {
		/* Set Setup Tripwire */
		temp = readl(&udc->op_regs->usbcmd);
		writel(temp | USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);

		/* Copy the setup packet to local buffer */
		memcpy(buffer_ptr, (u8 *) dqh->setup_buffer, 8);
	} while (!(readl(&udc->op_regs->usbcmd) & USBCMD_SETUP_TRIPWIRE_SET));

	/* Clear Setup Tripwire */
	temp = readl(&udc->op_regs->usbcmd);
	writel(temp & ~USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);
}
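
/*
 * ENDPTCOMPLETE packs both directions into one register: bits 0-15
 * flag completed OUT (RX) endpoints, bits 16-31 the corresponding IN
 * (TX) endpoints, hence bit_pos = 1 << (ep_num + 16 * direction) in
 * the loop below. The eps[] array is laid out in pairs, OUT at even
 * indices and IN at odd ones; index 1 (ep0 IN) is folded back onto
 * eps[0] because ep0 is represented by a single struct mv_ep.
 */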
static void irq_process_tr_complete(struct mv_udc *udc)
{
	u32 tmp, bit_pos;
	int i, ep_num = 0, direction = 0;
	struct mv_ep *curr_ep;
	struct mv_req *curr_req, *temp_req;
	int status;

	/*
	 * We use separate loops for ENDPTSETUPSTAT and ENDPTCOMPLETE
	 * because the setup packets are to be read ASAP
	 */

	/* Process all Setup packet received interrupts */
	tmp = readl(&udc->op_regs->epsetupstat);
	if (tmp) {
		for (i = 0; i < udc->max_eps; i++) {
			if (tmp & (1 << i)) {
				get_setup_data(udc, i,
					(u8 *)(&udc->local_setup_buff));
				handle_setup_packet(udc, i,
					&udc->local_setup_buff);
			}
		}
	}

	/* Don't clear the endpoint setup status register here.
	 * It is cleared as a setup packet is read out of the buffer
	 */

	/* Process non-setup transaction complete interrupts */
	tmp = readl(&udc->op_regs->epcomplete);
	if (!tmp)
		return;

	writel(tmp, &udc->op_regs->epcomplete);

	for (i = 0; i < udc->max_eps * 2; i++) {
		ep_num = i >> 1;
		direction = i % 2;

		bit_pos = 1 << (ep_num + 16 * direction);

		if (!(bit_pos & tmp))
			continue;

		if (i == 1)
			curr_ep = &udc->eps[0];
		else
			curr_ep = &udc->eps[i];

		/* process the req queue until the first incomplete request */
		list_for_each_entry_safe(curr_req, temp_req,
			&curr_ep->queue, queue) {
			status = process_ep_req(udc, i, curr_req);
			if (status)
				break;

			/* write back status to req */
			curr_req->req.status = status;

			/* ep0 request completion */
			if (ep_num == 0) {
				ep0_req_complete(udc, curr_ep, curr_req);
				break;
			} else {
				done(curr_ep, curr_req, status);
			}
		}
	}
}
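
/*
 * Bus reset handling. PORTSCX_PORT_RESET distinguishes the two cases:
 * if it is still set, the reset is in progress and clearing state plus
 * flushing the queues is enough; if it has already cleared, the reset
 * finished before we got here, so the controller must be brought up
 * again from scratch (udc_reset, ep0_reset, udc_start) before the host
 * starts enumeration.
 */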
static void irq_process_reset(struct mv_udc *udc)
{
	u32 tmp;
	unsigned int loops;

	udc->ep0_dir = EP_DIR_OUT;
	udc->ep0_state = WAIT_FOR_SETUP;
	udc->remote_wakeup = 0;		/* default to 0 on reset */

	/* The device address occupies bits 25-31; clear it */
	tmp = readl(&udc->op_regs->deviceaddr);
	tmp &= ~(USB_DEVICE_ADDRESS_MASK);
	writel(tmp, &udc->op_regs->deviceaddr);

	/* Clear all the setup token semaphores */
	tmp = readl(&udc->op_regs->epsetupstat);
	writel(tmp, &udc->op_regs->epsetupstat);

	/* Clear all the endpoint complete status bits */
	tmp = readl(&udc->op_regs->epcomplete);
	writel(tmp, &udc->op_regs->epcomplete);

	/* wait until all ENDPTPRIME bits are cleared */
	loops = LOOPS(PRIME_TIMEOUT);
	while (readl(&udc->op_regs->epprime)) {
		if (loops == 0) {
			dev_err(&udc->dev->dev,
				"Timeout for ENDPTPRIME = 0x%x\n",
				readl(&udc->op_regs->epprime));
			break;
		}
		loops--;
		udelay(LOOPS_USEC);
	}

	/* Write 1s to the Flush register */
	writel((u32)~0, &udc->op_regs->epflush);

	if (readl(&udc->op_regs->portsc[0]) & PORTSCX_PORT_RESET) {
		dev_info(&udc->dev->dev, "usb bus reset\n");
		udc->usb_state = USB_STATE_DEFAULT;
		/* reset all the queues, stop all USB activities */
		stop_activity(udc, udc->driver);
	} else {
		dev_info(&udc->dev->dev, "USB reset portsc 0x%x\n",
			readl(&udc->op_regs->portsc[0]));

		/* the reset already completed: re-initialize the controller */
		udc_reset(udc);

		/* reset all the queues, stop all USB activities */
		stop_activity(udc, udc->driver);

		/* reset ep0 dQH and endptctrl */
		ep0_reset(udc);

		/* enable interrupt and set controller to run state */
		udc_start(udc);

		udc->usb_state = USB_STATE_ATTACHED;
	}
}
static void handle_bus_resume(struct mv_udc *udc)
{
	udc->usb_state = udc->resume_state;
	udc->resume_state = 0;

	/* report resume to the driver */
	if (udc->driver && udc->driver->resume) {
		spin_unlock(&udc->lock);
		udc->driver->resume(&udc->gadget);
		spin_lock(&udc->lock);
	}
}

static void irq_process_suspend(struct mv_udc *udc)
{
	udc->resume_state = udc->usb_state;
	udc->usb_state = USB_STATE_SUSPENDED;

	if (udc->driver && udc->driver->suspend) {
		spin_unlock(&udc->lock);
		udc->driver->suspend(&udc->gadget);
		spin_lock(&udc->lock);
	}
}
static void irq_process_port_change(struct mv_udc *udc)
{
	u32 portsc;

	portsc = readl(&udc->op_regs->portsc[0]);
	if (!(portsc & PORTSCX_PORT_RESET)) {
		/* Get the speed */
		u32 speed = portsc & PORTSCX_PORT_SPEED_MASK;
		switch (speed) {
		case PORTSCX_PORT_SPEED_HIGH:
			udc->gadget.speed = USB_SPEED_HIGH;
			break;
		case PORTSCX_PORT_SPEED_FULL:
			udc->gadget.speed = USB_SPEED_FULL;
			break;
		case PORTSCX_PORT_SPEED_LOW:
			udc->gadget.speed = USB_SPEED_LOW;
			break;
		default:
			udc->gadget.speed = USB_SPEED_UNKNOWN;
			break;
		}
	}

	if (portsc & PORTSCX_PORT_SUSPEND) {
		udc->resume_state = udc->usb_state;
		udc->usb_state = USB_STATE_SUSPENDED;
		if (udc->driver && udc->driver->suspend) {
			spin_unlock(&udc->lock);
			udc->driver->suspend(&udc->gadget);
			spin_lock(&udc->lock);
		}
	}

	if (!(portsc & PORTSCX_PORT_SUSPEND)
		&& udc->usb_state == USB_STATE_SUSPENDED) {
		handle_bus_resume(udc);
	}

	if (!udc->resume_state)
		udc->usb_state = USB_STATE_DEFAULT;
}
static void irq_process_error(struct mv_udc *udc)
{
	/* Increment the error count */
	udc->errors++;
}
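
/*
 * Top-level interrupt handler. Only status bits that are also enabled
 * in USBINTR are considered, and they are acknowledged up front (USBSTS
 * is write-one-to-clear) before being dispatched. Reset is processed
 * before transfer completions, which avoids handing completions that
 * predate the reset to the gadget driver.
 */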
static irqreturn_t mv_udc_irq(int irq, void *dev)
{
	struct mv_udc *udc = (struct mv_udc *)dev;
	u32 status, intr;

	/* if the controller is stopped, the interrupt is not ours */
	if (udc->stopped)
		return IRQ_NONE;

	spin_lock(&udc->lock);

	status = readl(&udc->op_regs->usbsts);
	intr = readl(&udc->op_regs->usbintr);
	status &= intr;

	if (status == 0) {
		spin_unlock(&udc->lock);
		return IRQ_NONE;
	}

	/* Acknowledge the interrupts that occurred */
	writel(status, &udc->op_regs->usbsts);

	if (status & USBSTS_ERR)
		irq_process_error(udc);

	if (status & USBSTS_RESET)
		irq_process_reset(udc);

	if (status & USBSTS_PORT_CHANGE)
		irq_process_port_change(udc);

	if (status & USBSTS_INT)
		irq_process_tr_complete(udc);

	if (status & USBSTS_SUSPEND)
		irq_process_suspend(udc);

	spin_unlock(&udc->lock);

	return IRQ_HANDLED;
}
static irqreturn_t mv_udc_vbus_irq(int irq, void *dev)
{
	struct mv_udc *udc = (struct mv_udc *)dev;

	/* polling VBUS and initializing the PHY may take too long here */
	if (udc->qwork)
		queue_work(udc->qwork, &udc->vbus_work);

	return IRQ_HANDLED;
}
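
/*
 * Bottom half for the VBUS interrupt. Judging by its use below, the
 * platform's vbus->poll() hook reports the current cable state as
 * VBUS_HIGH or VBUS_LOW; mv_udc_vbus_session() then connects or
 * disconnects the gadget, which also gates the clocks and PHY when
 * clock_gating is enabled.
 */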
static void mv_udc_vbus_work(struct work_struct *work)
{
	struct mv_udc *udc;
	unsigned int vbus;

	udc = container_of(work, struct mv_udc, vbus_work);
	if (!udc->pdata->vbus)
		return;

	vbus = udc->pdata->vbus->poll();
	dev_info(&udc->dev->dev, "vbus is %d\n", vbus);

	if (vbus == VBUS_HIGH)
		mv_udc_vbus_session(&udc->gadget, 1);
	else if (vbus == VBUS_LOW)
		mv_udc_vbus_session(&udc->gadget, 0);
}

/* release device structure */
static void gadget_release(struct device *_dev)
{
	struct mv_udc *udc = the_controller;

	complete(udc->done);
}
static int __devexit mv_udc_remove(struct platform_device *dev)
{
	struct mv_udc *udc = the_controller;
	int clk_i;

	usb_del_gadget_udc(&udc->gadget);

	if (udc->qwork) {
		flush_workqueue(udc->qwork);
		destroy_workqueue(udc->qwork);
	}

	/*
	 * If a transceiver is in use, the vbus irq was not
	 * requested by this driver.
	 */
	if (udc->pdata && udc->pdata->vbus
		&& udc->clock_gating && udc->transceiver == NULL)
		/* dev_id must match what was passed to request_threaded_irq() */
		free_irq(udc->pdata->vbus->irq, udc);

	/* free memory allocated in probe */
	if (udc->dtd_pool)
		dma_pool_destroy(udc->dtd_pool);

	if (udc->ep_dqh)
		dma_free_coherent(&dev->dev, udc->ep_dqh_size,
			udc->ep_dqh, udc->ep_dqh_dma);

	kfree(udc->eps);

	if (udc->irq)
		free_irq(udc->irq, udc);

	mv_udc_disable(udc);

	if (udc->cap_regs)
		iounmap(udc->cap_regs);
	if (udc->phy_regs)
		iounmap(udc->phy_regs);

	if (udc->status_req) {
		kfree(udc->status_req->req.buf);
		kfree(udc->status_req);
	}

	/* only clknum clocks were acquired in probe; <= would overrun */
	for (clk_i = 0; clk_i < udc->clknum; clk_i++)
		clk_put(udc->clk[clk_i]);

	device_unregister(&udc->gadget.dev);

	/* free dev, wait for the release() finished */
	wait_for_completion(udc->done);
	kfree(udc);

	the_controller = NULL;

	return 0;
}
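
/*
 * Probe acquires its resources in a fixed order (clocks, register
 * windows, clock enable, dQH memory, dTD pool, endpoint array, status
 * request, IRQ, gadget device) and the error labels at the bottom
 * unwind them in exactly the reverse order, so every goto target
 * releases precisely what was acquired before the failure point.
 */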
static int __devinit mv_udc_probe(struct platform_device *dev)
{
	struct mv_usb_platform_data *pdata = dev->dev.platform_data;
	struct mv_udc *udc;
	int retval = 0;
	int clk_i = 0;
	struct resource *r;
	size_t size;

	if (pdata == NULL) {
		dev_err(&dev->dev, "missing platform_data\n");
		return -ENODEV;
	}

	size = sizeof(*udc) + sizeof(struct clk *) * pdata->clknum;
	udc = kzalloc(size, GFP_KERNEL);
	if (udc == NULL) {
		dev_err(&dev->dev, "failed to allocate memory for udc\n");
		return -ENOMEM;
	}

	the_controller = udc;
	udc->done = &release_done;
	udc->pdata = dev->dev.platform_data;
	spin_lock_init(&udc->lock);

	udc->dev = dev;

#ifdef CONFIG_USB_OTG_UTILS
	if (pdata->mode == MV_USB_MODE_OTG)
		udc->transceiver = otg_get_transceiver();
#endif

	udc->clknum = pdata->clknum;
	for (clk_i = 0; clk_i < udc->clknum; clk_i++) {
		udc->clk[clk_i] = clk_get(&dev->dev, pdata->clkname[clk_i]);
		if (IS_ERR(udc->clk[clk_i])) {
			retval = PTR_ERR(udc->clk[clk_i]);
			goto err_put_clk;
		}
	}

	r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "capregs");
	if (r == NULL) {
		dev_err(&dev->dev, "no I/O memory resource defined\n");
		retval = -ENODEV;
		goto err_put_clk;
	}

	udc->cap_regs = (struct mv_cap_regs __iomem *)
		ioremap(r->start, resource_size(r));
	if (udc->cap_regs == NULL) {
		dev_err(&dev->dev, "failed to map I/O memory\n");
		retval = -EBUSY;
		goto err_put_clk;
	}

	r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "phyregs");
	if (r == NULL) {
		dev_err(&dev->dev, "no phy I/O memory resource defined\n");
		retval = -ENODEV;
		goto err_iounmap_capreg;
	}

	udc->phy_regs = ioremap(r->start, resource_size(r));
	if (udc->phy_regs == NULL) {
		dev_err(&dev->dev, "failed to map phy I/O memory\n");
		retval = -EBUSY;
		goto err_iounmap_capreg;
	}

	/* we will access the controller registers, so enable the clocks */
	retval = mv_udc_enable_internal(udc);
	if (retval)
		goto err_iounmap_phyreg;

	udc->op_regs =
		(struct mv_op_regs __iomem *)((unsigned long)udc->cap_regs
		+ (readl(&udc->cap_regs->caplength_hciversion)
			& CAPLENGTH_MASK));
	udc->max_eps = readl(&udc->cap_regs->dccparams) & DCCPARAMS_DEN_MASK;
	/*
	 * Some platforms use USB to download the boot image and may not
	 * disconnect the USB gadget before loading the kernel, so stop
	 * the udc here first.
	 */
	udc_stop(udc);
	writel(0xFFFFFFFF, &udc->op_regs->usbsts);

	size = udc->max_eps * sizeof(struct mv_dqh) * 2;
	size = (size + DQH_ALIGNMENT - 1) & ~(DQH_ALIGNMENT - 1);
	udc->ep_dqh = dma_alloc_coherent(&dev->dev, size,
		&udc->ep_dqh_dma, GFP_KERNEL);

	if (udc->ep_dqh == NULL) {
		dev_err(&dev->dev, "allocate dQH memory failed\n");
		retval = -ENOMEM;
		goto err_disable_clock;
	}
	udc->ep_dqh_size = size;

	/* create dTD dma_pool resource */
	udc->dtd_pool = dma_pool_create("mv_dtd",
			&dev->dev,
			sizeof(struct mv_dtd),
			DTD_ALIGNMENT,
			DMA_BOUNDARY);

	if (!udc->dtd_pool) {
		retval = -ENOMEM;
		goto err_free_dma;
	}

	size = udc->max_eps * sizeof(struct mv_ep) * 2;
	udc->eps = kzalloc(size, GFP_KERNEL);
	if (udc->eps == NULL) {
		dev_err(&dev->dev, "allocate ep memory failed\n");
		retval = -ENOMEM;
		goto err_free_eps;
	}

	/* initialize ep0 status request structure */
	udc->status_req = kzalloc(sizeof(struct mv_req), GFP_KERNEL);
	if (!udc->status_req) {
		dev_err(&dev->dev, "allocate status_req memory failed\n");
		retval = -ENOMEM;
		goto err_free_eps;
	}
	INIT_LIST_HEAD(&udc->status_req->queue);

	/* allocate a small amount of memory to get a valid address */
	udc->status_req->req.buf = kzalloc(8, GFP_KERNEL);
	if (!udc->status_req->req.buf) {
		retval = -ENOMEM;
		goto err_free_status_req;
	}
	udc->status_req->req.dma = DMA_ADDR_INVALID;
	udc->resume_state = USB_STATE_NOTATTACHED;
	udc->usb_state = USB_STATE_POWERED;
	udc->ep0_dir = EP_DIR_OUT;
	udc->remote_wakeup = 0;

	r = platform_get_resource(udc->dev, IORESOURCE_IRQ, 0);
	if (r == NULL) {
		dev_err(&dev->dev, "no IRQ resource defined\n");
		retval = -ENODEV;
		goto err_free_status_req;
	}
	udc->irq = r->start;
	if (request_irq(udc->irq, mv_udc_irq,
		IRQF_SHARED, driver_name, udc)) {
		dev_err(&dev->dev, "Request irq %d for UDC failed\n",
			udc->irq);
		retval = -ENODEV;
		goto err_free_status_req;
	}

	/* initialize gadget structure */
	udc->gadget.ops = &mv_ops;		/* usb_gadget_ops */
	udc->gadget.ep0 = &udc->eps[0].ep;	/* gadget ep0 */
	INIT_LIST_HEAD(&udc->gadget.ep_list);	/* ep_list */
	udc->gadget.speed = USB_SPEED_UNKNOWN;	/* speed */
	udc->gadget.max_speed = USB_SPEED_HIGH;	/* support dual speed */

	/* the "gadget" abstracts/virtualizes the controller */
	dev_set_name(&udc->gadget.dev, "gadget");
	udc->gadget.dev.parent = &dev->dev;
	udc->gadget.dev.dma_mask = dev->dev.dma_mask;
	udc->gadget.dev.release = gadget_release;
	udc->gadget.name = driver_name;		/* gadget name */

	retval = device_register(&udc->gadget.dev);
	if (retval)
		goto err_free_irq;

	eps_init(udc);

	/* VBUS detect: we can disable/enable the clocks on demand. */
	if (udc->transceiver)
		udc->clock_gating = 1;
	else if (pdata->vbus) {
		udc->clock_gating = 1;
		retval = request_threaded_irq(pdata->vbus->irq, NULL,
			mv_udc_vbus_irq, IRQF_ONESHOT, "vbus", udc);
		if (retval) {
			dev_info(&dev->dev,
				"Cannot request irq for VBUS, "
				"disabling clock gating\n");
			udc->clock_gating = 0;
		}

		udc->qwork = create_singlethread_workqueue("mv_udc_queue");
		if (!udc->qwork) {
			dev_err(&dev->dev, "cannot create workqueue\n");
			retval = -ENOMEM;
			goto err_unregister;
		}

		INIT_WORK(&udc->vbus_work, mv_udc_vbus_work);
	}
	/*
	 * When clock gating is supported, we can disable clk and phy.
	 * If not, it means that VBUS detection is not supported; we
	 * have to keep vbus active all the time to let the controller work.
	 */
	if (udc->clock_gating)
		mv_udc_disable_internal(udc);
	else
		udc->vbus_active = 1;

	retval = usb_add_gadget_udc(&dev->dev, &udc->gadget);
	if (retval)
		goto err_unregister;

	dev_info(&dev->dev, "successfully probed UDC device, %s clock gating\n",
		udc->clock_gating ? "with" : "without");

	return 0;

err_unregister:
	if (udc->pdata && udc->pdata->vbus
		&& udc->clock_gating && udc->transceiver == NULL)
		/* dev_id must match what was passed to request_threaded_irq() */
		free_irq(pdata->vbus->irq, udc);
	device_unregister(&udc->gadget.dev);
err_free_irq:
	free_irq(udc->irq, udc);
err_free_status_req:
	kfree(udc->status_req->req.buf);
	kfree(udc->status_req);
err_free_eps:
	kfree(udc->eps);
err_destroy_dma:
	dma_pool_destroy(udc->dtd_pool);
err_free_dma:
	dma_free_coherent(&dev->dev, udc->ep_dqh_size,
		udc->ep_dqh, udc->ep_dqh_dma);
err_disable_clock:
	mv_udc_disable_internal(udc);
err_iounmap_phyreg:
	iounmap(udc->phy_regs);
err_iounmap_capreg:
	iounmap(udc->cap_regs);
err_put_clk:
	for (clk_i--; clk_i >= 0; clk_i--)
		clk_put(udc->clk[clk_i]);
	the_controller = NULL;
	kfree(udc);
	return retval;
}
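
/*
 * System PM. With an OTG transceiver the OTG driver owns suspend and
 * resume. When clock gating is in use, the controller is already
 * powered down whenever no cable is present, so only the !clock_gating
 * case needs explicit stop/start here. Suspend is refused with -EAGAIN
 * while a cable is plugged in, since the host could not be serviced
 * with the controller clocks off.
 */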
#ifdef CONFIG_PM
static int mv_udc_suspend(struct device *_dev)
{
	struct mv_udc *udc = the_controller;

	/* if OTG is enabled, the following is done in the OTG driver */
	if (udc->transceiver)
		return 0;

	if (udc->pdata->vbus && udc->pdata->vbus->poll)
		if (udc->pdata->vbus->poll() == VBUS_HIGH) {
			dev_info(&udc->dev->dev, "USB cable is connected!\n");
			return -EAGAIN;
		}

	/*
	 * The udc may suspend only when the cable is unplugged, so the
	 * clock_gating == 1 case needs no extra work here: the
	 * controller is already disabled.
	 */
	if (!udc->clock_gating) {
		udc_stop(udc);

		spin_lock_irq(&udc->lock);
		/* stop all usb activities */
		stop_activity(udc, udc->driver);
		spin_unlock_irq(&udc->lock);

		mv_udc_disable_internal(udc);
	}

	return 0;
}

static int mv_udc_resume(struct device *_dev)
{
	struct mv_udc *udc = the_controller;
	int retval;

	/* if OTG is enabled, the following is done in the OTG driver */
	if (udc->transceiver)
		return 0;

	if (!udc->clock_gating) {
		retval = mv_udc_enable_internal(udc);
		if (retval)
			return retval;

		if (udc->driver && udc->softconnect) {
			udc_reset(udc);
			ep0_reset(udc);
			udc_start(udc);
		}
	}

	return 0;
}

static const struct dev_pm_ops mv_udc_pm_ops = {
	.suspend	= mv_udc_suspend,
	.resume		= mv_udc_resume,
};
#endif
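
/*
 * On shutdown the controller is dropped back to idle by clearing the
 * controller-mode field (the low two bits of USBMODE), so neither host
 * nor device logic keeps running across a reboot.
 */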
static void mv_udc_shutdown(struct platform_device *dev)
{
	struct mv_udc *udc = the_controller;
	u32 mode;

	/* reset controller mode to IDLE */
	mode = readl(&udc->op_regs->usbmode);
	mode &= ~3;
	writel(mode, &udc->op_regs->usbmode);
}

static struct platform_driver udc_driver = {
	.probe		= mv_udc_probe,
	/* probe/remove are __devinit/__devexit, so use __devexit_p here */
	.remove		= __devexit_p(mv_udc_remove),
	.shutdown	= mv_udc_shutdown,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= "mv-udc",
#ifdef CONFIG_PM
		.pm	= &mv_udc_pm_ops,
#endif
	},
};
module_platform_driver(udc_driver);

MODULE_ALIAS("platform:mv-udc");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");