mv_udc_core.c 58 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762277227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493
  1. /*
  2. * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
  3. * Author: Chao Xie <chao.xie@marvell.com>
  4. * Neil Zhang <zhangwm@marvell.com>
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms of the GNU General Public License as published by the
  8. * Free Software Foundation; either version 2 of the License, or (at your
  9. * option) any later version.
  10. */
  11. #include <linux/module.h>
  12. #include <linux/pci.h>
  13. #include <linux/dma-mapping.h>
  14. #include <linux/dmapool.h>
  15. #include <linux/kernel.h>
  16. #include <linux/delay.h>
  17. #include <linux/ioport.h>
  18. #include <linux/sched.h>
  19. #include <linux/slab.h>
  20. #include <linux/errno.h>
  21. #include <linux/init.h>
  22. #include <linux/timer.h>
  23. #include <linux/list.h>
  24. #include <linux/interrupt.h>
  25. #include <linux/moduleparam.h>
  26. #include <linux/device.h>
  27. #include <linux/usb/ch9.h>
  28. #include <linux/usb/gadget.h>
  29. #include <linux/usb/otg.h>
  30. #include <linux/pm.h>
  31. #include <linux/io.h>
  32. #include <linux/irq.h>
  33. #include <linux/platform_device.h>
  34. #include <linux/clk.h>
  35. #include <linux/platform_data/mv_usb.h>
  36. #include <asm/unaligned.h>
  37. #include "mv_udc.h"
  38. #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
  39. #define DRIVER_VERSION "8 Nov 2010"
  40. #define ep_dir(ep) (((ep)->ep_num == 0) ? \
  41. ((ep)->udc->ep0_dir) : ((ep)->direction))
  42. /* timeout value -- usec */
  43. #define RESET_TIMEOUT 10000
  44. #define FLUSH_TIMEOUT 10000
  45. #define EPSTATUS_TIMEOUT 10000
  46. #define PRIME_TIMEOUT 10000
  47. #define READSAFE_TIMEOUT 1000
  48. #define DTD_TIMEOUT 1000
  49. #define LOOPS_USEC_SHIFT 4
  50. #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
  51. #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
  52. static DECLARE_COMPLETION(release_done);
  53. static const char driver_name[] = "mv_udc";
  54. static const char driver_desc[] = DRIVER_DESC;
  55. /* controller device global variable */
  56. static struct mv_udc *the_controller;
  57. int mv_usb_otgsc;
  58. static void nuke(struct mv_ep *ep, int status);
  59. static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver);
  60. /* for endpoint 0 operations */
  61. static const struct usb_endpoint_descriptor mv_ep0_desc = {
  62. .bLength = USB_DT_ENDPOINT_SIZE,
  63. .bDescriptorType = USB_DT_ENDPOINT,
  64. .bEndpointAddress = 0,
  65. .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
  66. .wMaxPacketSize = EP0_MAX_PKT_SIZE,
  67. };
  68. static void ep0_reset(struct mv_udc *udc)
  69. {
  70. struct mv_ep *ep;
  71. u32 epctrlx;
  72. int i = 0;
  73. /* ep0 in and out */
  74. for (i = 0; i < 2; i++) {
  75. ep = &udc->eps[i];
  76. ep->udc = udc;
  77. /* ep0 dQH */
  78. ep->dqh = &udc->ep_dqh[i];
  79. /* configure ep0 endpoint capabilities in dQH */
  80. ep->dqh->max_packet_length =
  81. (EP0_MAX_PKT_SIZE << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
  82. | EP_QUEUE_HEAD_IOS;
  83. ep->dqh->next_dtd_ptr = EP_QUEUE_HEAD_NEXT_TERMINATE;
  84. epctrlx = readl(&udc->op_regs->epctrlx[0]);
  85. if (i) { /* TX */
  86. epctrlx |= EPCTRL_TX_ENABLE
  87. | (USB_ENDPOINT_XFER_CONTROL
  88. << EPCTRL_TX_EP_TYPE_SHIFT);
  89. } else { /* RX */
  90. epctrlx |= EPCTRL_RX_ENABLE
  91. | (USB_ENDPOINT_XFER_CONTROL
  92. << EPCTRL_RX_EP_TYPE_SHIFT);
  93. }
  94. writel(epctrlx, &udc->op_regs->epctrlx[0]);
  95. }
  96. }
  97. /* protocol ep0 stall, will automatically be cleared on new transaction */
  98. static void ep0_stall(struct mv_udc *udc)
  99. {
  100. u32 epctrlx;
  101. /* set TX and RX to stall */
  102. epctrlx = readl(&udc->op_regs->epctrlx[0]);
  103. epctrlx |= EPCTRL_RX_EP_STALL | EPCTRL_TX_EP_STALL;
  104. writel(epctrlx, &udc->op_regs->epctrlx[0]);
  105. /* update ep0 state */
  106. udc->ep0_state = WAIT_FOR_SETUP;
  107. udc->ep0_dir = EP_DIR_OUT;
  108. }
  109. static int process_ep_req(struct mv_udc *udc, int index,
  110. struct mv_req *curr_req)
  111. {
  112. struct mv_dtd *curr_dtd;
  113. struct mv_dqh *curr_dqh;
  114. int td_complete, actual, remaining_length;
  115. int i, direction;
  116. int retval = 0;
  117. u32 errors;
  118. u32 bit_pos;
  119. curr_dqh = &udc->ep_dqh[index];
  120. direction = index % 2;
  121. curr_dtd = curr_req->head;
  122. td_complete = 0;
  123. actual = curr_req->req.length;
  124. for (i = 0; i < curr_req->dtd_count; i++) {
  125. if (curr_dtd->size_ioc_sts & DTD_STATUS_ACTIVE) {
  126. dev_dbg(&udc->dev->dev, "%s, dTD not completed\n",
  127. udc->eps[index].name);
  128. return 1;
  129. }
  130. errors = curr_dtd->size_ioc_sts & DTD_ERROR_MASK;
  131. if (!errors) {
  132. remaining_length =
  133. (curr_dtd->size_ioc_sts & DTD_PACKET_SIZE)
  134. >> DTD_LENGTH_BIT_POS;
  135. actual -= remaining_length;
  136. if (remaining_length) {
  137. if (direction) {
  138. dev_dbg(&udc->dev->dev,
  139. "TX dTD remains data\n");
  140. retval = -EPROTO;
  141. break;
  142. } else
  143. break;
  144. }
  145. } else {
  146. dev_info(&udc->dev->dev,
  147. "complete_tr error: ep=%d %s: error = 0x%x\n",
  148. index >> 1, direction ? "SEND" : "RECV",
  149. errors);
  150. if (errors & DTD_STATUS_HALTED) {
  151. /* Clear the errors and Halt condition */
  152. curr_dqh->size_ioc_int_sts &= ~errors;
  153. retval = -EPIPE;
  154. } else if (errors & DTD_STATUS_DATA_BUFF_ERR) {
  155. retval = -EPROTO;
  156. } else if (errors & DTD_STATUS_TRANSACTION_ERR) {
  157. retval = -EILSEQ;
  158. }
  159. }
  160. if (i != curr_req->dtd_count - 1)
  161. curr_dtd = (struct mv_dtd *)curr_dtd->next_dtd_virt;
  162. }
  163. if (retval)
  164. return retval;
  165. if (direction == EP_DIR_OUT)
  166. bit_pos = 1 << curr_req->ep->ep_num;
  167. else
  168. bit_pos = 1 << (16 + curr_req->ep->ep_num);
  169. while ((curr_dqh->curr_dtd_ptr == curr_dtd->td_dma)) {
  170. if (curr_dtd->dtd_next == EP_QUEUE_HEAD_NEXT_TERMINATE) {
  171. while (readl(&udc->op_regs->epstatus) & bit_pos)
  172. udelay(1);
  173. break;
  174. }
  175. udelay(1);
  176. }
  177. curr_req->req.actual = actual;
  178. return 0;
  179. }
  180. /*
  181. * done() - retire a request; caller blocked irqs
  182. * @status : request status to be set, only works when
  183. * request is still in progress.
  184. */
  185. static void done(struct mv_ep *ep, struct mv_req *req, int status)
  186. {
  187. struct mv_udc *udc = NULL;
  188. unsigned char stopped = ep->stopped;
  189. struct mv_dtd *curr_td, *next_td;
  190. int j;
  191. udc = (struct mv_udc *)ep->udc;
  192. /* Removed the req from fsl_ep->queue */
  193. list_del_init(&req->queue);
  194. /* req.status should be set as -EINPROGRESS in ep_queue() */
  195. if (req->req.status == -EINPROGRESS)
  196. req->req.status = status;
  197. else
  198. status = req->req.status;
  199. /* Free dtd for the request */
  200. next_td = req->head;
  201. for (j = 0; j < req->dtd_count; j++) {
  202. curr_td = next_td;
  203. if (j != req->dtd_count - 1)
  204. next_td = curr_td->next_dtd_virt;
  205. dma_pool_free(udc->dtd_pool, curr_td, curr_td->td_dma);
  206. }
  207. if (req->mapped) {
  208. dma_unmap_single(ep->udc->gadget.dev.parent,
  209. req->req.dma, req->req.length,
  210. ((ep_dir(ep) == EP_DIR_IN) ?
  211. DMA_TO_DEVICE : DMA_FROM_DEVICE));
  212. req->req.dma = DMA_ADDR_INVALID;
  213. req->mapped = 0;
  214. } else
  215. dma_sync_single_for_cpu(ep->udc->gadget.dev.parent,
  216. req->req.dma, req->req.length,
  217. ((ep_dir(ep) == EP_DIR_IN) ?
  218. DMA_TO_DEVICE : DMA_FROM_DEVICE));
  219. if (status && (status != -ESHUTDOWN))
  220. dev_info(&udc->dev->dev, "complete %s req %p stat %d len %u/%u",
  221. ep->ep.name, &req->req, status,
  222. req->req.actual, req->req.length);
  223. ep->stopped = 1;
  224. spin_unlock(&ep->udc->lock);
  225. /*
  226. * complete() is from gadget layer,
  227. * eg fsg->bulk_in_complete()
  228. */
  229. if (req->req.complete)
  230. req->req.complete(&ep->ep, &req->req);
  231. spin_lock(&ep->udc->lock);
  232. ep->stopped = stopped;
  233. }
  234. static int queue_dtd(struct mv_ep *ep, struct mv_req *req)
  235. {
  236. struct mv_udc *udc;
  237. struct mv_dqh *dqh;
  238. u32 bit_pos, direction;
  239. u32 usbcmd, epstatus;
  240. unsigned int loops;
  241. int retval = 0;
  242. udc = ep->udc;
  243. direction = ep_dir(ep);
  244. dqh = &(udc->ep_dqh[ep->ep_num * 2 + direction]);
  245. bit_pos = 1 << (((direction == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);
  246. /* check if the pipe is empty */
  247. if (!(list_empty(&ep->queue))) {
  248. struct mv_req *lastreq;
  249. lastreq = list_entry(ep->queue.prev, struct mv_req, queue);
  250. lastreq->tail->dtd_next =
  251. req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
  252. wmb();
  253. if (readl(&udc->op_regs->epprime) & bit_pos)
  254. goto done;
  255. loops = LOOPS(READSAFE_TIMEOUT);
  256. while (1) {
  257. /* start with setting the semaphores */
  258. usbcmd = readl(&udc->op_regs->usbcmd);
  259. usbcmd |= USBCMD_ATDTW_TRIPWIRE_SET;
  260. writel(usbcmd, &udc->op_regs->usbcmd);
  261. /* read the endpoint status */
  262. epstatus = readl(&udc->op_regs->epstatus) & bit_pos;
  263. /*
  264. * Reread the ATDTW semaphore bit to check if it is
  265. * cleared. When hardware see a hazard, it will clear
  266. * the bit or else we remain set to 1 and we can
  267. * proceed with priming of endpoint if not already
  268. * primed.
  269. */
  270. if (readl(&udc->op_regs->usbcmd)
  271. & USBCMD_ATDTW_TRIPWIRE_SET)
  272. break;
  273. loops--;
  274. if (loops == 0) {
  275. dev_err(&udc->dev->dev,
  276. "Timeout for ATDTW_TRIPWIRE...\n");
  277. retval = -ETIME;
  278. goto done;
  279. }
  280. udelay(LOOPS_USEC);
  281. }
  282. /* Clear the semaphore */
  283. usbcmd = readl(&udc->op_regs->usbcmd);
  284. usbcmd &= USBCMD_ATDTW_TRIPWIRE_CLEAR;
  285. writel(usbcmd, &udc->op_regs->usbcmd);
  286. if (epstatus)
  287. goto done;
  288. }
  289. /* Write dQH next pointer and terminate bit to 0 */
  290. dqh->next_dtd_ptr = req->head->td_dma
  291. & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
  292. /* clear active and halt bit, in case set from a previous error */
  293. dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);
  294. /* Ensure that updates to the QH will occure before priming. */
  295. wmb();
  296. /* Prime the Endpoint */
  297. writel(bit_pos, &udc->op_regs->epprime);
  298. done:
  299. return retval;
  300. }
  301. static struct mv_dtd *build_dtd(struct mv_req *req, unsigned *length,
  302. dma_addr_t *dma, int *is_last)
  303. {
  304. u32 temp;
  305. struct mv_dtd *dtd;
  306. struct mv_udc *udc;
  307. /* how big will this transfer be? */
  308. *length = min(req->req.length - req->req.actual,
  309. (unsigned)EP_MAX_LENGTH_TRANSFER);
  310. udc = req->ep->udc;
  311. /*
  312. * Be careful that no _GFP_HIGHMEM is set,
  313. * or we can not use dma_to_virt
  314. */
  315. dtd = dma_pool_alloc(udc->dtd_pool, GFP_KERNEL, dma);
  316. if (dtd == NULL)
  317. return dtd;
  318. dtd->td_dma = *dma;
  319. /* initialize buffer page pointers */
  320. temp = (u32)(req->req.dma + req->req.actual);
  321. dtd->buff_ptr0 = cpu_to_le32(temp);
  322. temp &= ~0xFFF;
  323. dtd->buff_ptr1 = cpu_to_le32(temp + 0x1000);
  324. dtd->buff_ptr2 = cpu_to_le32(temp + 0x2000);
  325. dtd->buff_ptr3 = cpu_to_le32(temp + 0x3000);
  326. dtd->buff_ptr4 = cpu_to_le32(temp + 0x4000);
  327. req->req.actual += *length;
  328. /* zlp is needed if req->req.zero is set */
  329. if (req->req.zero) {
  330. if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
  331. *is_last = 1;
  332. else
  333. *is_last = 0;
  334. } else if (req->req.length == req->req.actual)
  335. *is_last = 1;
  336. else
  337. *is_last = 0;
  338. /* Fill in the transfer size; set active bit */
  339. temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE);
  340. /* Enable interrupt for the last dtd of a request */
  341. if (*is_last && !req->req.no_interrupt)
  342. temp |= DTD_IOC;
  343. dtd->size_ioc_sts = temp;
  344. mb();
  345. return dtd;
  346. }
  347. /* generate dTD linked list for a request */
  348. static int req_to_dtd(struct mv_req *req)
  349. {
  350. unsigned count;
  351. int is_last, is_first = 1;
  352. struct mv_dtd *dtd, *last_dtd = NULL;
  353. struct mv_udc *udc;
  354. dma_addr_t dma;
  355. udc = req->ep->udc;
  356. do {
  357. dtd = build_dtd(req, &count, &dma, &is_last);
  358. if (dtd == NULL)
  359. return -ENOMEM;
  360. if (is_first) {
  361. is_first = 0;
  362. req->head = dtd;
  363. } else {
  364. last_dtd->dtd_next = dma;
  365. last_dtd->next_dtd_virt = dtd;
  366. }
  367. last_dtd = dtd;
  368. req->dtd_count++;
  369. } while (!is_last);
  370. /* set terminate bit to 1 for the last dTD */
  371. dtd->dtd_next = DTD_NEXT_TERMINATE;
  372. req->tail = dtd;
  373. return 0;
  374. }
  375. static int mv_ep_enable(struct usb_ep *_ep,
  376. const struct usb_endpoint_descriptor *desc)
  377. {
  378. struct mv_udc *udc;
  379. struct mv_ep *ep;
  380. struct mv_dqh *dqh;
  381. u16 max = 0;
  382. u32 bit_pos, epctrlx, direction;
  383. unsigned char zlt = 0, ios = 0, mult = 0;
  384. unsigned long flags;
  385. ep = container_of(_ep, struct mv_ep, ep);
  386. udc = ep->udc;
  387. if (!_ep || !desc || ep->ep.desc
  388. || desc->bDescriptorType != USB_DT_ENDPOINT)
  389. return -EINVAL;
  390. if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
  391. return -ESHUTDOWN;
  392. direction = ep_dir(ep);
  393. max = usb_endpoint_maxp(desc);
  394. /*
  395. * disable HW zero length termination select
  396. * driver handles zero length packet through req->req.zero
  397. */
  398. zlt = 1;
  399. bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
  400. /* Check if the Endpoint is Primed */
  401. if ((readl(&udc->op_regs->epprime) & bit_pos)
  402. || (readl(&udc->op_regs->epstatus) & bit_pos)) {
  403. dev_info(&udc->dev->dev,
  404. "ep=%d %s: Init ERROR: ENDPTPRIME=0x%x,"
  405. " ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
  406. (unsigned)ep->ep_num, direction ? "SEND" : "RECV",
  407. (unsigned)readl(&udc->op_regs->epprime),
  408. (unsigned)readl(&udc->op_regs->epstatus),
  409. (unsigned)bit_pos);
  410. goto en_done;
  411. }
  412. /* Set the max packet length, interrupt on Setup and Mult fields */
  413. switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
  414. case USB_ENDPOINT_XFER_BULK:
  415. zlt = 1;
  416. mult = 0;
  417. break;
  418. case USB_ENDPOINT_XFER_CONTROL:
  419. ios = 1;
  420. case USB_ENDPOINT_XFER_INT:
  421. mult = 0;
  422. break;
  423. case USB_ENDPOINT_XFER_ISOC:
  424. /* Calculate transactions needed for high bandwidth iso */
  425. mult = (unsigned char)(1 + ((max >> 11) & 0x03));
  426. max = max & 0x7ff; /* bit 0~10 */
  427. /* 3 transactions at most */
  428. if (mult > 3)
  429. goto en_done;
  430. break;
  431. default:
  432. goto en_done;
  433. }
  434. spin_lock_irqsave(&udc->lock, flags);
  435. /* Get the endpoint queue head address */
  436. dqh = ep->dqh;
  437. dqh->max_packet_length = (max << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
  438. | (mult << EP_QUEUE_HEAD_MULT_POS)
  439. | (zlt ? EP_QUEUE_HEAD_ZLT_SEL : 0)
  440. | (ios ? EP_QUEUE_HEAD_IOS : 0);
  441. dqh->next_dtd_ptr = 1;
  442. dqh->size_ioc_int_sts = 0;
  443. ep->ep.maxpacket = max;
  444. ep->ep.desc = desc;
  445. ep->stopped = 0;
  446. /* Enable the endpoint for Rx or Tx and set the endpoint type */
  447. epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
  448. if (direction == EP_DIR_IN) {
  449. epctrlx &= ~EPCTRL_TX_ALL_MASK;
  450. epctrlx |= EPCTRL_TX_ENABLE | EPCTRL_TX_DATA_TOGGLE_RST
  451. | ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
  452. << EPCTRL_TX_EP_TYPE_SHIFT);
  453. } else {
  454. epctrlx &= ~EPCTRL_RX_ALL_MASK;
  455. epctrlx |= EPCTRL_RX_ENABLE | EPCTRL_RX_DATA_TOGGLE_RST
  456. | ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
  457. << EPCTRL_RX_EP_TYPE_SHIFT);
  458. }
  459. writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
  460. /*
  461. * Implement Guideline (GL# USB-7) The unused endpoint type must
  462. * be programmed to bulk.
  463. */
  464. epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
  465. if ((epctrlx & EPCTRL_RX_ENABLE) == 0) {
  466. epctrlx |= (USB_ENDPOINT_XFER_BULK
  467. << EPCTRL_RX_EP_TYPE_SHIFT);
  468. writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
  469. }
  470. epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
  471. if ((epctrlx & EPCTRL_TX_ENABLE) == 0) {
  472. epctrlx |= (USB_ENDPOINT_XFER_BULK
  473. << EPCTRL_TX_EP_TYPE_SHIFT);
  474. writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
  475. }
  476. spin_unlock_irqrestore(&udc->lock, flags);
  477. return 0;
  478. en_done:
  479. return -EINVAL;
  480. }
  481. static int mv_ep_disable(struct usb_ep *_ep)
  482. {
  483. struct mv_udc *udc;
  484. struct mv_ep *ep;
  485. struct mv_dqh *dqh;
  486. u32 bit_pos, epctrlx, direction;
  487. unsigned long flags;
  488. ep = container_of(_ep, struct mv_ep, ep);
  489. if ((_ep == NULL) || !ep->ep.desc)
  490. return -EINVAL;
  491. udc = ep->udc;
  492. /* Get the endpoint queue head address */
  493. dqh = ep->dqh;
  494. spin_lock_irqsave(&udc->lock, flags);
  495. direction = ep_dir(ep);
  496. bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
  497. /* Reset the max packet length and the interrupt on Setup */
  498. dqh->max_packet_length = 0;
  499. /* Disable the endpoint for Rx or Tx and reset the endpoint type */
  500. epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
  501. epctrlx &= ~((direction == EP_DIR_IN)
  502. ? (EPCTRL_TX_ENABLE | EPCTRL_TX_TYPE)
  503. : (EPCTRL_RX_ENABLE | EPCTRL_RX_TYPE));
  504. writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
  505. /* nuke all pending requests (does flush) */
  506. nuke(ep, -ESHUTDOWN);
  507. ep->ep.desc = NULL;
  508. ep->stopped = 1;
  509. spin_unlock_irqrestore(&udc->lock, flags);
  510. return 0;
  511. }
  512. static struct usb_request *
  513. mv_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
  514. {
  515. struct mv_req *req = NULL;
  516. req = kzalloc(sizeof *req, gfp_flags);
  517. if (!req)
  518. return NULL;
  519. req->req.dma = DMA_ADDR_INVALID;
  520. INIT_LIST_HEAD(&req->queue);
  521. return &req->req;
  522. }
  523. static void mv_free_request(struct usb_ep *_ep, struct usb_request *_req)
  524. {
  525. struct mv_req *req = NULL;
  526. req = container_of(_req, struct mv_req, req);
  527. if (_req)
  528. kfree(req);
  529. }
  530. static void mv_ep_fifo_flush(struct usb_ep *_ep)
  531. {
  532. struct mv_udc *udc;
  533. u32 bit_pos, direction;
  534. struct mv_ep *ep;
  535. unsigned int loops;
  536. if (!_ep)
  537. return;
  538. ep = container_of(_ep, struct mv_ep, ep);
  539. if (!ep->ep.desc)
  540. return;
  541. udc = ep->udc;
  542. direction = ep_dir(ep);
  543. if (ep->ep_num == 0)
  544. bit_pos = (1 << 16) | 1;
  545. else if (direction == EP_DIR_OUT)
  546. bit_pos = 1 << ep->ep_num;
  547. else
  548. bit_pos = 1 << (16 + ep->ep_num);
  549. loops = LOOPS(EPSTATUS_TIMEOUT);
  550. do {
  551. unsigned int inter_loops;
  552. if (loops == 0) {
  553. dev_err(&udc->dev->dev,
  554. "TIMEOUT for ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
  555. (unsigned)readl(&udc->op_regs->epstatus),
  556. (unsigned)bit_pos);
  557. return;
  558. }
  559. /* Write 1 to the Flush register */
  560. writel(bit_pos, &udc->op_regs->epflush);
  561. /* Wait until flushing completed */
  562. inter_loops = LOOPS(FLUSH_TIMEOUT);
  563. while (readl(&udc->op_regs->epflush)) {
  564. /*
  565. * ENDPTFLUSH bit should be cleared to indicate this
  566. * operation is complete
  567. */
  568. if (inter_loops == 0) {
  569. dev_err(&udc->dev->dev,
  570. "TIMEOUT for ENDPTFLUSH=0x%x,"
  571. "bit_pos=0x%x\n",
  572. (unsigned)readl(&udc->op_regs->epflush),
  573. (unsigned)bit_pos);
  574. return;
  575. }
  576. inter_loops--;
  577. udelay(LOOPS_USEC);
  578. }
  579. loops--;
  580. } while (readl(&udc->op_regs->epstatus) & bit_pos);
  581. }
  582. /* queues (submits) an I/O request to an endpoint */
  583. static int
  584. mv_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
  585. {
  586. struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
  587. struct mv_req *req = container_of(_req, struct mv_req, req);
  588. struct mv_udc *udc = ep->udc;
  589. unsigned long flags;
  590. /* catch various bogus parameters */
  591. if (!_req || !req->req.complete || !req->req.buf
  592. || !list_empty(&req->queue)) {
  593. dev_err(&udc->dev->dev, "%s, bad params", __func__);
  594. return -EINVAL;
  595. }
  596. if (unlikely(!_ep || !ep->ep.desc)) {
  597. dev_err(&udc->dev->dev, "%s, bad ep", __func__);
  598. return -EINVAL;
  599. }
  600. if (ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
  601. if (req->req.length > ep->ep.maxpacket)
  602. return -EMSGSIZE;
  603. }
  604. udc = ep->udc;
  605. if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
  606. return -ESHUTDOWN;
  607. req->ep = ep;
  608. /* map virtual address to hardware */
  609. if (req->req.dma == DMA_ADDR_INVALID) {
  610. req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
  611. req->req.buf,
  612. req->req.length, ep_dir(ep)
  613. ? DMA_TO_DEVICE
  614. : DMA_FROM_DEVICE);
  615. req->mapped = 1;
  616. } else {
  617. dma_sync_single_for_device(ep->udc->gadget.dev.parent,
  618. req->req.dma, req->req.length,
  619. ep_dir(ep)
  620. ? DMA_TO_DEVICE
  621. : DMA_FROM_DEVICE);
  622. req->mapped = 0;
  623. }
  624. req->req.status = -EINPROGRESS;
  625. req->req.actual = 0;
  626. req->dtd_count = 0;
  627. spin_lock_irqsave(&udc->lock, flags);
  628. /* build dtds and push them to device queue */
  629. if (!req_to_dtd(req)) {
  630. int retval;
  631. retval = queue_dtd(ep, req);
  632. if (retval) {
  633. spin_unlock_irqrestore(&udc->lock, flags);
  634. return retval;
  635. }
  636. } else {
  637. spin_unlock_irqrestore(&udc->lock, flags);
  638. return -ENOMEM;
  639. }
  640. /* Update ep0 state */
  641. if (ep->ep_num == 0)
  642. udc->ep0_state = DATA_STATE_XMIT;
  643. /* irq handler advances the queue */
  644. list_add_tail(&req->queue, &ep->queue);
  645. spin_unlock_irqrestore(&udc->lock, flags);
  646. return 0;
  647. }
  648. static void mv_prime_ep(struct mv_ep *ep, struct mv_req *req)
  649. {
  650. struct mv_dqh *dqh = ep->dqh;
  651. u32 bit_pos;
  652. /* Write dQH next pointer and terminate bit to 0 */
  653. dqh->next_dtd_ptr = req->head->td_dma
  654. & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
  655. /* clear active and halt bit, in case set from a previous error */
  656. dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);
  657. /* Ensure that updates to the QH will occure before priming. */
  658. wmb();
  659. bit_pos = 1 << (((ep_dir(ep) == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);
  660. /* Prime the Endpoint */
  661. writel(bit_pos, &ep->udc->op_regs->epprime);
  662. }
  663. /* dequeues (cancels, unlinks) an I/O request from an endpoint */
  664. static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
  665. {
  666. struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
  667. struct mv_req *req;
  668. struct mv_udc *udc = ep->udc;
  669. unsigned long flags;
  670. int stopped, ret = 0;
  671. u32 epctrlx;
  672. if (!_ep || !_req)
  673. return -EINVAL;
  674. spin_lock_irqsave(&ep->udc->lock, flags);
  675. stopped = ep->stopped;
  676. /* Stop the ep before we deal with the queue */
  677. ep->stopped = 1;
  678. epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
  679. if (ep_dir(ep) == EP_DIR_IN)
  680. epctrlx &= ~EPCTRL_TX_ENABLE;
  681. else
  682. epctrlx &= ~EPCTRL_RX_ENABLE;
  683. writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
  684. /* make sure it's actually queued on this endpoint */
  685. list_for_each_entry(req, &ep->queue, queue) {
  686. if (&req->req == _req)
  687. break;
  688. }
  689. if (&req->req != _req) {
  690. ret = -EINVAL;
  691. goto out;
  692. }
  693. /* The request is in progress, or completed but not dequeued */
  694. if (ep->queue.next == &req->queue) {
  695. _req->status = -ECONNRESET;
  696. mv_ep_fifo_flush(_ep); /* flush current transfer */
  697. /* The request isn't the last request in this ep queue */
  698. if (req->queue.next != &ep->queue) {
  699. struct mv_req *next_req;
  700. next_req = list_entry(req->queue.next,
  701. struct mv_req, queue);
  702. /* Point the QH to the first TD of next request */
  703. mv_prime_ep(ep, next_req);
  704. } else {
  705. struct mv_dqh *qh;
  706. qh = ep->dqh;
  707. qh->next_dtd_ptr = 1;
  708. qh->size_ioc_int_sts = 0;
  709. }
  710. /* The request hasn't been processed, patch up the TD chain */
  711. } else {
  712. struct mv_req *prev_req;
  713. prev_req = list_entry(req->queue.prev, struct mv_req, queue);
  714. writel(readl(&req->tail->dtd_next),
  715. &prev_req->tail->dtd_next);
  716. }
  717. done(ep, req, -ECONNRESET);
  718. /* Enable EP */
  719. out:
  720. epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
  721. if (ep_dir(ep) == EP_DIR_IN)
  722. epctrlx |= EPCTRL_TX_ENABLE;
  723. else
  724. epctrlx |= EPCTRL_RX_ENABLE;
  725. writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
  726. ep->stopped = stopped;
  727. spin_unlock_irqrestore(&ep->udc->lock, flags);
  728. return ret;
  729. }
  730. static void ep_set_stall(struct mv_udc *udc, u8 ep_num, u8 direction, int stall)
  731. {
  732. u32 epctrlx;
  733. epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);
  734. if (stall) {
  735. if (direction == EP_DIR_IN)
  736. epctrlx |= EPCTRL_TX_EP_STALL;
  737. else
  738. epctrlx |= EPCTRL_RX_EP_STALL;
  739. } else {
  740. if (direction == EP_DIR_IN) {
  741. epctrlx &= ~EPCTRL_TX_EP_STALL;
  742. epctrlx |= EPCTRL_TX_DATA_TOGGLE_RST;
  743. } else {
  744. epctrlx &= ~EPCTRL_RX_EP_STALL;
  745. epctrlx |= EPCTRL_RX_DATA_TOGGLE_RST;
  746. }
  747. }
  748. writel(epctrlx, &udc->op_regs->epctrlx[ep_num]);
  749. }
  750. static int ep_is_stall(struct mv_udc *udc, u8 ep_num, u8 direction)
  751. {
  752. u32 epctrlx;
  753. epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);
  754. if (direction == EP_DIR_OUT)
  755. return (epctrlx & EPCTRL_RX_EP_STALL) ? 1 : 0;
  756. else
  757. return (epctrlx & EPCTRL_TX_EP_STALL) ? 1 : 0;
  758. }
  759. static int mv_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
  760. {
  761. struct mv_ep *ep;
  762. unsigned long flags = 0;
  763. int status = 0;
  764. struct mv_udc *udc;
  765. ep = container_of(_ep, struct mv_ep, ep);
  766. udc = ep->udc;
  767. if (!_ep || !ep->ep.desc) {
  768. status = -EINVAL;
  769. goto out;
  770. }
  771. if (ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
  772. status = -EOPNOTSUPP;
  773. goto out;
  774. }
  775. /*
  776. * Attempt to halt IN ep will fail if any transfer requests
  777. * are still queue
  778. */
  779. if (halt && (ep_dir(ep) == EP_DIR_IN) && !list_empty(&ep->queue)) {
  780. status = -EAGAIN;
  781. goto out;
  782. }
  783. spin_lock_irqsave(&ep->udc->lock, flags);
  784. ep_set_stall(udc, ep->ep_num, ep_dir(ep), halt);
  785. if (halt && wedge)
  786. ep->wedge = 1;
  787. else if (!halt)
  788. ep->wedge = 0;
  789. spin_unlock_irqrestore(&ep->udc->lock, flags);
  790. if (ep->ep_num == 0) {
  791. udc->ep0_state = WAIT_FOR_SETUP;
  792. udc->ep0_dir = EP_DIR_OUT;
  793. }
  794. out:
  795. return status;
  796. }
  797. static int mv_ep_set_halt(struct usb_ep *_ep, int halt)
  798. {
  799. return mv_ep_set_halt_wedge(_ep, halt, 0);
  800. }
  801. static int mv_ep_set_wedge(struct usb_ep *_ep)
  802. {
  803. return mv_ep_set_halt_wedge(_ep, 1, 1);
  804. }
  805. static struct usb_ep_ops mv_ep_ops = {
  806. .enable = mv_ep_enable,
  807. .disable = mv_ep_disable,
  808. .alloc_request = mv_alloc_request,
  809. .free_request = mv_free_request,
  810. .queue = mv_ep_queue,
  811. .dequeue = mv_ep_dequeue,
  812. .set_wedge = mv_ep_set_wedge,
  813. .set_halt = mv_ep_set_halt,
  814. .fifo_flush = mv_ep_fifo_flush, /* flush fifo */
  815. };
  816. static void udc_clock_enable(struct mv_udc *udc)
  817. {
  818. unsigned int i;
  819. for (i = 0; i < udc->clknum; i++)
  820. clk_enable(udc->clk[i]);
  821. }
  822. static void udc_clock_disable(struct mv_udc *udc)
  823. {
  824. unsigned int i;
  825. for (i = 0; i < udc->clknum; i++)
  826. clk_disable(udc->clk[i]);
  827. }
  828. static void udc_stop(struct mv_udc *udc)
  829. {
  830. u32 tmp;
  831. /* Disable interrupts */
  832. tmp = readl(&udc->op_regs->usbintr);
  833. tmp &= ~(USBINTR_INT_EN | USBINTR_ERR_INT_EN |
  834. USBINTR_PORT_CHANGE_DETECT_EN | USBINTR_RESET_EN);
  835. writel(tmp, &udc->op_regs->usbintr);
  836. udc->stopped = 1;
  837. /* Reset the Run the bit in the command register to stop VUSB */
  838. tmp = readl(&udc->op_regs->usbcmd);
  839. tmp &= ~USBCMD_RUN_STOP;
  840. writel(tmp, &udc->op_regs->usbcmd);
  841. }
  842. static void udc_start(struct mv_udc *udc)
  843. {
  844. u32 usbintr;
  845. usbintr = USBINTR_INT_EN | USBINTR_ERR_INT_EN
  846. | USBINTR_PORT_CHANGE_DETECT_EN
  847. | USBINTR_RESET_EN | USBINTR_DEVICE_SUSPEND;
  848. /* Enable interrupts */
  849. writel(usbintr, &udc->op_regs->usbintr);
  850. udc->stopped = 0;
  851. /* Set the Run bit in the command register */
  852. writel(USBCMD_RUN_STOP, &udc->op_regs->usbcmd);
  853. }
  854. static int udc_reset(struct mv_udc *udc)
  855. {
  856. unsigned int loops;
  857. u32 tmp, portsc;
  858. /* Stop the controller */
  859. tmp = readl(&udc->op_regs->usbcmd);
  860. tmp &= ~USBCMD_RUN_STOP;
  861. writel(tmp, &udc->op_regs->usbcmd);
  862. /* Reset the controller to get default values */
  863. writel(USBCMD_CTRL_RESET, &udc->op_regs->usbcmd);
  864. /* wait for reset to complete */
  865. loops = LOOPS(RESET_TIMEOUT);
  866. while (readl(&udc->op_regs->usbcmd) & USBCMD_CTRL_RESET) {
  867. if (loops == 0) {
  868. dev_err(&udc->dev->dev,
  869. "Wait for RESET completed TIMEOUT\n");
  870. return -ETIMEDOUT;
  871. }
  872. loops--;
  873. udelay(LOOPS_USEC);
  874. }
  875. /* set controller to device mode */
  876. tmp = readl(&udc->op_regs->usbmode);
  877. tmp |= USBMODE_CTRL_MODE_DEVICE;
  878. /* turn setup lockout off, require setup tripwire in usbcmd */
  879. tmp |= USBMODE_SETUP_LOCK_OFF | USBMODE_STREAM_DISABLE;
  880. writel(tmp, &udc->op_regs->usbmode);
  881. writel(0x0, &udc->op_regs->epsetupstat);
  882. /* Configure the Endpoint List Address */
  883. writel(udc->ep_dqh_dma & USB_EP_LIST_ADDRESS_MASK,
  884. &udc->op_regs->eplistaddr);
  885. portsc = readl(&udc->op_regs->portsc[0]);
  886. if (readl(&udc->cap_regs->hcsparams) & HCSPARAMS_PPC)
  887. portsc &= (~PORTSCX_W1C_BITS | ~PORTSCX_PORT_POWER);
  888. if (udc->force_fs)
  889. portsc |= PORTSCX_FORCE_FULL_SPEED_CONNECT;
  890. else
  891. portsc &= (~PORTSCX_FORCE_FULL_SPEED_CONNECT);
  892. writel(portsc, &udc->op_regs->portsc[0]);
  893. tmp = readl(&udc->op_regs->epctrlx[0]);
  894. tmp &= ~(EPCTRL_TX_EP_STALL | EPCTRL_RX_EP_STALL);
  895. writel(tmp, &udc->op_regs->epctrlx[0]);
  896. return 0;
  897. }
  898. static int mv_udc_enable_internal(struct mv_udc *udc)
  899. {
  900. int retval;
  901. if (udc->active)
  902. return 0;
  903. dev_dbg(&udc->dev->dev, "enable udc\n");
  904. udc_clock_enable(udc);
  905. if (udc->pdata->phy_init) {
  906. retval = udc->pdata->phy_init(udc->phy_regs);
  907. if (retval) {
  908. dev_err(&udc->dev->dev,
  909. "init phy error %d\n", retval);
  910. udc_clock_disable(udc);
  911. return retval;
  912. }
  913. }
  914. udc->active = 1;
  915. return 0;
  916. }
  917. static int mv_udc_enable(struct mv_udc *udc)
  918. {
  919. if (udc->clock_gating)
  920. return mv_udc_enable_internal(udc);
  921. return 0;
  922. }
  923. static void mv_udc_disable_internal(struct mv_udc *udc)
  924. {
  925. if (udc->active) {
  926. dev_dbg(&udc->dev->dev, "disable udc\n");
  927. if (udc->pdata->phy_deinit)
  928. udc->pdata->phy_deinit(udc->phy_regs);
  929. udc_clock_disable(udc);
  930. udc->active = 0;
  931. }
  932. }
  933. static void mv_udc_disable(struct mv_udc *udc)
  934. {
  935. if (udc->clock_gating)
  936. mv_udc_disable_internal(udc);
  937. }
  938. static int mv_udc_get_frame(struct usb_gadget *gadget)
  939. {
  940. struct mv_udc *udc;
  941. u16 retval;
  942. if (!gadget)
  943. return -ENODEV;
  944. udc = container_of(gadget, struct mv_udc, gadget);
  945. retval = readl(&udc->op_regs->frindex) & USB_FRINDEX_MASKS;
  946. return retval;
  947. }
  948. /* Tries to wake up the host connected to this gadget */
  949. static int mv_udc_wakeup(struct usb_gadget *gadget)
  950. {
  951. struct mv_udc *udc = container_of(gadget, struct mv_udc, gadget);
  952. u32 portsc;
  953. /* Remote wakeup feature not enabled by host */
  954. if (!udc->remote_wakeup)
  955. return -ENOTSUPP;
  956. portsc = readl(&udc->op_regs->portsc);
  957. /* not suspended? */
  958. if (!(portsc & PORTSCX_PORT_SUSPEND))
  959. return 0;
  960. /* trigger force resume */
  961. portsc |= PORTSCX_PORT_FORCE_RESUME;
  962. writel(portsc, &udc->op_regs->portsc[0]);
  963. return 0;
  964. }
  965. static int mv_udc_vbus_session(struct usb_gadget *gadget, int is_active)
  966. {
  967. struct mv_udc *udc;
  968. unsigned long flags;
  969. int retval = 0;
  970. udc = container_of(gadget, struct mv_udc, gadget);
  971. spin_lock_irqsave(&udc->lock, flags);
  972. udc->vbus_active = (is_active != 0);
  973. dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
  974. __func__, udc->softconnect, udc->vbus_active);
  975. if (udc->driver && udc->softconnect && udc->vbus_active) {
  976. retval = mv_udc_enable(udc);
  977. if (retval == 0) {
  978. /* Clock is disabled, need re-init registers */
  979. udc_reset(udc);
  980. ep0_reset(udc);
  981. udc_start(udc);
  982. }
  983. } else if (udc->driver && udc->softconnect) {
  984. /* stop all the transfer in queue*/
  985. stop_activity(udc, udc->driver);
  986. udc_stop(udc);
  987. mv_udc_disable(udc);
  988. }
  989. spin_unlock_irqrestore(&udc->lock, flags);
  990. return retval;
  991. }
  992. static int mv_udc_pullup(struct usb_gadget *gadget, int is_on)
  993. {
  994. struct mv_udc *udc;
  995. unsigned long flags;
  996. int retval = 0;
  997. udc = container_of(gadget, struct mv_udc, gadget);
  998. spin_lock_irqsave(&udc->lock, flags);
  999. udc->softconnect = (is_on != 0);
  1000. dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
  1001. __func__, udc->softconnect, udc->vbus_active);
  1002. if (udc->driver && udc->softconnect && udc->vbus_active) {
  1003. retval = mv_udc_enable(udc);
  1004. if (retval == 0) {
  1005. /* Clock is disabled, need re-init registers */
  1006. udc_reset(udc);
  1007. ep0_reset(udc);
  1008. udc_start(udc);
  1009. }
  1010. } else if (udc->driver && udc->vbus_active) {
  1011. /* stop all the transfer in queue*/
  1012. stop_activity(udc, udc->driver);
  1013. udc_stop(udc);
  1014. mv_udc_disable(udc);
  1015. }
  1016. spin_unlock_irqrestore(&udc->lock, flags);
  1017. return retval;
  1018. }
  1019. static int mv_udc_start(struct usb_gadget_driver *driver,
  1020. int (*bind)(struct usb_gadget *));
  1021. static int mv_udc_stop(struct usb_gadget_driver *driver);
  1022. /* device controller usb_gadget_ops structure */
  1023. static const struct usb_gadget_ops mv_ops = {
  1024. /* returns the current frame number */
  1025. .get_frame = mv_udc_get_frame,
  1026. /* tries to wake up the host connected to this gadget */
  1027. .wakeup = mv_udc_wakeup,
  1028. /* notify controller that VBUS is powered or not */
  1029. .vbus_session = mv_udc_vbus_session,
  1030. /* D+ pullup, software-controlled connect/disconnect to USB host */
  1031. .pullup = mv_udc_pullup,
  1032. .start = mv_udc_start,
  1033. .stop = mv_udc_stop,
  1034. };
  1035. static int eps_init(struct mv_udc *udc)
  1036. {
  1037. struct mv_ep *ep;
  1038. char name[14];
  1039. int i;
  1040. /* initialize ep0 */
  1041. ep = &udc->eps[0];
  1042. ep->udc = udc;
  1043. strncpy(ep->name, "ep0", sizeof(ep->name));
  1044. ep->ep.name = ep->name;
  1045. ep->ep.ops = &mv_ep_ops;
  1046. ep->wedge = 0;
  1047. ep->stopped = 0;
  1048. ep->ep.maxpacket = EP0_MAX_PKT_SIZE;
  1049. ep->ep_num = 0;
  1050. ep->ep.desc = &mv_ep0_desc;
  1051. INIT_LIST_HEAD(&ep->queue);
  1052. ep->ep_type = USB_ENDPOINT_XFER_CONTROL;
  1053. /* initialize other endpoints */
  1054. for (i = 2; i < udc->max_eps * 2; i++) {
  1055. ep = &udc->eps[i];
  1056. if (i % 2) {
  1057. snprintf(name, sizeof(name), "ep%din", i / 2);
  1058. ep->direction = EP_DIR_IN;
  1059. } else {
  1060. snprintf(name, sizeof(name), "ep%dout", i / 2);
  1061. ep->direction = EP_DIR_OUT;
  1062. }
  1063. ep->udc = udc;
  1064. strncpy(ep->name, name, sizeof(ep->name));
  1065. ep->ep.name = ep->name;
  1066. ep->ep.ops = &mv_ep_ops;
  1067. ep->stopped = 0;
  1068. ep->ep.maxpacket = (unsigned short) ~0;
  1069. ep->ep_num = i / 2;
  1070. INIT_LIST_HEAD(&ep->queue);
  1071. list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
  1072. ep->dqh = &udc->ep_dqh[i];
  1073. }
  1074. return 0;
  1075. }
  1076. /* delete all endpoint requests, called with spinlock held */
  1077. static void nuke(struct mv_ep *ep, int status)
  1078. {
  1079. /* called with spinlock held */
  1080. ep->stopped = 1;
  1081. /* endpoint fifo flush */
  1082. mv_ep_fifo_flush(&ep->ep);
  1083. while (!list_empty(&ep->queue)) {
  1084. struct mv_req *req = NULL;
  1085. req = list_entry(ep->queue.next, struct mv_req, queue);
  1086. done(ep, req, status);
  1087. }
  1088. }
  1089. /* stop all USB activities */
  1090. static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver)
  1091. {
  1092. struct mv_ep *ep;
  1093. nuke(&udc->eps[0], -ESHUTDOWN);
  1094. list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
  1095. nuke(ep, -ESHUTDOWN);
  1096. }
  1097. /* report disconnect; the driver is already quiesced */
  1098. if (driver) {
  1099. spin_unlock(&udc->lock);
  1100. driver->disconnect(&udc->gadget);
  1101. spin_lock(&udc->lock);
  1102. }
  1103. }
  1104. static int mv_udc_start(struct usb_gadget_driver *driver,
  1105. int (*bind)(struct usb_gadget *))
  1106. {
  1107. struct mv_udc *udc = the_controller;
  1108. int retval = 0;
  1109. unsigned long flags;
  1110. if (!udc)
  1111. return -ENODEV;
  1112. if (udc->driver)
  1113. return -EBUSY;
  1114. spin_lock_irqsave(&udc->lock, flags);
  1115. /* hook up the driver ... */
  1116. driver->driver.bus = NULL;
  1117. udc->driver = driver;
  1118. udc->gadget.dev.driver = &driver->driver;
  1119. udc->usb_state = USB_STATE_ATTACHED;
  1120. udc->ep0_state = WAIT_FOR_SETUP;
  1121. udc->ep0_dir = EP_DIR_OUT;
  1122. spin_unlock_irqrestore(&udc->lock, flags);
  1123. retval = bind(&udc->gadget);
  1124. if (retval) {
  1125. dev_err(&udc->dev->dev, "bind to driver %s --> %d\n",
  1126. driver->driver.name, retval);
  1127. udc->driver = NULL;
  1128. udc->gadget.dev.driver = NULL;
  1129. return retval;
  1130. }
  1131. if (udc->transceiver) {
  1132. retval = otg_set_peripheral(udc->transceiver->otg,
  1133. &udc->gadget);
  1134. if (retval) {
  1135. dev_err(&udc->dev->dev,
  1136. "unable to register peripheral to otg\n");
  1137. if (driver->unbind) {
  1138. driver->unbind(&udc->gadget);
  1139. udc->gadget.dev.driver = NULL;
  1140. udc->driver = NULL;
  1141. }
  1142. return retval;
  1143. }
  1144. }
  1145. /* pullup is always on */
  1146. mv_udc_pullup(&udc->gadget, 1);
  1147. /* When boot with cable attached, there will be no vbus irq occurred */
  1148. if (udc->qwork)
  1149. queue_work(udc->qwork, &udc->vbus_work);
  1150. return 0;
  1151. }
  1152. static int mv_udc_stop(struct usb_gadget_driver *driver)
  1153. {
  1154. struct mv_udc *udc = the_controller;
  1155. unsigned long flags;
  1156. if (!udc)
  1157. return -ENODEV;
  1158. spin_lock_irqsave(&udc->lock, flags);
  1159. mv_udc_enable(udc);
  1160. udc_stop(udc);
  1161. /* stop all usb activities */
  1162. udc->gadget.speed = USB_SPEED_UNKNOWN;
  1163. stop_activity(udc, driver);
  1164. mv_udc_disable(udc);
  1165. spin_unlock_irqrestore(&udc->lock, flags);
  1166. /* unbind gadget driver */
  1167. driver->unbind(&udc->gadget);
  1168. udc->gadget.dev.driver = NULL;
  1169. udc->driver = NULL;
  1170. return 0;
  1171. }
  1172. static void mv_set_ptc(struct mv_udc *udc, u32 mode)
  1173. {
  1174. u32 portsc;
  1175. portsc = readl(&udc->op_regs->portsc[0]);
  1176. portsc |= mode << 16;
  1177. writel(portsc, &udc->op_regs->portsc[0]);
  1178. }
  1179. static void prime_status_complete(struct usb_ep *ep, struct usb_request *_req)
  1180. {
  1181. struct mv_udc *udc = the_controller;
  1182. struct mv_req *req = container_of(_req, struct mv_req, req);
  1183. unsigned long flags;
  1184. dev_info(&udc->dev->dev, "switch to test mode %d\n", req->test_mode);
  1185. spin_lock_irqsave(&udc->lock, flags);
  1186. if (req->test_mode) {
  1187. mv_set_ptc(udc, req->test_mode);
  1188. req->test_mode = 0;
  1189. }
  1190. spin_unlock_irqrestore(&udc->lock, flags);
  1191. }
  1192. static int
  1193. udc_prime_status(struct mv_udc *udc, u8 direction, u16 status, bool empty)
  1194. {
  1195. int retval = 0;
  1196. struct mv_req *req;
  1197. struct mv_ep *ep;
  1198. ep = &udc->eps[0];
  1199. udc->ep0_dir = direction;
  1200. udc->ep0_state = WAIT_FOR_OUT_STATUS;
  1201. req = udc->status_req;
  1202. /* fill in the reqest structure */
  1203. if (empty == false) {
  1204. *((u16 *) req->req.buf) = cpu_to_le16(status);
  1205. req->req.length = 2;
  1206. } else
  1207. req->req.length = 0;
  1208. req->ep = ep;
  1209. req->req.status = -EINPROGRESS;
  1210. req->req.actual = 0;
  1211. if (udc->test_mode) {
  1212. req->req.complete = prime_status_complete;
  1213. req->test_mode = udc->test_mode;
  1214. udc->test_mode = 0;
  1215. } else
  1216. req->req.complete = NULL;
  1217. req->dtd_count = 0;
  1218. if (req->req.dma == DMA_ADDR_INVALID) {
  1219. req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
  1220. req->req.buf, req->req.length,
  1221. ep_dir(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
  1222. req->mapped = 1;
  1223. }
  1224. /* prime the data phase */
  1225. if (!req_to_dtd(req))
  1226. retval = queue_dtd(ep, req);
  1227. else{ /* no mem */
  1228. retval = -ENOMEM;
  1229. goto out;
  1230. }
  1231. if (retval) {
  1232. dev_err(&udc->dev->dev, "response error on GET_STATUS request\n");
  1233. goto out;
  1234. }
  1235. list_add_tail(&req->queue, &ep->queue);
  1236. return 0;
  1237. out:
  1238. return retval;
  1239. }
  1240. static void mv_udc_testmode(struct mv_udc *udc, u16 index)
  1241. {
  1242. if (index <= TEST_FORCE_EN) {
  1243. udc->test_mode = index;
  1244. if (udc_prime_status(udc, EP_DIR_IN, 0, true))
  1245. ep0_stall(udc);
  1246. } else
  1247. dev_err(&udc->dev->dev,
  1248. "This test mode(%d) is not supported\n", index);
  1249. }
  1250. static void ch9setaddress(struct mv_udc *udc, struct usb_ctrlrequest *setup)
  1251. {
  1252. udc->dev_addr = (u8)setup->wValue;
  1253. /* update usb state */
  1254. udc->usb_state = USB_STATE_ADDRESS;
  1255. if (udc_prime_status(udc, EP_DIR_IN, 0, true))
  1256. ep0_stall(udc);
  1257. }
  1258. static void ch9getstatus(struct mv_udc *udc, u8 ep_num,
  1259. struct usb_ctrlrequest *setup)
  1260. {
  1261. u16 status = 0;
  1262. int retval;
  1263. if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
  1264. != (USB_DIR_IN | USB_TYPE_STANDARD))
  1265. return;
  1266. if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
  1267. status = 1 << USB_DEVICE_SELF_POWERED;
  1268. status |= udc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
  1269. } else if ((setup->bRequestType & USB_RECIP_MASK)
  1270. == USB_RECIP_INTERFACE) {
  1271. /* get interface status */
  1272. status = 0;
  1273. } else if ((setup->bRequestType & USB_RECIP_MASK)
  1274. == USB_RECIP_ENDPOINT) {
  1275. u8 ep_num, direction;
  1276. ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
  1277. direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
  1278. ? EP_DIR_IN : EP_DIR_OUT;
  1279. status = ep_is_stall(udc, ep_num, direction)
  1280. << USB_ENDPOINT_HALT;
  1281. }
  1282. retval = udc_prime_status(udc, EP_DIR_IN, status, false);
  1283. if (retval)
  1284. ep0_stall(udc);
  1285. else
  1286. udc->ep0_state = DATA_STATE_XMIT;
  1287. }
  1288. static void ch9clearfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
  1289. {
  1290. u8 ep_num;
  1291. u8 direction;
  1292. struct mv_ep *ep;
  1293. if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
  1294. == ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
  1295. switch (setup->wValue) {
  1296. case USB_DEVICE_REMOTE_WAKEUP:
  1297. udc->remote_wakeup = 0;
  1298. break;
  1299. default:
  1300. goto out;
  1301. }
  1302. } else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
  1303. == ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
  1304. switch (setup->wValue) {
  1305. case USB_ENDPOINT_HALT:
  1306. ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
  1307. direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
  1308. ? EP_DIR_IN : EP_DIR_OUT;
  1309. if (setup->wValue != 0 || setup->wLength != 0
  1310. || ep_num > udc->max_eps)
  1311. goto out;
  1312. ep = &udc->eps[ep_num * 2 + direction];
  1313. if (ep->wedge == 1)
  1314. break;
  1315. spin_unlock(&udc->lock);
  1316. ep_set_stall(udc, ep_num, direction, 0);
  1317. spin_lock(&udc->lock);
  1318. break;
  1319. default:
  1320. goto out;
  1321. }
  1322. } else
  1323. goto out;
  1324. if (udc_prime_status(udc, EP_DIR_IN, 0, true))
  1325. ep0_stall(udc);
  1326. out:
  1327. return;
  1328. }
  1329. static void ch9setfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
  1330. {
  1331. u8 ep_num;
  1332. u8 direction;
  1333. if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
  1334. == ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
  1335. switch (setup->wValue) {
  1336. case USB_DEVICE_REMOTE_WAKEUP:
  1337. udc->remote_wakeup = 1;
  1338. break;
  1339. case USB_DEVICE_TEST_MODE:
  1340. if (setup->wIndex & 0xFF
  1341. || udc->gadget.speed != USB_SPEED_HIGH)
  1342. ep0_stall(udc);
  1343. if (udc->usb_state != USB_STATE_CONFIGURED
  1344. && udc->usb_state != USB_STATE_ADDRESS
  1345. && udc->usb_state != USB_STATE_DEFAULT)
  1346. ep0_stall(udc);
  1347. mv_udc_testmode(udc, (setup->wIndex >> 8));
  1348. goto out;
  1349. default:
  1350. goto out;
  1351. }
  1352. } else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
  1353. == ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
  1354. switch (setup->wValue) {
  1355. case USB_ENDPOINT_HALT:
  1356. ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
  1357. direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
  1358. ? EP_DIR_IN : EP_DIR_OUT;
  1359. if (setup->wValue != 0 || setup->wLength != 0
  1360. || ep_num > udc->max_eps)
  1361. goto out;
  1362. spin_unlock(&udc->lock);
  1363. ep_set_stall(udc, ep_num, direction, 1);
  1364. spin_lock(&udc->lock);
  1365. break;
  1366. default:
  1367. goto out;
  1368. }
  1369. } else
  1370. goto out;
  1371. if (udc_prime_status(udc, EP_DIR_IN, 0, true))
  1372. ep0_stall(udc);
  1373. out:
  1374. return;
  1375. }
static void handle_setup_packet(struct mv_udc *udc, u8 ep_num,
	struct usb_ctrlrequest *setup)
{
	bool delegate = false;

	nuke(&udc->eps[ep_num * 2 + EP_DIR_OUT], -ESHUTDOWN);

	dev_dbg(&udc->dev->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
			setup->bRequestType, setup->bRequest,
			setup->wValue, setup->wIndex, setup->wLength);
	/* We process some standard setup requests here */
	if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (setup->bRequest) {
		case USB_REQ_GET_STATUS:
			ch9getstatus(udc, ep_num, setup);
			break;

		case USB_REQ_SET_ADDRESS:
			ch9setaddress(udc, setup);
			break;

		case USB_REQ_CLEAR_FEATURE:
			ch9clearfeature(udc, setup);
			break;

		case USB_REQ_SET_FEATURE:
			ch9setfeature(udc, setup);
			break;

		default:
			delegate = true;
		}
	} else
		delegate = true;

	/* delegate all other requests to the gadget driver */
	if (delegate == true) {
		/* USB requests handled by gadget */
		if (setup->wLength) {
			/* DATA phase from gadget, STATUS phase from udc */
			udc->ep0_dir = (setup->bRequestType & USB_DIR_IN)
					? EP_DIR_IN : EP_DIR_OUT;
			spin_unlock(&udc->lock);
			if (udc->driver->setup(&udc->gadget,
				&udc->local_setup_buff) < 0)
				ep0_stall(udc);
			spin_lock(&udc->lock);
			udc->ep0_state = (setup->bRequestType & USB_DIR_IN)
					? DATA_STATE_XMIT : DATA_STATE_RECV;
		} else {
			/* no DATA phase, IN STATUS phase from gadget */
			udc->ep0_dir = EP_DIR_IN;
			spin_unlock(&udc->lock);
			if (udc->driver->setup(&udc->gadget,
				&udc->local_setup_buff) < 0)
				ep0_stall(udc);
			spin_lock(&udc->lock);
			udc->ep0_state = WAIT_FOR_OUT_STATUS;
		}
	}
}
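
/*
 * Note on SET_ADDRESS: the new device address is only latched into the
 * DEVICEADDR register once the status stage of the SET_ADDRESS transfer
 * completes (usb_state == USB_STATE_ADDRESS below), as the USB 2.0 spec
 * requires -- the status handshake itself must still use address 0.
 */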
/*
 * Complete the DATA or STATUS phase of an ep0 transfer and prime the
 * following status phase if one is needed.
 */
static void ep0_req_complete(struct mv_udc *udc,
	struct mv_ep *ep0, struct mv_req *req)
{
	u32 new_addr;

	if (udc->usb_state == USB_STATE_ADDRESS) {
		/* set the new address */
		new_addr = (u32)udc->dev_addr;
		writel(new_addr << USB_DEVICE_ADDRESS_BIT_SHIFT,
			&udc->op_regs->deviceaddr);
	}

	done(ep0, req, 0);

	switch (udc->ep0_state) {
	case DATA_STATE_XMIT:
		/* receive status phase */
		if (udc_prime_status(udc, EP_DIR_OUT, 0, true))
			ep0_stall(udc);
		break;
	case DATA_STATE_RECV:
		/* send status phase */
		if (udc_prime_status(udc, EP_DIR_IN, 0, true))
			ep0_stall(udc);
		break;
	case WAIT_FOR_OUT_STATUS:
		udc->ep0_state = WAIT_FOR_SETUP;
		break;
	case WAIT_FOR_SETUP:
		dev_err(&udc->dev->dev, "unexpected ep0 packet\n");
		break;
	default:
		ep0_stall(udc);
		break;
	}
}
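
/*
 * Reading the 8-byte setup buffer out of the OUT dQH races with the
 * hardware, which may overwrite it if a new SETUP packet arrives
 * mid-copy.  The setup tripwire bit in USBCMD is set before the copy
 * and is cleared by the controller when a new SETUP is written; the
 * copy is retried until the bit is still set afterwards, guaranteeing
 * a consistent snapshot.
 */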
static void get_setup_data(struct mv_udc *udc, u8 ep_num, u8 *buffer_ptr)
{
	u32 temp;
	struct mv_dqh *dqh;

	dqh = &udc->ep_dqh[ep_num * 2 + EP_DIR_OUT];

	/* Clear bit in ENDPTSETUPSTAT */
	writel((1 << ep_num), &udc->op_regs->epsetupstat);

	/* a hazard exists if a new setup packet arrives during the copy */
	do {
		/* Set Setup Tripwire */
		temp = readl(&udc->op_regs->usbcmd);
		writel(temp | USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);

		/* Copy the setup packet to local buffer */
		memcpy(buffer_ptr, (u8 *) dqh->setup_buffer, 8);
	} while (!(readl(&udc->op_regs->usbcmd) & USBCMD_SETUP_TRIPWIRE_SET));

	/* Clear Setup Tripwire */
	temp = readl(&udc->op_regs->usbcmd);
	writel(temp & ~USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);
}
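
/*
 * ENDPTCOMPLETE keeps RX (OUT) completions in bits 0-15 and TX (IN)
 * completions in bits 16-31, which is why the loop below rebuilds the
 * bit position as 1 << (ep_num + 16 * direction) for endpoint index i.
 */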
static void irq_process_tr_complete(struct mv_udc *udc)
{
	u32 tmp, bit_pos;
	int i, ep_num = 0, direction = 0;
	struct mv_ep *curr_ep;
	struct mv_req *curr_req, *temp_req;
	int status;

	/*
	 * We use separate loops for ENDPTSETUPSTAT and ENDPTCOMPLETE
	 * because the setup packets are to be read ASAP
	 */

	/* Process all Setup packet received interrupts */
	tmp = readl(&udc->op_regs->epsetupstat);
	if (tmp) {
		for (i = 0; i < udc->max_eps; i++) {
			if (tmp & (1 << i)) {
				get_setup_data(udc, i,
					(u8 *)(&udc->local_setup_buff));
				handle_setup_packet(udc, i,
					&udc->local_setup_buff);
			}
		}
	}

	/* Don't clear the endpoint setup status register here.
	 * It is cleared as a setup packet is read out of the buffer
	 */

	/* Process non-setup transaction complete interrupts */
	tmp = readl(&udc->op_regs->epcomplete);
	if (!tmp)
		return;

	writel(tmp, &udc->op_regs->epcomplete);

	for (i = 0; i < udc->max_eps * 2; i++) {
		ep_num = i >> 1;
		direction = i % 2;

		bit_pos = 1 << (ep_num + 16 * direction);

		if (!(bit_pos & tmp))
			continue;

		if (i == 1)
			curr_ep = &udc->eps[0];
		else
			curr_ep = &udc->eps[i];
		/* process the req queue until an incomplete request */
		list_for_each_entry_safe(curr_req, temp_req,
			&curr_ep->queue, queue) {
			status = process_ep_req(udc, i, curr_req);
			if (status)
				break;

			/* write back status to req */
			curr_req->req.status = status;

			/* ep0 request completion */
			if (ep_num == 0) {
				ep0_req_complete(udc, curr_ep, curr_req);
				break;
			} else {
				done(curr_ep, curr_req, status);
			}
		}
	}
}
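
/*
 * Bus reset handling: if PORTSC still reports the reset in progress we
 * only quiesce the queues and wait for the reset to finish; otherwise
 * the reset already completed (e.g. the interrupt was serviced late),
 * so the controller is fully re-initialized and restarted.
 */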
static void irq_process_reset(struct mv_udc *udc)
{
	u32 tmp;
	unsigned int loops;

	udc->ep0_dir = EP_DIR_OUT;
	udc->ep0_state = WAIT_FOR_SETUP;
	udc->remote_wakeup = 0;		/* default to 0 on reset */

	/* The device address occupies bits 25-31; clear it on bus reset */
	tmp = readl(&udc->op_regs->deviceaddr);
	tmp &= ~(USB_DEVICE_ADDRESS_MASK);
	writel(tmp, &udc->op_regs->deviceaddr);

	/* Clear all the setup token semaphores */
	tmp = readl(&udc->op_regs->epsetupstat);
	writel(tmp, &udc->op_regs->epsetupstat);

	/* Clear all the endpoint complete status bits */
	tmp = readl(&udc->op_regs->epcomplete);
	writel(tmp, &udc->op_regs->epcomplete);

	/* wait until all endptprime bits cleared */
	loops = LOOPS(PRIME_TIMEOUT);
	while (readl(&udc->op_regs->epprime) & 0xFFFFFFFF) {
		if (loops == 0) {
			dev_err(&udc->dev->dev,
				"Timeout for ENDPTPRIME = 0x%x\n",
				readl(&udc->op_regs->epprime));
			break;
		}
		loops--;
		udelay(LOOPS_USEC);
	}

	/* Write 1s to the Flush register */
	writel((u32)~0, &udc->op_regs->epflush);

	if (readl(&udc->op_regs->portsc[0]) & PORTSCX_PORT_RESET) {
		dev_info(&udc->dev->dev, "usb bus reset\n");
		udc->usb_state = USB_STATE_DEFAULT;
		/* reset all the queues, stop all USB activities */
		stop_activity(udc, udc->driver);
	} else {
		dev_info(&udc->dev->dev, "USB reset portsc 0x%x\n",
			readl(&udc->op_regs->portsc[0]));

		/*
		 * The reset has already completed, so re-initialize
		 * the controller from scratch.
		 */
		udc_reset(udc);

		/* reset all the queues, stop all USB activities */
		stop_activity(udc, udc->driver);

		/* reset ep0 dQH and endptctrl */
		ep0_reset(udc);

		/* enable interrupt and set controller to run state */
		udc_start(udc);

		udc->usb_state = USB_STATE_ATTACHED;
	}
}
static void handle_bus_resume(struct mv_udc *udc)
{
	udc->usb_state = udc->resume_state;
	udc->resume_state = 0;

	/* report resume to the driver */
	if (udc->driver) {
		if (udc->driver->resume) {
			spin_unlock(&udc->lock);
			udc->driver->resume(&udc->gadget);
			spin_lock(&udc->lock);
		}
	}
}

static void irq_process_suspend(struct mv_udc *udc)
{
	udc->resume_state = udc->usb_state;
	udc->usb_state = USB_STATE_SUSPENDED;

	if (udc->driver->suspend) {
		spin_unlock(&udc->lock);
		udc->driver->suspend(&udc->gadget);
		spin_lock(&udc->lock);
	}
}
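
/*
 * Port change interrupts report both connect-speed negotiation and
 * suspend/resume.  The speed field in PORTSC is only valid once the
 * port is no longer in reset, hence the PORTSCX_PORT_RESET check below.
 */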
static void irq_process_port_change(struct mv_udc *udc)
{
	u32 portsc;

	portsc = readl(&udc->op_regs->portsc[0]);
	if (!(portsc & PORTSCX_PORT_RESET)) {
		/* Get the speed */
		u32 speed = portsc & PORTSCX_PORT_SPEED_MASK;
		switch (speed) {
		case PORTSCX_PORT_SPEED_HIGH:
			udc->gadget.speed = USB_SPEED_HIGH;
			break;
		case PORTSCX_PORT_SPEED_FULL:
			udc->gadget.speed = USB_SPEED_FULL;
			break;
		case PORTSCX_PORT_SPEED_LOW:
			udc->gadget.speed = USB_SPEED_LOW;
			break;
		default:
			udc->gadget.speed = USB_SPEED_UNKNOWN;
			break;
		}
	}

	if (portsc & PORTSCX_PORT_SUSPEND) {
		udc->resume_state = udc->usb_state;
		udc->usb_state = USB_STATE_SUSPENDED;
		if (udc->driver->suspend) {
			spin_unlock(&udc->lock);
			udc->driver->suspend(&udc->gadget);
			spin_lock(&udc->lock);
		}
	}

	if (!(portsc & PORTSCX_PORT_SUSPEND)
		&& udc->usb_state == USB_STATE_SUSPENDED) {
		handle_bus_resume(udc);
	}

	if (!udc->resume_state)
		udc->usb_state = USB_STATE_DEFAULT;
}

static void irq_process_error(struct mv_udc *udc)
{
	/* Increment the error count */
	udc->errors++;
}
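
/*
 * Top-level interrupt handler: USBSTS is masked with the enabled bits
 * in USBINTR, acknowledged by writing the active bits back to USBSTS,
 * and then each source is dispatched to its helper with udc->lock held.
 */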
static irqreturn_t mv_udc_irq(int irq, void *dev)
{
	struct mv_udc *udc = (struct mv_udc *)dev;
	u32 status, intr;

	/* Disable ISR when stopped bit is set */
	if (udc->stopped)
		return IRQ_NONE;

	spin_lock(&udc->lock);

	status = readl(&udc->op_regs->usbsts);
	intr = readl(&udc->op_regs->usbintr);
	status &= intr;

	if (status == 0) {
		spin_unlock(&udc->lock);
		return IRQ_NONE;
	}

	/* Acknowledge all the interrupts that occurred */
	writel(status, &udc->op_regs->usbsts);

	if (status & USBSTS_ERR)
		irq_process_error(udc);

	if (status & USBSTS_RESET)
		irq_process_reset(udc);

	if (status & USBSTS_PORT_CHANGE)
		irq_process_port_change(udc);

	if (status & USBSTS_INT)
		irq_process_tr_complete(udc);

	if (status & USBSTS_SUSPEND)
		irq_process_suspend(udc);

	spin_unlock(&udc->lock);

	return IRQ_HANDLED;
}

static irqreturn_t mv_udc_vbus_irq(int irq, void *dev)
{
	struct mv_udc *udc = (struct mv_udc *)dev;

	/* polling VBUS and initializing the phy may take too long here */
	if (udc->qwork)
		queue_work(udc->qwork, &udc->vbus_work);

	return IRQ_HANDLED;
}
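
/*
 * The actual VBUS state is sampled in process context: pdata->vbus->poll()
 * and the phy (re)initialization triggered by mv_udc_vbus_session() are
 * too slow for the interrupt handler above, so the work item polls the
 * line and starts or stops the session accordingly.
 */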
static void mv_udc_vbus_work(struct work_struct *work)
{
	struct mv_udc *udc;
	unsigned int vbus;

	udc = container_of(work, struct mv_udc, vbus_work);
	if (!udc->pdata->vbus)
		return;

	vbus = udc->pdata->vbus->poll();
	dev_info(&udc->dev->dev, "vbus is %d\n", vbus);

	if (vbus == VBUS_HIGH)
		mv_udc_vbus_session(&udc->gadget, 1);
	else if (vbus == VBUS_LOW)
		mv_udc_vbus_session(&udc->gadget, 0);
}

/* release device structure */
static void gadget_release(struct device *_dev)
{
	struct mv_udc *udc = the_controller;

	complete(udc->done);
}
static int __devexit mv_udc_remove(struct platform_device *dev)
{
	struct mv_udc *udc = the_controller;
	int clk_i;

	usb_del_gadget_udc(&udc->gadget);

	if (udc->qwork) {
		flush_workqueue(udc->qwork);
		destroy_workqueue(udc->qwork);
	}

	/*
	 * If a transceiver is in use, the vbus irq was not requested
	 * by the udc driver, so do not free it here.
	 */
	if (udc->pdata && udc->pdata->vbus
		&& udc->clock_gating && udc->transceiver == NULL)
		free_irq(udc->pdata->vbus->irq, &dev->dev);

	/* free memory allocated in probe */
	if (udc->dtd_pool)
		dma_pool_destroy(udc->dtd_pool);

	if (udc->ep_dqh)
		dma_free_coherent(&dev->dev, udc->ep_dqh_size,
			udc->ep_dqh, udc->ep_dqh_dma);

	kfree(udc->eps);

	if (udc->irq)
		free_irq(udc->irq, &dev->dev);

	mv_udc_disable(udc);

	if (udc->cap_regs)
		iounmap(udc->cap_regs);

	if (udc->phy_regs)
		iounmap(udc->phy_regs);

	if (udc->status_req) {
		kfree(udc->status_req->req.buf);
		kfree(udc->status_req);
	}

	for (clk_i = 0; clk_i < udc->clknum; clk_i++)
		clk_put(udc->clk[clk_i]);

	device_unregister(&udc->gadget.dev);

	/* free dev, wait for the release() finished */
	wait_for_completion(udc->done);
	kfree(udc);

	the_controller = NULL;

	return 0;
}
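
/*
 * Probe order: clocks -> capability/phy register mappings -> clock and
 * phy enable -> dQH area and dTD pool -> endpoint and status-request
 * allocation -> IRQ -> gadget registration.  The error labels at the
 * end of the function unwind these steps in reverse.
 */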
static int __devinit mv_udc_probe(struct platform_device *dev)
{
	struct mv_usb_platform_data *pdata = dev->dev.platform_data;
	struct mv_udc *udc;
	int retval = 0;
	int clk_i = 0;
	struct resource *r;
	size_t size;

	if (pdata == NULL) {
		dev_err(&dev->dev, "missing platform_data\n");
		return -ENODEV;
	}

	size = sizeof(*udc) + sizeof(struct clk *) * pdata->clknum;
	udc = kzalloc(size, GFP_KERNEL);
	if (udc == NULL) {
		dev_err(&dev->dev, "failed to allocate memory for udc\n");
		return -ENOMEM;
	}

	the_controller = udc;
	udc->done = &release_done;
	udc->pdata = dev->dev.platform_data;
	spin_lock_init(&udc->lock);

	udc->dev = dev;

#ifdef CONFIG_USB_OTG_UTILS
	if (pdata->mode == MV_USB_MODE_OTG)
		udc->transceiver = usb_get_transceiver();
#endif

	udc->clknum = pdata->clknum;
	for (clk_i = 0; clk_i < udc->clknum; clk_i++) {
		udc->clk[clk_i] = clk_get(&dev->dev, pdata->clkname[clk_i]);
		if (IS_ERR(udc->clk[clk_i])) {
			retval = PTR_ERR(udc->clk[clk_i]);
			goto err_put_clk;
		}
	}

	r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "capregs");
	if (r == NULL) {
		dev_err(&dev->dev, "no I/O memory resource defined\n");
		retval = -ENODEV;
		goto err_put_clk;
	}

	udc->cap_regs = (struct mv_cap_regs __iomem *)
		ioremap(r->start, resource_size(r));
	if (udc->cap_regs == NULL) {
		dev_err(&dev->dev, "failed to map I/O memory\n");
		retval = -EBUSY;
		goto err_put_clk;
	}

	r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "phyregs");
	if (r == NULL) {
		dev_err(&dev->dev, "no phy I/O memory resource defined\n");
		retval = -ENODEV;
		goto err_iounmap_capreg;
	}

	udc->phy_regs = ioremap(r->start, resource_size(r));
	if (udc->phy_regs == NULL) {
		dev_err(&dev->dev, "failed to map phy I/O memory\n");
		retval = -EBUSY;
		goto err_iounmap_capreg;
	}
	/* we will access controller registers, so enable the clk */
	retval = mv_udc_enable_internal(udc);
	if (retval)
		goto err_iounmap_phyreg;

	udc->op_regs =
		(struct mv_op_regs __iomem *)((unsigned long)udc->cap_regs
		+ (readl(&udc->cap_regs->caplength_hciversion)
			& CAPLENGTH_MASK));
	udc->max_eps = readl(&udc->cap_regs->dccparams) & DCCPARAMS_DEN_MASK;

	/*
	 * Some platforms use USB to download the boot image and may not
	 * disconnect the gadget before loading the kernel, so stop the
	 * udc here first.
	 */
	udc_stop(udc);
	writel(0xFFFFFFFF, &udc->op_regs->usbsts);

	size = udc->max_eps * sizeof(struct mv_dqh) * 2;
	size = (size + DQH_ALIGNMENT - 1) & ~(DQH_ALIGNMENT - 1);
	udc->ep_dqh = dma_alloc_coherent(&dev->dev, size,
					&udc->ep_dqh_dma, GFP_KERNEL);

	if (udc->ep_dqh == NULL) {
		dev_err(&dev->dev, "allocate dQH memory failed\n");
		retval = -ENOMEM;
		goto err_disable_clock;
	}
	udc->ep_dqh_size = size;
	/* create dTD dma_pool resource */
	udc->dtd_pool = dma_pool_create("mv_dtd",
			&dev->dev,
			sizeof(struct mv_dtd),
			DTD_ALIGNMENT,
			DMA_BOUNDARY);

	if (!udc->dtd_pool) {
		retval = -ENOMEM;
		goto err_free_dma;
	}

	size = udc->max_eps * sizeof(struct mv_ep) * 2;
	udc->eps = kzalloc(size, GFP_KERNEL);
	if (udc->eps == NULL) {
		dev_err(&dev->dev, "allocate ep memory failed\n");
		retval = -ENOMEM;
		goto err_destroy_dma;
	}

	/* initialize ep0 status request structure */
	udc->status_req = kzalloc(sizeof(struct mv_req), GFP_KERNEL);
	if (!udc->status_req) {
		dev_err(&dev->dev, "allocate status_req memory failed\n");
		retval = -ENOMEM;
		goto err_free_eps;
	}
	INIT_LIST_HEAD(&udc->status_req->queue);

	/* allocate a small amount of memory to get a valid address */
	udc->status_req->req.buf = kzalloc(8, GFP_KERNEL);
	udc->status_req->req.dma = DMA_ADDR_INVALID;

	udc->resume_state = USB_STATE_NOTATTACHED;
	udc->usb_state = USB_STATE_POWERED;
	udc->ep0_dir = EP_DIR_OUT;
	udc->remote_wakeup = 0;

	r = platform_get_resource(udc->dev, IORESOURCE_IRQ, 0);
	if (r == NULL) {
		dev_err(&dev->dev, "no IRQ resource defined\n");
		retval = -ENODEV;
		goto err_free_status_req;
	}
	udc->irq = r->start;
	if (request_irq(udc->irq, mv_udc_irq,
		IRQF_SHARED, driver_name, udc)) {
		dev_err(&dev->dev, "Request irq %d for UDC failed\n",
			udc->irq);
		retval = -ENODEV;
		goto err_free_status_req;
	}
	/* initialize gadget structure */
	udc->gadget.ops = &mv_ops;		/* usb_gadget_ops */
	udc->gadget.ep0 = &udc->eps[0].ep;	/* gadget ep0 */
	INIT_LIST_HEAD(&udc->gadget.ep_list);	/* ep_list */
	udc->gadget.speed = USB_SPEED_UNKNOWN;	/* speed */
	udc->gadget.max_speed = USB_SPEED_HIGH;	/* support dual speed */

	/* the "gadget" abstracts/virtualizes the controller */
	dev_set_name(&udc->gadget.dev, "gadget");
	udc->gadget.dev.parent = &dev->dev;
	udc->gadget.dev.dma_mask = dev->dev.dma_mask;
	udc->gadget.dev.release = gadget_release;
	udc->gadget.name = driver_name;		/* gadget name */

	retval = device_register(&udc->gadget.dev);
	if (retval)
		goto err_free_irq;

	eps_init(udc);

	/* VBUS detect: we can disable/enable clock on demand. */
	if (udc->transceiver)
		udc->clock_gating = 1;
	else if (pdata->vbus) {
		udc->clock_gating = 1;
		retval = request_threaded_irq(pdata->vbus->irq, NULL,
			mv_udc_vbus_irq, IRQF_ONESHOT, "vbus", udc);
		if (retval) {
			dev_info(&dev->dev,
				"Cannot request irq for VBUS, "
				"disabling clock gating\n");
			udc->clock_gating = 0;
		}

		udc->qwork = create_singlethread_workqueue("mv_udc_queue");
		if (!udc->qwork) {
			dev_err(&dev->dev, "cannot create workqueue\n");
			retval = -ENOMEM;
			goto err_unregister;
		}

		INIT_WORK(&udc->vbus_work, mv_udc_vbus_work);
	}

	/*
	 * When clock gating is supported, we can disable clk and phy.
	 * If not, it means that VBUS detection is not supported, so we
	 * have to keep vbus active all the time to let the controller work.
	 */
	if (udc->clock_gating)
		mv_udc_disable_internal(udc);
	else
		udc->vbus_active = 1;

	retval = usb_add_gadget_udc(&dev->dev, &udc->gadget);
	if (retval)
		goto err_unregister;

	dev_info(&dev->dev, "successfully probed UDC device %s clock gating\n",
		udc->clock_gating ? "with" : "without");

	return 0;
err_unregister:
	if (udc->pdata && udc->pdata->vbus
		&& udc->clock_gating && udc->transceiver == NULL)
		free_irq(pdata->vbus->irq, &dev->dev);
	device_unregister(&udc->gadget.dev);
err_free_irq:
	free_irq(udc->irq, &dev->dev);
err_free_status_req:
	kfree(udc->status_req->req.buf);
	kfree(udc->status_req);
err_free_eps:
	kfree(udc->eps);
err_destroy_dma:
	dma_pool_destroy(udc->dtd_pool);
err_free_dma:
	dma_free_coherent(&dev->dev, udc->ep_dqh_size,
			udc->ep_dqh, udc->ep_dqh_dma);
err_disable_clock:
	mv_udc_disable_internal(udc);
err_iounmap_phyreg:
	iounmap(udc->phy_regs);
err_iounmap_capreg:
	iounmap(udc->cap_regs);
err_put_clk:
	for (clk_i--; clk_i >= 0; clk_i--)
		clk_put(udc->clk[clk_i]);
	the_controller = NULL;
	kfree(udc);
	return retval;
}
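
/*
 * System PM: suspend is refused with -EAGAIN while a cable is plugged
 * in (the gadget may be in active use).  With clock gating enabled the
 * clocks and phy are already off whenever VBUS is absent, so only the
 * non-gated case needs to stop the controller and power down here.
 */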
#ifdef CONFIG_PM
static int mv_udc_suspend(struct device *_dev)
{
	struct mv_udc *udc = the_controller;

	/* if OTG is enabled, the following will be done in OTG driver */
	if (udc->transceiver)
		return 0;

	if (udc->pdata->vbus && udc->pdata->vbus->poll)
		if (udc->pdata->vbus->poll() == VBUS_HIGH) {
			dev_info(&udc->dev->dev, "USB cable is connected!\n");
			return -EAGAIN;
		}

	/*
	 * The udc can only suspend once the cable is unplugged, so the
	 * clock_gating == 1 case needs no further handling here.
	 */
	if (!udc->clock_gating) {
		udc_stop(udc);

		spin_lock_irq(&udc->lock);
		/* stop all usb activities */
		stop_activity(udc, udc->driver);
		spin_unlock_irq(&udc->lock);

		mv_udc_disable_internal(udc);
	}

	return 0;
}

static int mv_udc_resume(struct device *_dev)
{
	struct mv_udc *udc = the_controller;
	int retval;

	/* if OTG is enabled, the following will be done in OTG driver */
	if (udc->transceiver)
		return 0;

	if (!udc->clock_gating) {
		retval = mv_udc_enable_internal(udc);
		if (retval)
			return retval;

		if (udc->driver && udc->softconnect) {
			udc_reset(udc);
			ep0_reset(udc);
			udc_start(udc);
		}
	}

	return 0;
}

static const struct dev_pm_ops mv_udc_pm_ops = {
	.suspend	= mv_udc_suspend,
	.resume		= mv_udc_resume,
};
#endif
static void mv_udc_shutdown(struct platform_device *dev)
{
	struct mv_udc *udc = the_controller;
	u32 mode;

	/* reset controller mode to IDLE */
	mode = readl(&udc->op_regs->usbmode);
	mode &= ~3;
	writel(mode, &udc->op_regs->usbmode);
}

static struct platform_driver udc_driver = {
	.probe		= mv_udc_probe,
	.remove		= __devexit_p(mv_udc_remove),
	.shutdown	= mv_udc_shutdown,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= "mv-udc",
#ifdef CONFIG_PM
		.pm	= &mv_udc_pm_ops,
#endif
	},
};

module_platform_driver(udc_driver);
MODULE_ALIAS("platform:mv-udc");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");