/*
 * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
 * Author: Chao Xie <chao.xie@marvell.com>
 *	   Neil Zhang <zhangwm@marvell.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#include <linux/pm.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/platform_data/mv_usb.h>
#include <asm/unaligned.h>

#include "mv_udc.h"
#define DRIVER_DESC		"Marvell PXA USB Device Controller driver"
#define DRIVER_VERSION		"8 Nov 2010"

#define ep_dir(ep)	(((ep)->ep_num == 0) ? \
			((ep)->udc->ep0_dir) : ((ep)->direction))

/* timeout value -- usec */
#define RESET_TIMEOUT		10000
#define FLUSH_TIMEOUT		10000
#define EPSTATUS_TIMEOUT	10000
#define PRIME_TIMEOUT		10000
#define READSAFE_TIMEOUT	1000

#define LOOPS_USEC_SHIFT	1
#define LOOPS_USEC		(1 << LOOPS_USEC_SHIFT)
#define LOOPS(timeout)		((timeout) >> LOOPS_USEC_SHIFT)
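/*
 * Each polling loop below delays LOOPS_USEC (2) usec per iteration, so
 * LOOPS(t) converts a timeout in usec into an iteration count by halving
 * it: e.g. LOOPS(RESET_TIMEOUT) = 10000 >> 1 = 5000 iterations of
 * udelay(2), roughly a 10 ms budget.
 */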
static DECLARE_COMPLETION(release_done);

static const char driver_name[] = "mv_udc";
static const char driver_desc[] = DRIVER_DESC;

/* controller device global variable */
static struct mv_udc	*the_controller;

static void nuke(struct mv_ep *ep, int status);
static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver);

/* for endpoint 0 operations */
static const struct usb_endpoint_descriptor mv_ep0_desc = {
	.bLength		= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType	= USB_DT_ENDPOINT,
	.bEndpointAddress	= 0,
	.bmAttributes		= USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize		= EP0_MAX_PKT_SIZE,
};
static void ep0_reset(struct mv_udc *udc)
{
	struct mv_ep *ep;
	u32 epctrlx;
	int i = 0;

	/* ep0 in and out */
	for (i = 0; i < 2; i++) {
		ep = &udc->eps[i];
		ep->udc = udc;

		/* ep0 dQH */
		ep->dqh = &udc->ep_dqh[i];

		/* configure ep0 endpoint capabilities in dQH */
		ep->dqh->max_packet_length =
			(EP0_MAX_PKT_SIZE << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
			| EP_QUEUE_HEAD_IOS;

		ep->dqh->next_dtd_ptr = EP_QUEUE_HEAD_NEXT_TERMINATE;

		epctrlx = readl(&udc->op_regs->epctrlx[0]);
		if (i) {	/* TX */
			epctrlx |= EPCTRL_TX_ENABLE
				| (USB_ENDPOINT_XFER_CONTROL
					<< EPCTRL_TX_EP_TYPE_SHIFT);

		} else {	/* RX */
			epctrlx |= EPCTRL_RX_ENABLE
				| (USB_ENDPOINT_XFER_CONTROL
					<< EPCTRL_RX_EP_TYPE_SHIFT);
		}

		writel(epctrlx, &udc->op_regs->epctrlx[0]);
	}
}
/* protocol ep0 stall, will automatically be cleared on new transaction */
static void ep0_stall(struct mv_udc *udc)
{
	u32	epctrlx;

	/* set TX and RX to stall */
	epctrlx = readl(&udc->op_regs->epctrlx[0]);
	epctrlx |= EPCTRL_RX_EP_STALL | EPCTRL_TX_EP_STALL;
	writel(epctrlx, &udc->op_regs->epctrlx[0]);

	/* update ep0 state */
	udc->ep0_state = WAIT_FOR_SETUP;
	udc->ep0_dir = EP_DIR_OUT;
}
static int process_ep_req(struct mv_udc *udc, int index,
	struct mv_req *curr_req)
{
	struct mv_dtd	*curr_dtd;
	struct mv_dqh	*curr_dqh;
	int td_complete, actual, remaining_length;
	int i, direction;
	int retval = 0;
	u32 errors;
	u32 bit_pos;

	curr_dqh = &udc->ep_dqh[index];
	direction = index % 2;

	curr_dtd = curr_req->head;
	td_complete = 0;
	actual = curr_req->req.length;

	for (i = 0; i < curr_req->dtd_count; i++) {
		if (curr_dtd->size_ioc_sts & DTD_STATUS_ACTIVE) {
			dev_dbg(&udc->dev->dev, "%s, dTD not completed\n",
				udc->eps[index].name);
			return 1;
		}

		errors = curr_dtd->size_ioc_sts & DTD_ERROR_MASK;
		if (!errors) {
			remaining_length =
				(curr_dtd->size_ioc_sts & DTD_PACKET_SIZE)
					>> DTD_LENGTH_BIT_POS;
			actual -= remaining_length;

			if (remaining_length) {
				if (direction) {
					dev_dbg(&udc->dev->dev,
						"TX dTD has remaining data\n");
					retval = -EPROTO;
					break;
				} else
					break;
			}
		} else {
			dev_info(&udc->dev->dev,
				"complete_tr error: ep=%d %s: error = 0x%x\n",
				index >> 1, direction ? "SEND" : "RECV",
				errors);
			if (errors & DTD_STATUS_HALTED) {
				/* Clear the errors and Halt condition */
				curr_dqh->size_ioc_int_sts &= ~errors;
				retval = -EPIPE;
			} else if (errors & DTD_STATUS_DATA_BUFF_ERR) {
				retval = -EPROTO;
			} else if (errors & DTD_STATUS_TRANSACTION_ERR) {
				retval = -EILSEQ;
			}
		}
		if (i != curr_req->dtd_count - 1)
			curr_dtd = (struct mv_dtd *)curr_dtd->next_dtd_virt;
	}
	if (retval)
		return retval;

	if (direction == EP_DIR_OUT)
		bit_pos = 1 << curr_req->ep->ep_num;
	else
		bit_pos = 1 << (16 + curr_req->ep->ep_num);
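	/*
	 * Note on the bit layout used here and throughout: in the
	 * ENDPTPRIME/ENDPTSTATUS/ENDPTFLUSH registers, bits 0-15 select
	 * the RX (OUT) endpoints and bits 16-31 the TX (IN) endpoints,
	 * hence the "+ 16" for the IN direction.
	 */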
	while ((curr_dqh->curr_dtd_ptr == curr_dtd->td_dma)) {
		if (curr_dtd->dtd_next == EP_QUEUE_HEAD_NEXT_TERMINATE) {
			while (readl(&udc->op_regs->epstatus) & bit_pos)
				udelay(1);
			break;
		}
		udelay(1);
	}

	curr_req->req.actual = actual;

	return 0;
}
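/*
 * Return convention for process_ep_req(), as implemented above: 1 means
 * the request is still in flight (a dTD is still active), a negative
 * errno reports a transfer error, and 0 means the request completed and
 * req.actual has been updated.
 */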
/*
 * done() - retire a request; caller must have blocked irqs
 * @status : request status to be set, only works when
 * request is still in progress.
 */
static void done(struct mv_ep *ep, struct mv_req *req, int status)
{
	struct mv_udc *udc = NULL;
	unsigned char stopped = ep->stopped;
	struct mv_dtd *curr_td, *next_td;
	int j;

	udc = (struct mv_udc *)ep->udc;
	/* Remove the req from ep->queue */
	list_del_init(&req->queue);

	/* req.status should be set as -EINPROGRESS in ep_queue() */
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;

	/* Free dtd for the request */
	next_td = req->head;
	for (j = 0; j < req->dtd_count; j++) {
		curr_td = next_td;
		if (j != req->dtd_count - 1)
			next_td = curr_td->next_dtd_virt;
		dma_pool_free(udc->dtd_pool, curr_td, curr_td->td_dma);
	}

	if (req->mapped) {
		dma_unmap_single(ep->udc->gadget.dev.parent,
			req->req.dma, req->req.length,
			((ep_dir(ep) == EP_DIR_IN) ?
				DMA_TO_DEVICE : DMA_FROM_DEVICE));
		req->req.dma = DMA_ADDR_INVALID;
		req->mapped = 0;
	} else
		dma_sync_single_for_cpu(ep->udc->gadget.dev.parent,
			req->req.dma, req->req.length,
			((ep_dir(ep) == EP_DIR_IN) ?
				DMA_TO_DEVICE : DMA_FROM_DEVICE));

	if (status && (status != -ESHUTDOWN))
		dev_info(&udc->dev->dev, "complete %s req %p stat %d len %u/%u",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);

	ep->stopped = 1;

	spin_unlock(&ep->udc->lock);
	/*
	 * complete() is from gadget layer,
	 * eg fsg->bulk_in_complete()
	 */
	if (req->req.complete)
		req->req.complete(&ep->ep, &req->req);

	spin_lock(&ep->udc->lock);
	ep->stopped = stopped;
}
static int queue_dtd(struct mv_ep *ep, struct mv_req *req)
{
	struct mv_udc *udc;
	struct mv_dqh *dqh;
	u32 bit_pos, direction;
	u32 usbcmd, epstatus;
	unsigned int loops;
	int retval = 0;

	udc = ep->udc;
	direction = ep_dir(ep);
	dqh = &(udc->ep_dqh[ep->ep_num * 2 + direction]);
	bit_pos = 1 << (((direction == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);

	/* check if the pipe is empty */
	if (!(list_empty(&ep->queue))) {
		struct mv_req *lastreq;
		lastreq = list_entry(ep->queue.prev, struct mv_req, queue);
		lastreq->tail->dtd_next =
			req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;

		wmb();

		if (readl(&udc->op_regs->epprime) & bit_pos)
			goto done;

		loops = LOOPS(READSAFE_TIMEOUT);
		while (1) {
			/* start with setting the semaphores */
			usbcmd = readl(&udc->op_regs->usbcmd);
			usbcmd |= USBCMD_ATDTW_TRIPWIRE_SET;
			writel(usbcmd, &udc->op_regs->usbcmd);

			/* read the endpoint status */
			epstatus = readl(&udc->op_regs->epstatus) & bit_pos;

			/*
			 * Reread the ATDTW semaphore bit to check if it is
			 * cleared. When the hardware sees a hazard, it will
			 * clear the bit; otherwise it remains set and we can
			 * proceed with priming the endpoint if it is not
			 * already primed.
			 */
			if (readl(&udc->op_regs->usbcmd)
				& USBCMD_ATDTW_TRIPWIRE_SET)
				break;

			loops--;
			if (loops == 0) {
				dev_err(&udc->dev->dev,
					"Timeout for ATDTW_TRIPWIRE...\n");
				retval = -ETIME;
				goto done;
			}

			udelay(LOOPS_USEC);
		}

		/* Clear the semaphore */
		usbcmd = readl(&udc->op_regs->usbcmd);
		usbcmd &= USBCMD_ATDTW_TRIPWIRE_CLEAR;
		writel(usbcmd, &udc->op_regs->usbcmd);

		if (epstatus)
			goto done;
	}

	/* Write dQH next pointer and terminate bit to 0 */
	dqh->next_dtd_ptr = req->head->td_dma
		& EP_QUEUE_HEAD_NEXT_POINTER_MASK;

	/* clear active and halt bit, in case set from a previous error */
	dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);

	/* Ensure that updates to the QH will occur before priming. */
	wmb();

	/* Prime the Endpoint */
	writel(bit_pos, &udc->op_regs->epprime);
done:
	return retval;
}
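/*
 * A dTD is the controller's hardware transfer descriptor: a small,
 * DMA-visible structure holding a next-dTD link, a size/status word and
 * five buffer page pointers. build_dtd() below fills one descriptor for
 * up to EP_MAX_LENGTH_TRANSFER bytes of the request and reports through
 * *is_last whether a further dTD (including a zero-length packet) is
 * still needed.
 */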
static struct mv_dtd *build_dtd(struct mv_req *req, unsigned *length,
		dma_addr_t *dma, int *is_last)
{
	u32 temp;
	struct mv_dtd *dtd;
	struct mv_udc *udc;

	/* how big will this transfer be? */
	*length = min(req->req.length - req->req.actual,
			(unsigned)EP_MAX_LENGTH_TRANSFER);

	udc = req->ep->udc;

	/*
	 * Be careful that no _GFP_HIGHMEM is set,
	 * or we cannot use dma_to_virt
	 */
	dtd = dma_pool_alloc(udc->dtd_pool, GFP_KERNEL, dma);
	if (dtd == NULL)
		return dtd;

	dtd->td_dma = *dma;
	/* initialize buffer page pointers */
	temp = (u32)(req->req.dma + req->req.actual);
	dtd->buff_ptr0 = cpu_to_le32(temp);
	temp &= ~0xFFF;
	dtd->buff_ptr1 = cpu_to_le32(temp + 0x1000);
	dtd->buff_ptr2 = cpu_to_le32(temp + 0x2000);
	dtd->buff_ptr3 = cpu_to_le32(temp + 0x3000);
	dtd->buff_ptr4 = cpu_to_le32(temp + 0x4000);
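	/*
	 * buff_ptr0 carries the exact start address; the remaining four
	 * pointers reference the following 4 KB pages, so a single dTD
	 * can cross up to four page boundaries within one transfer chunk.
	 */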
	req->req.actual += *length;

	/* zlp is needed if req->req.zero is set */
	if (req->req.zero) {
		if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
			*is_last = 1;
		else
			*is_last = 0;
	} else if (req->req.length == req->req.actual)
		*is_last = 1;
	else
		*is_last = 0;

	/* Fill in the transfer size; set active bit */
	temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE);

	/* Enable interrupt for the last dtd of a request */
	if (*is_last && !req->req.no_interrupt)
		temp |= DTD_IOC;

	dtd->size_ioc_sts = temp;

	mb();

	return dtd;
}
/* generate dTD linked list for a request */
static int req_to_dtd(struct mv_req *req)
{
	unsigned count;
	int is_last, is_first = 1;
	struct mv_dtd *dtd, *last_dtd = NULL;
	struct mv_udc *udc;
	dma_addr_t dma;

	udc = req->ep->udc;

	do {
		dtd = build_dtd(req, &count, &dma, &is_last);
		if (dtd == NULL)
			return -ENOMEM;

		if (is_first) {
			is_first = 0;
			req->head = dtd;
		} else {
			last_dtd->dtd_next = dma;
			last_dtd->next_dtd_virt = dtd;
		}
		last_dtd = dtd;
		req->dtd_count++;
	} while (!is_last);

	/* set terminate bit to 1 for the last dTD */
	dtd->dtd_next = DTD_NEXT_TERMINATE;

	req->tail = dtd;

	return 0;
}
static int mv_ep_enable(struct usb_ep *_ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct mv_udc *udc;
	struct mv_ep *ep;
	struct mv_dqh *dqh;
	u16 max = 0;
	u32 bit_pos, epctrlx, direction;
	unsigned char zlt = 0, ios = 0, mult = 0;
	unsigned long flags;

	/* validate the arguments before dereferencing the endpoint */
	if (!_ep || !desc
			|| desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;

	ep = container_of(_ep, struct mv_ep, ep);
	udc = ep->udc;

	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	direction = ep_dir(ep);
	max = usb_endpoint_maxp(desc);

	/*
	 * disable HW zero length termination select
	 * driver handles zero length packet through req->req.zero
	 */
	zlt = 1;

	bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);

	/* Check if the Endpoint is Primed */
	if ((readl(&udc->op_regs->epprime) & bit_pos)
		|| (readl(&udc->op_regs->epstatus) & bit_pos)) {
		dev_info(&udc->dev->dev,
			"ep=%d %s: Init ERROR: ENDPTPRIME=0x%x,"
			" ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
			(unsigned)ep->ep_num, direction ? "SEND" : "RECV",
			(unsigned)readl(&udc->op_regs->epprime),
			(unsigned)readl(&udc->op_regs->epstatus),
			(unsigned)bit_pos);
		goto en_done;
	}
	/* Set the max packet length, interrupt on Setup and Mult fields */
	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
	case USB_ENDPOINT_XFER_BULK:
		zlt = 1;
		mult = 0;
		break;
	case USB_ENDPOINT_XFER_CONTROL:
		ios = 1;
		/* fall through */
	case USB_ENDPOINT_XFER_INT:
		mult = 0;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		/* Calculate transactions needed for high bandwidth iso */
		mult = (unsigned char)(1 + ((max >> 11) & 0x03));
		max = max & 0x7ff;	/* bit 0~10 */
		/* 3 transactions at most */
		if (mult > 3)
			goto en_done;
		break;
	default:
		goto en_done;
	}

	spin_lock_irqsave(&udc->lock, flags);
	/* Get the endpoint queue head address */
	dqh = ep->dqh;
	dqh->max_packet_length = (max << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
		| (mult << EP_QUEUE_HEAD_MULT_POS)
		| (zlt ? EP_QUEUE_HEAD_ZLT_SEL : 0)
		| (ios ? EP_QUEUE_HEAD_IOS : 0);
	dqh->next_dtd_ptr = 1;
	dqh->size_ioc_int_sts = 0;

	ep->ep.maxpacket = max;
	ep->ep.desc = desc;
	ep->stopped = 0;

	/* Enable the endpoint for Rx or Tx and set the endpoint type */
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if (direction == EP_DIR_IN) {
		epctrlx &= ~EPCTRL_TX_ALL_MASK;
		epctrlx |= EPCTRL_TX_ENABLE | EPCTRL_TX_DATA_TOGGLE_RST
			| ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
				<< EPCTRL_TX_EP_TYPE_SHIFT);
	} else {
		epctrlx &= ~EPCTRL_RX_ALL_MASK;
		epctrlx |= EPCTRL_RX_ENABLE | EPCTRL_RX_DATA_TOGGLE_RST
			| ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
				<< EPCTRL_RX_EP_TYPE_SHIFT);
	}
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);

	/*
	 * Implement Guideline (GL# USB-7) The unused endpoint type must
	 * be programmed to bulk.
	 */
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if ((epctrlx & EPCTRL_RX_ENABLE) == 0) {
		epctrlx |= (USB_ENDPOINT_XFER_BULK
			<< EPCTRL_RX_EP_TYPE_SHIFT);
		writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
	}

	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if ((epctrlx & EPCTRL_TX_ENABLE) == 0) {
		epctrlx |= (USB_ENDPOINT_XFER_BULK
			<< EPCTRL_TX_EP_TYPE_SHIFT);
		writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
	}

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
en_done:
	return -EINVAL;
}
static int mv_ep_disable(struct usb_ep *_ep)
{
	struct mv_udc *udc;
	struct mv_ep *ep;
	struct mv_dqh *dqh;
	u32 bit_pos, epctrlx, direction;
	unsigned long flags;

	ep = container_of(_ep, struct mv_ep, ep);
	if ((_ep == NULL) || !ep->ep.desc)
		return -EINVAL;

	udc = ep->udc;

	/* Get the endpoint queue head address */
	dqh = ep->dqh;

	spin_lock_irqsave(&udc->lock, flags);

	direction = ep_dir(ep);
	bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);

	/* Reset the max packet length and the interrupt on Setup */
	dqh->max_packet_length = 0;

	/* Disable the endpoint for Rx or Tx and reset the endpoint type */
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	epctrlx &= ~((direction == EP_DIR_IN)
			? (EPCTRL_TX_ENABLE | EPCTRL_TX_TYPE)
			: (EPCTRL_RX_ENABLE | EPCTRL_RX_TYPE));
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);

	/* nuke all pending requests (does flush) */
	nuke(ep, -ESHUTDOWN);

	ep->ep.desc = NULL;
	ep->stopped = 1;

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
static struct usb_request *
mv_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct mv_req *req = NULL;

	req = kzalloc(sizeof *req, gfp_flags);
	if (!req)
		return NULL;

	req->req.dma = DMA_ADDR_INVALID;
	INIT_LIST_HEAD(&req->queue);

	return &req->req;
}

static void mv_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct mv_req *req = NULL;

	req = container_of(_req, struct mv_req, req);

	if (_req)
		kfree(req);
}
static void mv_ep_fifo_flush(struct usb_ep *_ep)
{
	struct mv_udc *udc;
	u32 bit_pos, direction;
	struct mv_ep *ep;
	unsigned int loops;

	if (!_ep)
		return;

	ep = container_of(_ep, struct mv_ep, ep);
	if (!ep->ep.desc)
		return;

	udc = ep->udc;
	direction = ep_dir(ep);

	if (ep->ep_num == 0)
		bit_pos = (1 << 16) | 1;
	else if (direction == EP_DIR_OUT)
		bit_pos = 1 << ep->ep_num;
	else
		bit_pos = 1 << (16 + ep->ep_num);

	loops = LOOPS(EPSTATUS_TIMEOUT);
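	/*
	 * Flush protocol, as implemented below: write the endpoint bits to
	 * ENDPTFLUSH, wait for the register to clear (the controller clears
	 * each bit when that endpoint's flush finishes), then re-check
	 * ENDPTSTATUS and repeat, since an endpoint can become primed again
	 * while the flush is in progress.
	 */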
	do {
		unsigned int inter_loops;

		if (loops == 0) {
			dev_err(&udc->dev->dev,
				"TIMEOUT for ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
				(unsigned)readl(&udc->op_regs->epstatus),
				(unsigned)bit_pos);
			return;
		}
		/* Write 1 to the Flush register */
		writel(bit_pos, &udc->op_regs->epflush);

		/* Wait until flushing completes */
		inter_loops = LOOPS(FLUSH_TIMEOUT);
		while (readl(&udc->op_regs->epflush)) {
			/*
			 * ENDPTFLUSH bit should be cleared to indicate this
			 * operation is complete
			 */
			if (inter_loops == 0) {
				dev_err(&udc->dev->dev,
					"TIMEOUT for ENDPTFLUSH=0x%x,"
					"bit_pos=0x%x\n",
					(unsigned)readl(&udc->op_regs->epflush),
					(unsigned)bit_pos);
				return;
			}
			inter_loops--;
			udelay(LOOPS_USEC);
		}
		loops--;
	} while (readl(&udc->op_regs->epstatus) & bit_pos);
}
/* queues (submits) an I/O request to an endpoint */
static int
mv_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
	struct mv_req *req = container_of(_req, struct mv_req, req);
	struct mv_udc *udc = ep->udc;
	unsigned long flags;

	/* catch various bogus parameters */
	if (!_req || !req->req.complete || !req->req.buf
			|| !list_empty(&req->queue)) {
		dev_err(&udc->dev->dev, "%s, bad params", __func__);
		return -EINVAL;
	}
	if (unlikely(!_ep || !ep->ep.desc)) {
		dev_err(&udc->dev->dev, "%s, bad ep", __func__);
		return -EINVAL;
	}
	if (ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
		if (req->req.length > ep->ep.maxpacket)
			return -EMSGSIZE;
	}

	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	req->ep = ep;

	/*
	 * Map the virtual address to hardware; req->mapped records whether
	 * this driver did the mapping (and so must unmap in done()) or the
	 * caller supplied a DMA address that only needs syncing.
	 */
	if (req->req.dma == DMA_ADDR_INVALID) {
		req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
					req->req.buf,
					req->req.length, ep_dir(ep)
						? DMA_TO_DEVICE
						: DMA_FROM_DEVICE);
		req->mapped = 1;
	} else {
		dma_sync_single_for_device(ep->udc->gadget.dev.parent,
					req->req.dma, req->req.length,
					ep_dir(ep)
						? DMA_TO_DEVICE
						: DMA_FROM_DEVICE);
		req->mapped = 0;
	}

	req->req.status = -EINPROGRESS;
	req->req.actual = 0;
	req->dtd_count = 0;

	spin_lock_irqsave(&udc->lock, flags);

	/* build dtds and push them to device queue */
	if (!req_to_dtd(req)) {
		int retval;
		retval = queue_dtd(ep, req);
		if (retval) {
			spin_unlock_irqrestore(&udc->lock, flags);
			return retval;
		}
	} else {
		spin_unlock_irqrestore(&udc->lock, flags);
		return -ENOMEM;
	}

	/* Update ep0 state */
	if (ep->ep_num == 0)
		udc->ep0_state = DATA_STATE_XMIT;

	/* irq handler advances the queue */
	list_add_tail(&req->queue, &ep->queue);
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
static void mv_prime_ep(struct mv_ep *ep, struct mv_req *req)
{
	struct mv_dqh *dqh = ep->dqh;
	u32 bit_pos;

	/* Write dQH next pointer and terminate bit to 0 */
	dqh->next_dtd_ptr = req->head->td_dma
		& EP_QUEUE_HEAD_NEXT_POINTER_MASK;

	/* clear active and halt bit, in case set from a previous error */
	dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);

	/* Ensure that updates to the QH will occur before priming. */
	wmb();

	bit_pos = 1 << (((ep_dir(ep) == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);

	/* Prime the Endpoint */
	writel(bit_pos, &ep->udc->op_regs->epprime);
}
/* dequeues (cancels, unlinks) an I/O request from an endpoint */
static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
	struct mv_req *req;
	struct mv_udc *udc = ep->udc;
	unsigned long flags;
	int stopped, ret = 0;
	u32 epctrlx;

	if (!_ep || !_req)
		return -EINVAL;

	spin_lock_irqsave(&ep->udc->lock, flags);
	stopped = ep->stopped;

	/* Stop the ep before we deal with the queue */
	ep->stopped = 1;
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if (ep_dir(ep) == EP_DIR_IN)
		epctrlx &= ~EPCTRL_TX_ENABLE;
	else
		epctrlx &= ~EPCTRL_RX_ENABLE;
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);

	/* make sure it's actually queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	if (&req->req != _req) {
		ret = -EINVAL;
		goto out;
	}

	/* The request is in progress, or completed but not dequeued */
	if (ep->queue.next == &req->queue) {
		_req->status = -ECONNRESET;
		mv_ep_fifo_flush(_ep);	/* flush current transfer */

		/* The request isn't the last request in this ep queue */
		if (req->queue.next != &ep->queue) {
			struct mv_req *next_req;

			next_req = list_entry(req->queue.next,
					struct mv_req, queue);

			/* Point the QH to the first TD of next request */
			mv_prime_ep(ep, next_req);
		} else {
			struct mv_dqh *qh;

			qh = ep->dqh;
			qh->next_dtd_ptr = 1;
			qh->size_ioc_int_sts = 0;
		}

		/* The request hasn't been processed, patch up the TD chain */
	} else {
		struct mv_req *prev_req;

		prev_req = list_entry(req->queue.prev, struct mv_req, queue);
		writel(readl(&req->tail->dtd_next),
				&prev_req->tail->dtd_next);

	}

	done(ep, req, -ECONNRESET);

	/* Enable EP */
out:
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if (ep_dir(ep) == EP_DIR_IN)
		epctrlx |= EPCTRL_TX_ENABLE;
	else
		epctrlx |= EPCTRL_RX_ENABLE;
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
	ep->stopped = stopped;

	spin_unlock_irqrestore(&ep->udc->lock, flags);
	return ret;
}
static void ep_set_stall(struct mv_udc *udc, u8 ep_num, u8 direction, int stall)
{
	u32 epctrlx;

	epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);

	if (stall) {
		if (direction == EP_DIR_IN)
			epctrlx |= EPCTRL_TX_EP_STALL;
		else
			epctrlx |= EPCTRL_RX_EP_STALL;
	} else {
		if (direction == EP_DIR_IN) {
			epctrlx &= ~EPCTRL_TX_EP_STALL;
			epctrlx |= EPCTRL_TX_DATA_TOGGLE_RST;
		} else {
			epctrlx &= ~EPCTRL_RX_EP_STALL;
			epctrlx |= EPCTRL_RX_DATA_TOGGLE_RST;
		}
	}
	writel(epctrlx, &udc->op_regs->epctrlx[ep_num]);
}

static int ep_is_stall(struct mv_udc *udc, u8 ep_num, u8 direction)
{
	u32 epctrlx;

	epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);

	if (direction == EP_DIR_OUT)
		return (epctrlx & EPCTRL_RX_EP_STALL) ? 1 : 0;
	else
		return (epctrlx & EPCTRL_TX_EP_STALL) ? 1 : 0;
}
static int mv_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
{
	struct mv_ep *ep;
	unsigned long flags = 0;
	int status = 0;
	struct mv_udc *udc;

	ep = container_of(_ep, struct mv_ep, ep);
	udc = ep->udc;
	if (!_ep || !ep->ep.desc) {
		status = -EINVAL;
		goto out;
	}

	if (ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
		status = -EOPNOTSUPP;
		goto out;
	}

	/*
	 * Attempting to halt an IN ep will fail if any transfer requests
	 * are still queued
	 */
	if (halt && (ep_dir(ep) == EP_DIR_IN) && !list_empty(&ep->queue)) {
		status = -EAGAIN;
		goto out;
	}

	spin_lock_irqsave(&ep->udc->lock, flags);
	ep_set_stall(udc, ep->ep_num, ep_dir(ep), halt);
	if (halt && wedge)
		ep->wedge = 1;
	else if (!halt)
		ep->wedge = 0;
	spin_unlock_irqrestore(&ep->udc->lock, flags);

	if (ep->ep_num == 0) {
		udc->ep0_state = WAIT_FOR_SETUP;
		udc->ep0_dir = EP_DIR_OUT;
	}
out:
	return status;
}
static int mv_ep_set_halt(struct usb_ep *_ep, int halt)
{
	return mv_ep_set_halt_wedge(_ep, halt, 0);
}

static int mv_ep_set_wedge(struct usb_ep *_ep)
{
	return mv_ep_set_halt_wedge(_ep, 1, 1);
}

static struct usb_ep_ops mv_ep_ops = {
	.enable		= mv_ep_enable,
	.disable	= mv_ep_disable,

	.alloc_request	= mv_alloc_request,
	.free_request	= mv_free_request,

	.queue		= mv_ep_queue,
	.dequeue	= mv_ep_dequeue,

	.set_wedge	= mv_ep_set_wedge,
	.set_halt	= mv_ep_set_halt,
	.fifo_flush	= mv_ep_fifo_flush,	/* flush fifo */
};
static void udc_clock_enable(struct mv_udc *udc)
{
	unsigned int i;

	for (i = 0; i < udc->clknum; i++)
		clk_enable(udc->clk[i]);
}

static void udc_clock_disable(struct mv_udc *udc)
{
	unsigned int i;

	for (i = 0; i < udc->clknum; i++)
		clk_disable(udc->clk[i]);
}
static void udc_stop(struct mv_udc *udc)
{
	u32 tmp;

	/* Disable interrupts */
	tmp = readl(&udc->op_regs->usbintr);
	tmp &= ~(USBINTR_INT_EN | USBINTR_ERR_INT_EN |
		USBINTR_PORT_CHANGE_DETECT_EN | USBINTR_RESET_EN);
	writel(tmp, &udc->op_regs->usbintr);

	udc->stopped = 1;

	/* Reset the Run bit in the command register to stop VUSB */
	tmp = readl(&udc->op_regs->usbcmd);
	tmp &= ~USBCMD_RUN_STOP;
	writel(tmp, &udc->op_regs->usbcmd);
}
static void udc_start(struct mv_udc *udc)
{
	u32 usbintr;

	usbintr = USBINTR_INT_EN | USBINTR_ERR_INT_EN
		| USBINTR_PORT_CHANGE_DETECT_EN
		| USBINTR_RESET_EN | USBINTR_DEVICE_SUSPEND;

	/* Enable interrupts */
	writel(usbintr, &udc->op_regs->usbintr);

	udc->stopped = 0;

	/* Set the Run bit in the command register */
	writel(USBCMD_RUN_STOP, &udc->op_regs->usbcmd);
}
static int udc_reset(struct mv_udc *udc)
{
	unsigned int loops;
	u32 tmp, portsc;

	/* Stop the controller */
	tmp = readl(&udc->op_regs->usbcmd);
	tmp &= ~USBCMD_RUN_STOP;
	writel(tmp, &udc->op_regs->usbcmd);

	/* Reset the controller to get default values */
	writel(USBCMD_CTRL_RESET, &udc->op_regs->usbcmd);

	/* wait for reset to complete */
	loops = LOOPS(RESET_TIMEOUT);
	while (readl(&udc->op_regs->usbcmd) & USBCMD_CTRL_RESET) {
		if (loops == 0) {
			dev_err(&udc->dev->dev,
				"Wait for RESET completed TIMEOUT\n");
			return -ETIMEDOUT;
		}
		loops--;
		udelay(LOOPS_USEC);
	}

	/* set controller to device mode */
	tmp = readl(&udc->op_regs->usbmode);
	tmp |= USBMODE_CTRL_MODE_DEVICE;

	/* turn setup lockout off, require setup tripwire in usbcmd */
	tmp |= USBMODE_SETUP_LOCK_OFF | USBMODE_STREAM_DISABLE;

	writel(tmp, &udc->op_regs->usbmode);

	writel(0x0, &udc->op_regs->epsetupstat);

	/* Configure the Endpoint List Address */
	writel(udc->ep_dqh_dma & USB_EP_LIST_ADDRESS_MASK,
		&udc->op_regs->eplistaddr);

	portsc = readl(&udc->op_regs->portsc[0]);
	if (readl(&udc->cap_regs->hcsparams) & HCSPARAMS_PPC)
		portsc &= (~PORTSCX_W1C_BITS | ~PORTSCX_PORT_POWER);

	if (udc->force_fs)
		portsc |= PORTSCX_FORCE_FULL_SPEED_CONNECT;
	else
		portsc &= (~PORTSCX_FORCE_FULL_SPEED_CONNECT);

	writel(portsc, &udc->op_regs->portsc[0]);

	tmp = readl(&udc->op_regs->epctrlx[0]);
	tmp &= ~(EPCTRL_TX_EP_STALL | EPCTRL_RX_EP_STALL);
	writel(tmp, &udc->op_regs->epctrlx[0]);

	return 0;
}
static int mv_udc_enable_internal(struct mv_udc *udc)
{
	int retval;

	if (udc->active)
		return 0;

	dev_dbg(&udc->dev->dev, "enable udc\n");
	udc_clock_enable(udc);
	if (udc->pdata->phy_init) {
		retval = udc->pdata->phy_init(udc->phy_regs);
		if (retval) {
			dev_err(&udc->dev->dev,
				"init phy error %d\n", retval);
			udc_clock_disable(udc);
			return retval;
		}
	}
	udc->active = 1;

	return 0;
}

static int mv_udc_enable(struct mv_udc *udc)
{
	if (udc->clock_gating)
		return mv_udc_enable_internal(udc);

	return 0;
}

static void mv_udc_disable_internal(struct mv_udc *udc)
{
	if (udc->active) {
		dev_dbg(&udc->dev->dev, "disable udc\n");
		if (udc->pdata->phy_deinit)
			udc->pdata->phy_deinit(udc->phy_regs);
		udc_clock_disable(udc);
		udc->active = 0;
	}
}

static void mv_udc_disable(struct mv_udc *udc)
{
	if (udc->clock_gating)
		mv_udc_disable_internal(udc);
}
static int mv_udc_get_frame(struct usb_gadget *gadget)
{
	struct mv_udc *udc;
	u16 retval;

	if (!gadget)
		return -ENODEV;

	udc = container_of(gadget, struct mv_udc, gadget);

	retval = readl(&udc->op_regs->frindex) & USB_FRINDEX_MASKS;

	return retval;
}

/* Tries to wake up the host connected to this gadget */
static int mv_udc_wakeup(struct usb_gadget *gadget)
{
	struct mv_udc *udc = container_of(gadget, struct mv_udc, gadget);
	u32 portsc;

	/* Remote wakeup feature not enabled by host */
	if (!udc->remote_wakeup)
		return -ENOTSUPP;

	portsc = readl(&udc->op_regs->portsc);

	/* not suspended? */
	if (!(portsc & PORTSCX_PORT_SUSPEND))
		return 0;

	/* trigger force resume */
	portsc |= PORTSCX_PORT_FORCE_RESUME;
	writel(portsc, &udc->op_regs->portsc[0]);

	return 0;
}
static int mv_udc_vbus_session(struct usb_gadget *gadget, int is_active)
{
	struct mv_udc *udc;
	unsigned long flags;
	int retval = 0;

	udc = container_of(gadget, struct mv_udc, gadget);

	spin_lock_irqsave(&udc->lock, flags);

	udc->vbus_active = (is_active != 0);

	dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
		__func__, udc->softconnect, udc->vbus_active);

	if (udc->driver && udc->softconnect && udc->vbus_active) {
		retval = mv_udc_enable(udc);
		if (retval == 0) {
			/* Clock is disabled, need re-init registers */
			udc_reset(udc);
			ep0_reset(udc);
			udc_start(udc);
		}
	} else if (udc->driver && udc->softconnect) {
		/* stop all transfers in the queue */
		stop_activity(udc, udc->driver);
		udc_stop(udc);
		mv_udc_disable(udc);
	}

	spin_unlock_irqrestore(&udc->lock, flags);
	return retval;
}
static int mv_udc_pullup(struct usb_gadget *gadget, int is_on)
{
	struct mv_udc *udc;
	unsigned long flags;
	int retval = 0;

	udc = container_of(gadget, struct mv_udc, gadget);

	spin_lock_irqsave(&udc->lock, flags);

	udc->softconnect = (is_on != 0);

	dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
		__func__, udc->softconnect, udc->vbus_active);

	if (udc->driver && udc->softconnect && udc->vbus_active) {
		retval = mv_udc_enable(udc);
		if (retval == 0) {
			/* Clock is disabled, need re-init registers */
			udc_reset(udc);
			ep0_reset(udc);
			udc_start(udc);
		}
	} else if (udc->driver && udc->vbus_active) {
		/* stop all transfers in the queue */
		stop_activity(udc, udc->driver);
		udc_stop(udc);
		mv_udc_disable(udc);
	}

	spin_unlock_irqrestore(&udc->lock, flags);
	return retval;
}
static int mv_udc_start(struct usb_gadget_driver *driver,
		int (*bind)(struct usb_gadget *));
static int mv_udc_stop(struct usb_gadget_driver *driver);

/* device controller usb_gadget_ops structure */
static const struct usb_gadget_ops mv_ops = {
	/* returns the current frame number */
	.get_frame	= mv_udc_get_frame,

	/* tries to wake up the host connected to this gadget */
	.wakeup		= mv_udc_wakeup,

	/* notify controller that VBUS is powered or not */
	.vbus_session	= mv_udc_vbus_session,

	/* D+ pullup, software-controlled connect/disconnect to USB host */
	.pullup		= mv_udc_pullup,
	.start		= mv_udc_start,
	.stop		= mv_udc_stop,
};
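/*
 * Endpoint bookkeeping note: udc->eps[] is indexed as ep_num * 2 +
 * direction, so even entries are the OUT halves and odd entries the IN
 * halves of each physical endpoint; eps_init() below names them
 * accordingly ("ep%dout"/"ep%din"), with index 0 used for the
 * bidirectional ep0.
 */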
static int eps_init(struct mv_udc *udc)
{
	struct mv_ep *ep;
	char name[14];
	int i;

	/* initialize ep0 */
	ep = &udc->eps[0];
	ep->udc = udc;
	strncpy(ep->name, "ep0", sizeof(ep->name));
	ep->ep.name = ep->name;
	ep->ep.ops = &mv_ep_ops;
	ep->wedge = 0;
	ep->stopped = 0;
	ep->ep.maxpacket = EP0_MAX_PKT_SIZE;
	ep->ep_num = 0;
	ep->ep.desc = &mv_ep0_desc;
	INIT_LIST_HEAD(&ep->queue);

	ep->ep_type = USB_ENDPOINT_XFER_CONTROL;

	/* initialize other endpoints */
	for (i = 2; i < udc->max_eps * 2; i++) {
		ep = &udc->eps[i];
		if (i % 2) {
			snprintf(name, sizeof(name), "ep%din", i / 2);
			ep->direction = EP_DIR_IN;
		} else {
			snprintf(name, sizeof(name), "ep%dout", i / 2);
			ep->direction = EP_DIR_OUT;
		}
		ep->udc = udc;
		strncpy(ep->name, name, sizeof(ep->name));
		ep->ep.name = ep->name;

		ep->ep.ops = &mv_ep_ops;
		ep->stopped = 0;
		ep->ep.maxpacket = (unsigned short) ~0;
		ep->ep_num = i / 2;

		INIT_LIST_HEAD(&ep->queue);
		list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);

		ep->dqh = &udc->ep_dqh[i];
	}

	return 0;
}
/* delete all endpoint requests, called with spinlock held */
static void nuke(struct mv_ep *ep, int status)
{
	ep->stopped = 1;

	/* endpoint fifo flush */
	mv_ep_fifo_flush(&ep->ep);

	while (!list_empty(&ep->queue)) {
		struct mv_req *req = NULL;
		req = list_entry(ep->queue.next, struct mv_req, queue);
		done(ep, req, status);
	}
}
/* stop all USB activities */
static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver)
{
	struct mv_ep	*ep;

	nuke(&udc->eps[0], -ESHUTDOWN);

	list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
		nuke(ep, -ESHUTDOWN);
	}

	/* report disconnect; the driver is already quiesced */
	if (driver) {
		spin_unlock(&udc->lock);
		driver->disconnect(&udc->gadget);
		spin_lock(&udc->lock);
	}
}
static int mv_udc_start(struct usb_gadget_driver *driver,
		int (*bind)(struct usb_gadget *))
{
	struct mv_udc *udc = the_controller;
	int retval = 0;
	unsigned long flags;

	if (!udc)
		return -ENODEV;

	if (udc->driver)
		return -EBUSY;

	spin_lock_irqsave(&udc->lock, flags);

	/* hook up the driver ... */
	driver->driver.bus = NULL;
	udc->driver = driver;
	udc->gadget.dev.driver = &driver->driver;

	udc->usb_state = USB_STATE_ATTACHED;
	udc->ep0_state = WAIT_FOR_SETUP;
	udc->ep0_dir = EP_DIR_OUT;

	spin_unlock_irqrestore(&udc->lock, flags);

	retval = bind(&udc->gadget);
	if (retval) {
		dev_err(&udc->dev->dev, "bind to driver %s --> %d\n",
				driver->driver.name, retval);
		udc->driver = NULL;
		udc->gadget.dev.driver = NULL;
		return retval;
	}

	if (!IS_ERR_OR_NULL(udc->transceiver)) {
		retval = otg_set_peripheral(udc->transceiver->otg,
					&udc->gadget);
		if (retval) {
			dev_err(&udc->dev->dev,
				"unable to register peripheral to otg\n");
			if (driver->unbind) {
				driver->unbind(&udc->gadget);
				udc->gadget.dev.driver = NULL;
				udc->driver = NULL;
			}
			return retval;
		}
	}

	/* pullup is always on */
	mv_udc_pullup(&udc->gadget, 1);

	/* When booting with the cable attached, no vbus irq will occur */
	if (udc->qwork)
		queue_work(udc->qwork, &udc->vbus_work);

	return 0;
}
static int mv_udc_stop(struct usb_gadget_driver *driver)
{
	struct mv_udc *udc = the_controller;
	unsigned long flags;

	if (!udc)
		return -ENODEV;

	spin_lock_irqsave(&udc->lock, flags);

	mv_udc_enable(udc);
	udc_stop(udc);

	/* stop all usb activities */
	udc->gadget.speed = USB_SPEED_UNKNOWN;
	stop_activity(udc, driver);
	mv_udc_disable(udc);

	spin_unlock_irqrestore(&udc->lock, flags);

	/* unbind gadget driver */
	driver->unbind(&udc->gadget);
	udc->gadget.dev.driver = NULL;
	udc->driver = NULL;

	return 0;
}
static void mv_set_ptc(struct mv_udc *udc, u32 mode)
{
	u32 portsc;

	portsc = readl(&udc->op_regs->portsc[0]);
	portsc |= mode << 16;
	writel(portsc, &udc->op_regs->portsc[0]);
}

static void prime_status_complete(struct usb_ep *ep, struct usb_request *_req)
{
	struct mv_udc *udc = the_controller;
	struct mv_req *req = container_of(_req, struct mv_req, req);
	unsigned long flags;

	dev_info(&udc->dev->dev, "switch to test mode %d\n", req->test_mode);

	spin_lock_irqsave(&udc->lock, flags);
	if (req->test_mode) {
		mv_set_ptc(udc, req->test_mode);
		req->test_mode = 0;
	}
	spin_unlock_irqrestore(&udc->lock, flags);
}
static int
udc_prime_status(struct mv_udc *udc, u8 direction, u16 status, bool empty)
{
	int retval = 0;
	struct mv_req *req;
	struct mv_ep *ep;

	ep = &udc->eps[0];
	udc->ep0_dir = direction;
	udc->ep0_state = WAIT_FOR_OUT_STATUS;

	req = udc->status_req;

	/* fill in the request structure */
	if (empty == false) {
		*((u16 *) req->req.buf) = cpu_to_le16(status);
		req->req.length = 2;
	} else
		req->req.length = 0;

	req->ep = ep;
	req->req.status = -EINPROGRESS;
	req->req.actual = 0;
	if (udc->test_mode) {
		req->req.complete = prime_status_complete;
		req->test_mode = udc->test_mode;
		udc->test_mode = 0;
	} else
		req->req.complete = NULL;
	req->dtd_count = 0;

	if (req->req.dma == DMA_ADDR_INVALID) {
		req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
				req->req.buf, req->req.length,
				ep_dir(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		req->mapped = 1;
	}

	/* prime the data phase */
	if (!req_to_dtd(req))
		retval = queue_dtd(ep, req);
	else {	/* no mem */
		retval = -ENOMEM;
		goto out;
	}

	if (retval) {
		dev_err(&udc->dev->dev,
			"response error on GET_STATUS request\n");
		goto out;
	}

	list_add_tail(&req->queue, &ep->queue);

	return 0;
out:
	return retval;
}
static void mv_udc_testmode(struct mv_udc *udc, u16 index)
{
	if (index <= TEST_FORCE_EN) {
		udc->test_mode = index;
		if (udc_prime_status(udc, EP_DIR_IN, 0, true))
			ep0_stall(udc);
	} else
		dev_err(&udc->dev->dev,
			"This test mode (%d) is not supported\n", index);
}
static void ch9setaddress(struct mv_udc *udc, struct usb_ctrlrequest *setup)
{
	udc->dev_addr = (u8)setup->wValue;

	/* update usb state */
	udc->usb_state = USB_STATE_ADDRESS;

	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
		ep0_stall(udc);
}
static void ch9getstatus(struct mv_udc *udc, u8 ep_num,
	struct usb_ctrlrequest *setup)
{
	u16 status = 0;
	int retval;

	if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
		!= (USB_DIR_IN | USB_TYPE_STANDARD))
		return;

	if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
		status = 1 << USB_DEVICE_SELF_POWERED;
		status |= udc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
	} else if ((setup->bRequestType & USB_RECIP_MASK)
			== USB_RECIP_INTERFACE) {
		/* get interface status */
		status = 0;
	} else if ((setup->bRequestType & USB_RECIP_MASK)
			== USB_RECIP_ENDPOINT) {
		u8 ep_num, direction;

		ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
		direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
				? EP_DIR_IN : EP_DIR_OUT;
		status = ep_is_stall(udc, ep_num, direction)
				<< USB_ENDPOINT_HALT;
	}

	retval = udc_prime_status(udc, EP_DIR_IN, status, false);
	if (retval)
		ep0_stall(udc);
	else
		udc->ep0_state = DATA_STATE_XMIT;
}
static void ch9clearfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
{
	u8 ep_num;
	u8 direction;
	struct mv_ep *ep;

	if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
		switch (setup->wValue) {
		case USB_DEVICE_REMOTE_WAKEUP:
			udc->remote_wakeup = 0;
			break;
		default:
			goto out;
		}
	} else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
		switch (setup->wValue) {
		case USB_ENDPOINT_HALT:
			ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
			direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
				? EP_DIR_IN : EP_DIR_OUT;
			if (setup->wValue != 0 || setup->wLength != 0
				|| ep_num > udc->max_eps)
				goto out;
			ep = &udc->eps[ep_num * 2 + direction];
			if (ep->wedge == 1)
				break;
			spin_unlock(&udc->lock);
			ep_set_stall(udc, ep_num, direction, 0);
			spin_lock(&udc->lock);
			break;
		default:
			goto out;
		}
	} else
		goto out;

	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
		ep0_stall(udc);
out:
	return;
}
static void ch9setfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
{
	u8 ep_num;
	u8 direction;

	if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
		switch (setup->wValue) {
		case USB_DEVICE_REMOTE_WAKEUP:
			udc->remote_wakeup = 1;
			break;
		case USB_DEVICE_TEST_MODE:
			if (setup->wIndex & 0xFF
				|| udc->gadget.speed != USB_SPEED_HIGH)
				ep0_stall(udc);

			if (udc->usb_state != USB_STATE_CONFIGURED
				&& udc->usb_state != USB_STATE_ADDRESS
				&& udc->usb_state != USB_STATE_DEFAULT)
				ep0_stall(udc);

			mv_udc_testmode(udc, (setup->wIndex >> 8));
			goto out;
		default:
			goto out;
		}
	} else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
		switch (setup->wValue) {
		case USB_ENDPOINT_HALT:
			ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
			direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
				? EP_DIR_IN : EP_DIR_OUT;
			if (setup->wValue != 0 || setup->wLength != 0
				|| ep_num > udc->max_eps)
				goto out;
			spin_unlock(&udc->lock);
			ep_set_stall(udc, ep_num, direction, 1);
			spin_lock(&udc->lock);
			break;
		default:
			goto out;
		}
	} else
		goto out;

	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
		ep0_stall(udc);
out:
	return;
}
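/*
 * ep0 control-transfer flow, as driven by the handlers below: a SETUP
 * packet arrives in WAIT_FOR_SETUP; requests with a data stage move to
 * DATA_STATE_XMIT or DATA_STATE_RECV depending on direction, the
 * opposite-direction status stage is then primed, and the state sits in
 * WAIT_FOR_OUT_STATUS until the transaction retires and it returns to
 * WAIT_FOR_SETUP.
 */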
static void handle_setup_packet(struct mv_udc *udc, u8 ep_num,
	struct usb_ctrlrequest *setup)
{
	bool delegate = false;

	nuke(&udc->eps[ep_num * 2 + EP_DIR_OUT], -ESHUTDOWN);

	dev_dbg(&udc->dev->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
			setup->bRequestType, setup->bRequest,
			setup->wValue, setup->wIndex, setup->wLength);
	/* We process some standard setup requests here */
	if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (setup->bRequest) {
		case USB_REQ_GET_STATUS:
			ch9getstatus(udc, ep_num, setup);
			break;

		case USB_REQ_SET_ADDRESS:
			ch9setaddress(udc, setup);
			break;

		case USB_REQ_CLEAR_FEATURE:
			ch9clearfeature(udc, setup);
			break;

		case USB_REQ_SET_FEATURE:
			ch9setfeature(udc, setup);
			break;

		default:
			delegate = true;
		}
	} else
		delegate = true;

	/* delegate USB standard requests to the gadget driver */
	if (delegate == true) {
		/* USB requests handled by gadget */
		if (setup->wLength) {
			/* DATA phase from gadget, STATUS phase from udc */
			udc->ep0_dir = (setup->bRequestType & USB_DIR_IN)
					? EP_DIR_IN : EP_DIR_OUT;
			spin_unlock(&udc->lock);
			if (udc->driver->setup(&udc->gadget,
				&udc->local_setup_buff) < 0)
				ep0_stall(udc);
			spin_lock(&udc->lock);
			udc->ep0_state = (setup->bRequestType & USB_DIR_IN)
					? DATA_STATE_XMIT : DATA_STATE_RECV;
		} else {
			/* no DATA phase, IN STATUS phase from gadget */
			udc->ep0_dir = EP_DIR_IN;
			spin_unlock(&udc->lock);
			if (udc->driver->setup(&udc->gadget,
				&udc->local_setup_buff) < 0)
				ep0_stall(udc);
			spin_lock(&udc->lock);
			udc->ep0_state = WAIT_FOR_OUT_STATUS;
		}
	}
}
/* complete the DATA or STATUS phase of ep0, prime the next phase if needed */
static void ep0_req_complete(struct mv_udc *udc,
	struct mv_ep *ep0, struct mv_req *req)
{
	u32 new_addr;

	if (udc->usb_state == USB_STATE_ADDRESS) {
		/* set the new address */
		new_addr = (u32)udc->dev_addr;
		writel(new_addr << USB_DEVICE_ADDRESS_BIT_SHIFT,
			&udc->op_regs->deviceaddr);
	}

	done(ep0, req, 0);

	switch (udc->ep0_state) {
	case DATA_STATE_XMIT:
		/* receive status phase */
		if (udc_prime_status(udc, EP_DIR_OUT, 0, true))
			ep0_stall(udc);
		break;
	case DATA_STATE_RECV:
		/* send status phase */
		if (udc_prime_status(udc, EP_DIR_IN, 0, true))
			ep0_stall(udc);
		break;
	case WAIT_FOR_OUT_STATUS:
		udc->ep0_state = WAIT_FOR_SETUP;
		break;
	case WAIT_FOR_SETUP:
		dev_err(&udc->dev->dev, "unexpected ep0 packet\n");
		break;
	default:
		ep0_stall(udc);
		break;
	}
}

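/*
 * Copy the 8-byte setup packet out of the OUT dQH. The tripwire
 * handshake below makes the copy atomic: if the controller overwrites
 * the buffer with a new setup packet while we read it, the tripwire
 * bit is cleared and the copy is retried.
 */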
static void get_setup_data(struct mv_udc *udc, u8 ep_num, u8 *buffer_ptr)
{
	u32 temp;
	struct mv_dqh *dqh;

	dqh = &udc->ep_dqh[ep_num * 2 + EP_DIR_OUT];

	/* Clear bit in ENDPTSETUPSTAT */
	writel((1 << ep_num), &udc->op_regs->epsetupstat);

	/* a hazard exists if a new setup packet arrives during the copy */
	do {
		/* Set Setup Tripwire */
		temp = readl(&udc->op_regs->usbcmd);
		writel(temp | USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);

		/* Copy the setup packet to local buffer */
		memcpy(buffer_ptr, (u8 *) dqh->setup_buffer, 8);
	} while (!(readl(&udc->op_regs->usbcmd) & USBCMD_SETUP_TRIPWIRE_SET));

	/* Clear Setup Tripwire */
	temp = readl(&udc->op_regs->usbcmd);
	writel(temp & ~USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);
}

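/*
 * Transfer-complete interrupt: read out any pending setup packets
 * first, then walk ENDPTCOMPLETE and retire finished requests on
 * each endpoint queue.
 */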
static void irq_process_tr_complete(struct mv_udc *udc)
{
	u32 tmp, bit_pos;
	int i, ep_num = 0, direction = 0;
	struct mv_ep *curr_ep;
	struct mv_req *curr_req, *temp_req;
	int status;

	/*
	 * We use separate loops for ENDPTSETUPSTAT and ENDPTCOMPLETE
	 * because the setup packets are to be read ASAP
	 */

	/* Process all Setup packet received interrupts */
	tmp = readl(&udc->op_regs->epsetupstat);
	if (tmp) {
		for (i = 0; i < udc->max_eps; i++) {
			if (tmp & (1 << i)) {
				get_setup_data(udc, i,
					(u8 *)(&udc->local_setup_buff));
				handle_setup_packet(udc, i,
					&udc->local_setup_buff);
			}
		}
	}

	/*
	 * Don't clear the endpoint setup status register here.
	 * It is cleared as a setup packet is read out of the buffer
	 */

	/* Process non-setup transaction complete interrupts */
	tmp = readl(&udc->op_regs->epcomplete);
	if (!tmp)
		return;

	writel(tmp, &udc->op_regs->epcomplete);

	for (i = 0; i < udc->max_eps * 2; i++) {
		ep_num = i >> 1;
		direction = i % 2;

		bit_pos = 1 << (ep_num + 16 * direction);

		if (!(bit_pos & tmp))
			continue;

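		/*
		 * ep0 is bidirectional but is backed by the single
		 * eps[0] entry, so map its IN half (i == 1) back to it.
		 */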
		if (i == 1)
			curr_ep = &udc->eps[0];
		else
			curr_ep = &udc->eps[i];

		/* process the req queue until an incomplete request */
		list_for_each_entry_safe(curr_req, temp_req,
			&curr_ep->queue, queue) {
			status = process_ep_req(udc, i, curr_req);
			if (status)
				break;

			/* write back status to req */
			curr_req->req.status = status;

			/* ep0 request completion */
			if (ep_num == 0) {
				ep0_req_complete(udc, curr_ep, curr_req);
				break;
			} else {
				done(curr_ep, curr_req, status);
			}
		}
	}
}

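/*
 * Bus reset: clear the device address, flush every primed endpoint
 * and, if the reset has already completed, fully re-initialize the
 * controller.
 */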
static void irq_process_reset(struct mv_udc *udc)
{
	u32 tmp;
	unsigned int loops;

	udc->ep0_dir = EP_DIR_OUT;
	udc->ep0_state = WAIT_FOR_SETUP;
	udc->remote_wakeup = 0;		/* default to 0 on reset */

	/* The address occupies bits 25-31. Clear the address */
	tmp = readl(&udc->op_regs->deviceaddr);
	tmp &= ~(USB_DEVICE_ADDRESS_MASK);
	writel(tmp, &udc->op_regs->deviceaddr);

	/* Clear all the setup token semaphores */
	tmp = readl(&udc->op_regs->epsetupstat);
	writel(tmp, &udc->op_regs->epsetupstat);

	/* Clear all the endpoint complete status bits */
	tmp = readl(&udc->op_regs->epcomplete);
	writel(tmp, &udc->op_regs->epcomplete);

	/* wait until all endptprime bits cleared */
	loops = LOOPS(PRIME_TIMEOUT);
	while (readl(&udc->op_regs->epprime)) {
		if (loops == 0) {
			dev_err(&udc->dev->dev,
				"Timeout for ENDPTPRIME = 0x%x\n",
				readl(&udc->op_regs->epprime));
			break;
		}
		loops--;
		udelay(LOOPS_USEC);
	}

	/* Write 1s to the Flush register */
	writel((u32)~0, &udc->op_regs->epflush);

	if (readl(&udc->op_regs->portsc[0]) & PORTSCX_PORT_RESET) {
		dev_info(&udc->dev->dev, "usb bus reset\n");
		udc->usb_state = USB_STATE_DEFAULT;
		/* reset all the queues, stop all USB activities */
		stop_activity(udc, udc->driver);
	} else {
		dev_info(&udc->dev->dev, "USB reset portsc 0x%x\n",
			readl(&udc->op_regs->portsc[0]));

		/* the bus reset already completed; re-initialize the controller */
		udc_reset(udc);

		/* reset all the queues, stop all USB activities */
		stop_activity(udc, udc->driver);

		/* reset ep0 dQH and endptctrl */
		ep0_reset(udc);

		/* enable interrupt and set controller to run state */
		udc_start(udc);

		udc->usb_state = USB_STATE_ATTACHED;
	}
}

static void handle_bus_resume(struct mv_udc *udc)
{
	udc->usb_state = udc->resume_state;
	udc->resume_state = 0;

	/* report resume to the driver */
	if (udc->driver && udc->driver->resume) {
		spin_unlock(&udc->lock);
		udc->driver->resume(&udc->gadget);
		spin_lock(&udc->lock);
	}
}

static void irq_process_suspend(struct mv_udc *udc)
{
	udc->resume_state = udc->usb_state;
	udc->usb_state = USB_STATE_SUSPENDED;

	if (udc->driver->suspend) {
		spin_unlock(&udc->lock);
		udc->driver->suspend(&udc->gadget);
		spin_lock(&udc->lock);
	}
}

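/*
 * Port change interrupt: latch the negotiated speed once the port
 * reset finishes, and track suspend/resume transitions for the
 * gadget driver.
 */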
static void irq_process_port_change(struct mv_udc *udc)
{
	u32 portsc;

	portsc = readl(&udc->op_regs->portsc[0]);
	if (!(portsc & PORTSCX_PORT_RESET)) {
		/* Get the speed */
		u32 speed = portsc & PORTSCX_PORT_SPEED_MASK;
		switch (speed) {
		case PORTSCX_PORT_SPEED_HIGH:
			udc->gadget.speed = USB_SPEED_HIGH;
			break;
		case PORTSCX_PORT_SPEED_FULL:
			udc->gadget.speed = USB_SPEED_FULL;
			break;
		case PORTSCX_PORT_SPEED_LOW:
			udc->gadget.speed = USB_SPEED_LOW;
			break;
		default:
			udc->gadget.speed = USB_SPEED_UNKNOWN;
			break;
		}
	}

	if (portsc & PORTSCX_PORT_SUSPEND) {
		udc->resume_state = udc->usb_state;
		udc->usb_state = USB_STATE_SUSPENDED;
		if (udc->driver->suspend) {
			spin_unlock(&udc->lock);
			udc->driver->suspend(&udc->gadget);
			spin_lock(&udc->lock);
		}
	}

	if (!(portsc & PORTSCX_PORT_SUSPEND)
		&& udc->usb_state == USB_STATE_SUSPENDED) {
		handle_bus_resume(udc);
	}

	if (!udc->resume_state)
		udc->usb_state = USB_STATE_DEFAULT;
}

static void irq_process_error(struct mv_udc *udc)
{
	/* Increment the error count */
	udc->errors++;
}

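/* top-level ISR: ack USBSTS and fan out to the per-source handlers */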
static irqreturn_t mv_udc_irq(int irq, void *dev)
{
	struct mv_udc *udc = (struct mv_udc *)dev;
	u32 status, intr;

	/* Disable ISR when stopped bit is set */
	if (udc->stopped)
		return IRQ_NONE;

	spin_lock(&udc->lock);

	status = readl(&udc->op_regs->usbsts);
	intr = readl(&udc->op_regs->usbintr);
	status &= intr;

	if (status == 0) {
		spin_unlock(&udc->lock);
		return IRQ_NONE;
	}

	/* Clear all the interrupts that occurred */
	writel(status, &udc->op_regs->usbsts);

	if (status & USBSTS_ERR)
		irq_process_error(udc);

	if (status & USBSTS_RESET)
		irq_process_reset(udc);

	if (status & USBSTS_PORT_CHANGE)
		irq_process_port_change(udc);

	if (status & USBSTS_INT)
		irq_process_tr_complete(udc);

	if (status & USBSTS_SUSPEND)
		irq_process_suspend(udc);

	spin_unlock(&udc->lock);

	return IRQ_HANDLED;
}

static irqreturn_t mv_udc_vbus_irq(int irq, void *dev)
{
	struct mv_udc *udc = (struct mv_udc *)dev;

	/* polling VBUS and initializing the phy may take too long in irq context */
	if (udc->qwork)
		queue_work(udc->qwork, &udc->vbus_work);

	return IRQ_HANDLED;
}

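/* worker for the VBUS irq: poll the cable state and (dis)connect */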
static void mv_udc_vbus_work(struct work_struct *work)
{
	struct mv_udc *udc;
	unsigned int vbus;

	udc = container_of(work, struct mv_udc, vbus_work);
	if (!udc->pdata->vbus)
		return;

	vbus = udc->pdata->vbus->poll();
	dev_info(&udc->dev->dev, "vbus is %d\n", vbus);

	if (vbus == VBUS_HIGH)
		mv_udc_vbus_session(&udc->gadget, 1);
	else if (vbus == VBUS_LOW)
		mv_udc_vbus_session(&udc->gadget, 0);
}

/* release device structure */
static void gadget_release(struct device *_dev)
{
	struct mv_udc *udc = the_controller;

	complete(udc->done);
}

static int __devexit mv_udc_remove(struct platform_device *dev)
{
	struct mv_udc *udc = the_controller;
	int clk_i;

	usb_del_gadget_udc(&udc->gadget);

	if (udc->qwork) {
		flush_workqueue(udc->qwork);
		destroy_workqueue(udc->qwork);
	}

	/*
	 * If a transceiver is in use, the vbus irq was never requested
	 * by the udc driver, so there is nothing to free here.
	 */
	if (udc->pdata && udc->pdata->vbus
		&& udc->clock_gating && IS_ERR_OR_NULL(udc->transceiver))
		free_irq(udc->pdata->vbus->irq, udc);

	/* free memory allocated in probe */
	if (udc->dtd_pool)
		dma_pool_destroy(udc->dtd_pool);

	if (udc->ep_dqh)
		dma_free_coherent(&dev->dev, udc->ep_dqh_size,
			udc->ep_dqh, udc->ep_dqh_dma);

	kfree(udc->eps);

	if (udc->irq)
		free_irq(udc->irq, udc);

	mv_udc_disable(udc);

	if (udc->cap_regs)
		iounmap(udc->cap_regs);
	if (udc->phy_regs)
		iounmap(udc->phy_regs);

	if (udc->status_req) {
		kfree(udc->status_req->req.buf);
		kfree(udc->status_req);
	}

	for (clk_i = 0; clk_i < udc->clknum; clk_i++)
		clk_put(udc->clk[clk_i]);

	device_unregister(&udc->gadget.dev);

	/* free dev, wait for the release() finished */
	wait_for_completion(udc->done);
	kfree(udc);
	the_controller = NULL;

	return 0;
}

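/*
 * Probe: map the capability/operational/phy registers, allocate the
 * dQH area and dTD pool, set up ep0 and the gadget, and wire up the
 * device and (optionally) VBUS interrupts.
 */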
static int __devinit mv_udc_probe(struct platform_device *dev)
{
	struct mv_usb_platform_data *pdata = dev->dev.platform_data;
	struct mv_udc *udc;
	int retval = 0;
	int clk_i = 0;
	struct resource *r;
	size_t size;

	if (pdata == NULL) {
		dev_err(&dev->dev, "missing platform_data\n");
		return -ENODEV;
	}

	size = sizeof(*udc) + sizeof(struct clk *) * pdata->clknum;
	udc = kzalloc(size, GFP_KERNEL);
	if (udc == NULL) {
		dev_err(&dev->dev, "failed to allocate memory for udc\n");
		return -ENOMEM;
	}

	the_controller = udc;
	udc->done = &release_done;
	udc->pdata = dev->dev.platform_data;
	spin_lock_init(&udc->lock);

	udc->dev = dev;

#ifdef CONFIG_USB_OTG_UTILS
	if (pdata->mode == MV_USB_MODE_OTG)
		udc->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
#endif

	udc->clknum = pdata->clknum;
	for (clk_i = 0; clk_i < udc->clknum; clk_i++) {
		udc->clk[clk_i] = clk_get(&dev->dev, pdata->clkname[clk_i]);
		if (IS_ERR(udc->clk[clk_i])) {
			retval = PTR_ERR(udc->clk[clk_i]);
			goto err_put_clk;
		}
	}

	r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "capregs");
	if (r == NULL) {
		dev_err(&dev->dev, "no I/O memory resource defined\n");
		retval = -ENODEV;
		goto err_put_clk;
	}

	udc->cap_regs = (struct mv_cap_regs __iomem *)
		ioremap(r->start, resource_size(r));
	if (udc->cap_regs == NULL) {
		dev_err(&dev->dev, "failed to map I/O memory\n");
		retval = -EBUSY;
		goto err_put_clk;
	}

	r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "phyregs");
	if (r == NULL) {
		dev_err(&dev->dev, "no phy I/O memory resource defined\n");
		retval = -ENODEV;
		goto err_iounmap_capreg;
	}

	udc->phy_regs = ioremap(r->start, resource_size(r));
	if (udc->phy_regs == NULL) {
		dev_err(&dev->dev, "failed to map phy I/O memory\n");
		retval = -EBUSY;
		goto err_iounmap_capreg;
	}

	/* we will access the controller registers, so enable the clk */
	retval = mv_udc_enable_internal(udc);
	if (retval)
		goto err_iounmap_phyreg;

	udc->op_regs =
		(struct mv_op_regs __iomem *)((unsigned long)udc->cap_regs
		+ (readl(&udc->cap_regs->caplength_hciversion)
			& CAPLENGTH_MASK));
	udc->max_eps = readl(&udc->cap_regs->dccparams) & DCCPARAMS_DEN_MASK;

	/*
	 * Some platforms use USB to download the boot image and may not
	 * disconnect the USB gadget before loading the kernel, so stop
	 * the udc here first.
	 */
	udc_stop(udc);
	writel(0xFFFFFFFF, &udc->op_regs->usbsts);

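	/*
	 * dQH area: the controller indexes one queue head per endpoint
	 * and direction, so reserve max_eps * 2 of them, rounded up to
	 * the DQH_ALIGNMENT the endpoint list requires.
	 */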
	size = udc->max_eps * sizeof(struct mv_dqh) * 2;
	size = (size + DQH_ALIGNMENT - 1) & ~(DQH_ALIGNMENT - 1);
	udc->ep_dqh = dma_alloc_coherent(&dev->dev, size,
		&udc->ep_dqh_dma, GFP_KERNEL);
	if (udc->ep_dqh == NULL) {
		dev_err(&dev->dev, "allocate dQH memory failed\n");
		retval = -ENOMEM;
		goto err_disable_clock;
	}
	udc->ep_dqh_size = size;

	/* create dTD dma_pool resource */
	udc->dtd_pool = dma_pool_create("mv_dtd",
			&dev->dev,
			sizeof(struct mv_dtd),
			DTD_ALIGNMENT,
			DMA_BOUNDARY);
	if (!udc->dtd_pool) {
		retval = -ENOMEM;
		goto err_free_dma;
	}

	size = udc->max_eps * sizeof(struct mv_ep) * 2;
	udc->eps = kzalloc(size, GFP_KERNEL);
	if (udc->eps == NULL) {
		dev_err(&dev->dev, "allocate ep memory failed\n");
		retval = -ENOMEM;
		goto err_destroy_dma;
	}

	/* initialize ep0 status request structure */
	udc->status_req = kzalloc(sizeof(struct mv_req), GFP_KERNEL);
	if (!udc->status_req) {
		dev_err(&dev->dev, "allocate status_req memory failed\n");
		retval = -ENOMEM;
		goto err_free_eps;
	}
	INIT_LIST_HEAD(&udc->status_req->queue);

	/* allocate a small amount of memory to get valid address */
	udc->status_req->req.buf = kzalloc(8, GFP_KERNEL);
	if (!udc->status_req->req.buf) {
		retval = -ENOMEM;
		goto err_free_status_req;
	}
	udc->status_req->req.dma = DMA_ADDR_INVALID;

	udc->resume_state = USB_STATE_NOTATTACHED;
	udc->usb_state = USB_STATE_POWERED;
	udc->ep0_dir = EP_DIR_OUT;
	udc->remote_wakeup = 0;

	r = platform_get_resource(udc->dev, IORESOURCE_IRQ, 0);
	if (r == NULL) {
		dev_err(&dev->dev, "no IRQ resource defined\n");
		retval = -ENODEV;
		goto err_free_status_req;
	}
	udc->irq = r->start;
	if (request_irq(udc->irq, mv_udc_irq,
		IRQF_SHARED, driver_name, udc)) {
		dev_err(&dev->dev, "Request irq %d for UDC failed\n",
			udc->irq);
		retval = -ENODEV;
		goto err_free_status_req;
	}

	/* initialize gadget structure */
	udc->gadget.ops = &mv_ops;	/* usb_gadget_ops */
	udc->gadget.ep0 = &udc->eps[0].ep;	/* gadget ep0 */
	INIT_LIST_HEAD(&udc->gadget.ep_list);	/* ep_list */
	udc->gadget.speed = USB_SPEED_UNKNOWN;	/* speed */
	udc->gadget.max_speed = USB_SPEED_HIGH;	/* support dual speed */

	/* the "gadget" abstracts/virtualizes the controller */
	dev_set_name(&udc->gadget.dev, "gadget");
	udc->gadget.dev.parent = &dev->dev;
	udc->gadget.dev.dma_mask = dev->dev.dma_mask;
	udc->gadget.dev.release = gadget_release;
	udc->gadget.name = driver_name;	/* gadget name */

	retval = device_register(&udc->gadget.dev);
	if (retval)
		goto err_free_irq;

	eps_init(udc);

	/* VBUS detect: we can disable/enable clock on demand. */
	if (!IS_ERR_OR_NULL(udc->transceiver))
		udc->clock_gating = 1;
	else if (pdata->vbus) {
		udc->clock_gating = 1;
		retval = request_threaded_irq(pdata->vbus->irq, NULL,
			mv_udc_vbus_irq, IRQF_ONESHOT, "vbus", udc);
		if (retval) {
			dev_info(&dev->dev,
				"Cannot request irq for VBUS, "
				"disable clock gating\n");
			udc->clock_gating = 0;
		}

		udc->qwork = create_singlethread_workqueue("mv_udc_queue");
		if (!udc->qwork) {
			dev_err(&dev->dev, "cannot create workqueue\n");
			retval = -ENOMEM;
			goto err_unregister;
		}

		INIT_WORK(&udc->vbus_work, mv_udc_vbus_work);
	}

	/*
	 * When clock gating is supported, we can disable clk and phy.
	 * If not, it means that VBUS detection is not supported, we
	 * have to enable vbus active all the time to let controller work.
	 */
	if (udc->clock_gating)
		mv_udc_disable_internal(udc);
	else
		udc->vbus_active = 1;

	retval = usb_add_gadget_udc(&dev->dev, &udc->gadget);
	if (retval)
		goto err_unregister;

	dev_info(&dev->dev, "successfully probed UDC device %s clock gating\n",
		udc->clock_gating ? "with" : "without");

	return 0;

err_unregister:
	if (udc->pdata && udc->pdata->vbus
		&& udc->clock_gating && IS_ERR_OR_NULL(udc->transceiver))
		free_irq(pdata->vbus->irq, udc);
	device_unregister(&udc->gadget.dev);
err_free_irq:
	free_irq(udc->irq, udc);
err_free_status_req:
	kfree(udc->status_req->req.buf);
	kfree(udc->status_req);
err_free_eps:
	kfree(udc->eps);
err_destroy_dma:
	dma_pool_destroy(udc->dtd_pool);
err_free_dma:
	dma_free_coherent(&dev->dev, udc->ep_dqh_size,
		udc->ep_dqh, udc->ep_dqh_dma);
err_disable_clock:
	mv_udc_disable_internal(udc);
err_iounmap_phyreg:
	iounmap(udc->phy_regs);
err_iounmap_capreg:
	iounmap(udc->cap_regs);
err_put_clk:
	for (clk_i--; clk_i >= 0; clk_i--)
		clk_put(udc->clk[clk_i]);
	the_controller = NULL;
	kfree(udc);
	return retval;
}

#ifdef CONFIG_PM
static int mv_udc_suspend(struct device *_dev)
{
	struct mv_udc *udc = the_controller;

	/* if OTG is enabled, the following will be done in OTG driver */
	if (!IS_ERR_OR_NULL(udc->transceiver))
		return 0;

	if (udc->pdata->vbus && udc->pdata->vbus->poll)
		if (udc->pdata->vbus->poll() == VBUS_HIGH) {
			dev_info(&udc->dev->dev, "USB cable is connected!\n");
			return -EAGAIN;
		}

	/*
	 * The udc may suspend only when the cable is unplugged, so with
	 * clock gating enabled (clock_gating == 1) the hardware is
	 * already disabled and nothing more needs to be done here.
	 */
	if (!udc->clock_gating) {
		udc_stop(udc);

		spin_lock_irq(&udc->lock);
		/* stop all usb activities */
		stop_activity(udc, udc->driver);
		spin_unlock_irq(&udc->lock);

		mv_udc_disable_internal(udc);
	}

	return 0;
}

static int mv_udc_resume(struct device *_dev)
{
	struct mv_udc *udc = the_controller;
	int retval;

	/* if OTG is enabled, the following will be done in OTG driver */
	if (!IS_ERR_OR_NULL(udc->transceiver))
		return 0;

	if (!udc->clock_gating) {
		retval = mv_udc_enable_internal(udc);
		if (retval)
			return retval;

		if (udc->driver && udc->softconnect) {
			udc_reset(udc);
			ep0_reset(udc);
			udc_start(udc);
		}
	}

	return 0;
}

static const struct dev_pm_ops mv_udc_pm_ops = {
	.suspend = mv_udc_suspend,
	.resume = mv_udc_resume,
};
#endif

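/* on shutdown, force the controller mode field back to idle */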
static void mv_udc_shutdown(struct platform_device *dev)
{
	struct mv_udc *udc = the_controller;
	u32 mode;

	/* reset controller mode to IDLE */
	mode = readl(&udc->op_regs->usbmode);
	mode &= ~3;
	writel(mode, &udc->op_regs->usbmode);
}

static struct platform_driver udc_driver = {
	.probe		= mv_udc_probe,
	.remove		= __devexit_p(mv_udc_remove),
	.shutdown	= mv_udc_shutdown,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= "mv-udc",
#ifdef CONFIG_PM
		.pm	= &mv_udc_pm_ops,
#endif
	},
};

module_platform_driver(udc_driver);
MODULE_ALIAS("platform:mv-udc");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");