
/**
 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
 *
 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
 *
 * Authors: Felipe Balbi <balbi@ti.com>,
 *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2, as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

#include "core.h"
#include "gadget.h"
#include "io.h"

#define DMA_ADDR_INVALID	(~(dma_addr_t)0)

/**
 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
 * @dwc: pointer to our context structure
 * @mode: the mode to set (J, K, SE0 NAK, Force Enable)
 *
 * Caller should take care of locking. This function will
 * return 0 on success or -EINVAL if a wrong Test Selector
 * is passed.
 */
int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
{
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_TSTCTRL_MASK;

	switch (mode) {
	case TEST_J:
	case TEST_K:
	case TEST_SE0_NAK:
	case TEST_PACKET:
	case TEST_FORCE_EN:
		reg |= mode << 1;
		break;
	default:
		return -EINVAL;
	}

	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	return 0;
}

void dwc3_map_buffer_to_dma(struct dwc3_request *req)
{
	struct dwc3 *dwc = req->dep->dwc;

	if (req->request.length == 0) {
		/* req->request.dma = dwc->setup_buf_addr; */
		return;
	}

	if (req->request.num_sgs) {
		int mapped;

		mapped = dma_map_sg(dwc->dev, req->request.sg,
				req->request.num_sgs,
				req->direction ? DMA_TO_DEVICE
				: DMA_FROM_DEVICE);
		if (mapped < 0) {
			dev_err(dwc->dev, "failed to map SGs\n");
			return;
		}

		req->request.num_mapped_sgs = mapped;
		return;
	}

	if (req->request.dma == DMA_ADDR_INVALID) {
		req->request.dma = dma_map_single(dwc->dev, req->request.buf,
				req->request.length, req->direction
				? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		req->mapped = true;
	}
}

void dwc3_unmap_buffer_from_dma(struct dwc3_request *req)
{
	struct dwc3 *dwc = req->dep->dwc;

	if (req->request.length == 0) {
		req->request.dma = DMA_ADDR_INVALID;
		return;
	}

	if (req->request.num_mapped_sgs) {
		req->request.dma = DMA_ADDR_INVALID;
		dma_unmap_sg(dwc->dev, req->request.sg,
				req->request.num_mapped_sgs,
				req->direction ? DMA_TO_DEVICE
				: DMA_FROM_DEVICE);

		req->request.num_mapped_sgs = 0;
		return;
	}

	if (req->mapped) {
		dma_unmap_single(dwc->dev, req->request.dma,
				req->request.length, req->direction
				? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		req->mapped = false;
		req->request.dma = DMA_ADDR_INVALID;
	}
}

void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
		int status)
{
	struct dwc3 *dwc = dep->dwc;

	if (req->queued) {
		if (req->request.num_mapped_sgs)
			dep->busy_slot += req->request.num_mapped_sgs;
		else
			dep->busy_slot++;

		/*
		 * Skip the LINK TRB. We can't use req->trb and check for
		 * DWC3_TRBCTL_LINK_TRB because it points to the TRB we just
		 * completed (not the LINK TRB).
		 */
		if (((dep->busy_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
				usb_endpoint_xfer_isoc(dep->desc))
			dep->busy_slot++;
	}
	list_del(&req->list);
	req->trb = NULL;

	if (req->request.status == -EINPROGRESS)
		req->request.status = status;

	dwc3_unmap_buffer_from_dma(req);

	dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
			req, dep->name, req->request.actual,
			req->request.length, status);

	spin_unlock(&dwc->lock);
	req->request.complete(&req->dep->endpoint, &req->request);
	spin_lock(&dwc->lock);
}

static const char *dwc3_gadget_ep_cmd_string(u8 cmd)
{
	switch (cmd) {
	case DWC3_DEPCMD_DEPSTARTCFG:
		return "Start New Configuration";
	case DWC3_DEPCMD_ENDTRANSFER:
		return "End Transfer";
	case DWC3_DEPCMD_UPDATETRANSFER:
		return "Update Transfer";
	case DWC3_DEPCMD_STARTTRANSFER:
		return "Start Transfer";
	case DWC3_DEPCMD_CLEARSTALL:
		return "Clear Stall";
	case DWC3_DEPCMD_SETSTALL:
		return "Set Stall";
	case DWC3_DEPCMD_GETSEQNUMBER:
		return "Get Data Sequence Number";
	case DWC3_DEPCMD_SETTRANSFRESOURCE:
		return "Set Endpoint Transfer Resource";
	case DWC3_DEPCMD_SETEPCONFIG:
		return "Set Endpoint Configuration";
	default:
		return "UNKNOWN command";
	}
}

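/*
 * dwc3_send_gadget_ep_cmd - issue a command to a physical endpoint
 *
 * Writes the three parameter registers for the given endpoint, kicks the
 * command with CMDACT set, and busy-polls DEPCMD until the controller clears
 * CMDACT (udelay(1) per iteration, roughly 500us worst case). It must not
 * sleep because it is also called from interrupt context.
 */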
int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
		unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
{
	struct dwc3_ep *dep = dwc->eps[ep];
	u32 timeout = 500;
	u32 reg;

	dev_vdbg(dwc->dev, "%s: cmd '%s' params %08x %08x %08x\n",
			dep->name,
			dwc3_gadget_ep_cmd_string(cmd), params->param0,
			params->param1, params->param2);

	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);

	dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
	do {
		reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
		if (!(reg & DWC3_DEPCMD_CMDACT)) {
			dev_vdbg(dwc->dev, "Command Complete --> %d\n",
					DWC3_DEPCMD_STATUS(reg));
			return 0;
		}

		/*
		 * We can't sleep here, because it is also called from
		 * interrupt context.
		 */
		timeout--;
		if (!timeout)
			return -ETIMEDOUT;

		udelay(1);
	} while (1);
}

static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
		struct dwc3_trb_hw *trb)
{
	u32 offset = (char *) trb - (char *) dep->trb_pool;

	return dep->trb_pool_dma + offset;
}

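/*
 * TRB pool handling: every non-control endpoint gets a coherent DMA pool of
 * DWC3_TRB_NUM TRBs. Physical endpoints 0 and 1 (the two halves of ep0) skip
 * the allocation.
 */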
static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;

	if (dep->trb_pool)
		return 0;

	if (dep->number == 0 || dep->number == 1)
		return 0;

	dep->trb_pool = dma_alloc_coherent(dwc->dev,
			sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
			&dep->trb_pool_dma, GFP_KERNEL);
	if (!dep->trb_pool) {
		dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
				dep->name);
		return -ENOMEM;
	}

	return 0;
}

static void dwc3_free_trb_pool(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;

	dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
			dep->trb_pool, dep->trb_pool_dma);

	dep->trb_pool = NULL;
	dep->trb_pool_dma = 0;
}

static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;
	u32 cmd;

	memset(&params, 0x00, sizeof(params));

	if (dep->number != 1) {
		cmd = DWC3_DEPCMD_DEPSTARTCFG;
		/* XferRscIdx == 0 for ep0 and 2 for the remaining */
		if (dep->number > 1) {
			if (dwc->start_config_issued)
				return 0;
			dwc->start_config_issued = true;
			cmd |= DWC3_DEPCMD_PARAM(2);
		}

		return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
	}

	return 0;
}

static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
		const struct usb_endpoint_descriptor *desc,
		const struct usb_ss_ep_comp_descriptor *comp_desc)
{
	struct dwc3_gadget_ep_cmd_params params;

	memset(&params, 0x00, sizeof(params));

	params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
		| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc))
		| DWC3_DEPCFG_BURST_SIZE(dep->endpoint.maxburst);

	params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
		| DWC3_DEPCFG_XFER_NOT_READY_EN;

	if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
		params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
			| DWC3_DEPCFG_STREAM_EVENT_EN;
		dep->stream_capable = true;
	}

	if (usb_endpoint_xfer_isoc(desc))
		params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;

	/*
	 * We are doing 1:1 mapping for endpoints, meaning
	 * Physical Endpoint 2 maps to Logical Endpoint 2 and
	 * so on. We consider the direction bit as part of the physical
	 * endpoint number. So USB endpoint 0x81 is 0x03.
	 */
	params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);

	/*
	 * We must use the lower 16 TX FIFOs even though
	 * HW might have more
	 */
	if (dep->direction)
		params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);

	if (desc->bInterval) {
		params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
		dep->interval = 1 << (desc->bInterval - 1);
	}

	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_SETEPCONFIG, &params);
}

static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;

	memset(&params, 0x00, sizeof(params));

	params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);

	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
}

/**
 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
 * @dep: endpoint to be initialized
 * @desc: USB Endpoint Descriptor
 *
 * Caller should take care of locking
 */
static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
		const struct usb_endpoint_descriptor *desc,
		const struct usb_ss_ep_comp_descriptor *comp_desc)
{
	struct dwc3 *dwc = dep->dwc;
	u32 reg;
	int ret = -ENOMEM;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		ret = dwc3_gadget_start_config(dwc, dep);
		if (ret)
			return ret;
	}

	ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc);
	if (ret)
		return ret;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		struct dwc3_trb_hw *trb_st_hw;
		struct dwc3_trb_hw *trb_link_hw;
		struct dwc3_trb trb_link;

		ret = dwc3_gadget_set_xfer_resource(dwc, dep);
		if (ret)
			return ret;

		dep->desc = desc;
		dep->comp_desc = comp_desc;
		dep->type = usb_endpoint_type(desc);
		dep->flags |= DWC3_EP_ENABLED;

		reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
		reg |= DWC3_DALEPENA_EP(dep->number);
		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

		if (!usb_endpoint_xfer_isoc(desc))
			return 0;

		memset(&trb_link, 0, sizeof(trb_link));

		/* Link TRB for ISOC. The HWO bit is never reset */
		trb_st_hw = &dep->trb_pool[0];

		trb_link.bplh = dwc3_trb_dma_offset(dep, trb_st_hw);
		trb_link.trbctl = DWC3_TRBCTL_LINK_TRB;
		trb_link.hwo = true;

		trb_link_hw = &dep->trb_pool[DWC3_TRB_NUM - 1];
		dwc3_trb_to_hw(&trb_link, trb_link_hw);
	}

	return 0;
}

static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum);

static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_request *req;

	if (!list_empty(&dep->req_queued))
		dwc3_stop_active_transfer(dwc, dep->number);

	while (!list_empty(&dep->request_list)) {
		req = next_request(&dep->request_list);

		dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
	}
}

/**
 * __dwc3_gadget_ep_disable - Disables a HW endpoint
 * @dep: the endpoint to disable
 *
 * This function also removes requests which are currently processed by the
 * hardware and those which are not yet scheduled.
 *
 * Caller should take care of locking.
 */
static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;
	u32 reg;

	dwc3_remove_requests(dwc, dep);

	reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
	reg &= ~DWC3_DALEPENA_EP(dep->number);
	dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

	dep->stream_capable = false;
	dep->desc = NULL;
	dep->comp_desc = NULL;
	dep->type = 0;
	dep->flags = 0;

	return 0;
}

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	return -EINVAL;
}

static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
{
	return -EINVAL;
}

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_ep_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct dwc3_ep *dep;
	struct dwc3 *dwc;
	unsigned long flags;
	int ret;

	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	if (!desc->wMaxPacketSize) {
		pr_debug("dwc3: missing wMaxPacketSize\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		strncat(dep->name, "-control", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_ISOC:
		strncat(dep->name, "-isoc", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_BULK:
		strncat(dep->name, "-bulk", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_INT:
		strncat(dep->name, "-int", sizeof(dep->name));
		break;
	default:
		dev_err(dwc->dev, "invalid endpoint transfer type\n");
	}

	if (dep->flags & DWC3_EP_ENABLED) {
		dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n",
				dep->name);
		return 0;
	}

	dev_vdbg(dwc->dev, "Enabling %s\n", dep->name);

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_ep_disable(struct usb_ep *ep)
{
	struct dwc3_ep *dep;
	struct dwc3 *dwc;
	unsigned long flags;
	int ret;

	if (!ep) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n",
				dep->name);
		return 0;
	}

	snprintf(dep->name, sizeof(dep->name), "ep%d%s",
			dep->number >> 1,
			(dep->number & 1) ? "in" : "out");

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_disable(dep);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
		gfp_t gfp_flags)
{
	struct dwc3_request *req;
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req) {
		dev_err(dwc->dev, "not enough memory\n");
		return NULL;
	}

	req->epnum = dep->number;
	req->dep = dep;
	req->request.dma = DMA_ADDR_INVALID;

	return &req->request;
}

static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request *req = to_dwc3_request(request);

	kfree(req);
}

/**
 * dwc3_prepare_one_trb - setup one TRB from one request
 * @dep: endpoint for which this request is prepared
 * @req: dwc3_request pointer
 */
static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
		struct dwc3_request *req, dma_addr_t dma,
		unsigned length, unsigned last, unsigned chain)
{
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_trb_hw *trb_hw;
	struct dwc3_trb trb;
	unsigned int cur_slot;

	dev_vdbg(dwc->dev, "%s: req %p dma %08llx length %d%s%s\n",
			dep->name, req, (unsigned long long) dma,
			length, last ? " last" : "",
			chain ? " chain" : "");

	trb_hw = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
	cur_slot = dep->free_slot;
	dep->free_slot++;

	/* Skip the LINK-TRB on ISOC */
	if (((cur_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
			usb_endpoint_xfer_isoc(dep->desc))
		return;

	memset(&trb, 0, sizeof(trb));
	if (!req->trb) {
		dwc3_gadget_move_request_queued(req);
		req->trb = trb_hw;
		req->trb_dma = dwc3_trb_dma_offset(dep, trb_hw);
	}

	if (usb_endpoint_xfer_isoc(dep->desc)) {
		trb.isp_imi = true;
		trb.csp = true;
	} else {
		trb.chn = chain;
		trb.lst = last;
	}

	if (usb_endpoint_xfer_bulk(dep->desc) && dep->stream_capable)
		trb.sid_sofn = req->request.stream_id;

	switch (usb_endpoint_type(dep->desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		trb.trbctl = DWC3_TRBCTL_CONTROL_SETUP;
		break;

	case USB_ENDPOINT_XFER_ISOC:
		trb.trbctl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;

		/* IOC every DWC3_TRB_NUM / 4 so we can refill */
		if (!(cur_slot % (DWC3_TRB_NUM / 4)))
			trb.ioc = last;
		break;

	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		trb.trbctl = DWC3_TRBCTL_NORMAL;
		break;
	default:
		/*
		 * This is only possible with faulty memory because we
		 * checked it already :)
		 */
		BUG();
	}

	trb.length = length;
	trb.bplh = dma;
	trb.hwo = true;

	dwc3_trb_to_hw(&trb, trb_hw);
}

/*
 * dwc3_prepare_trbs - setup TRBs from requests
 * @dep: endpoint for which requests are being prepared
 * @starting: true if the endpoint is idle and no requests are queued.
 *
 * The function goes through the request list and sets up TRBs for the
 * transfers. It returns once there are no more TRBs available or it runs
 * out of requests.
 */
static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
{
	struct dwc3_request *req, *n;
	u32 trbs_left;
	unsigned int last_one = 0;

	BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);

	/* the first request must not be queued */
	trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;

	/*
	 * If busy_slot and free_slot are equal the ring is either full or
	 * empty. If we are starting to process requests then we are empty,
	 * otherwise we are full and don't do anything.
	 */
	if (!trbs_left) {
		if (!starting)
			return;
		trbs_left = DWC3_TRB_NUM;
		/*
		 * In case we start from scratch, we queue the ISOC requests
		 * starting from slot 1. This is done because we use ring
		 * buffer and have no LST bit to stop us. Instead, we place
		 * IOC bit every TRB_NUM/4. We try to avoid having an
		 * interrupt after the first request so we start at slot 1
		 * and have 7 requests proceed before we hit the first IOC.
		 * Other transfer types don't use the ring buffer and are
		 * processed from the first TRB until the last one. Since we
		 * don't wrap around we have to start at the beginning.
		 */
		if (usb_endpoint_xfer_isoc(dep->desc)) {
			dep->busy_slot = 1;
			dep->free_slot = 1;
		} else {
			dep->busy_slot = 0;
			dep->free_slot = 0;
		}
	}

	/* The last TRB is a link TRB, not used for xfer */
	if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->desc))
		return;

	list_for_each_entry_safe(req, n, &dep->request_list, list) {
		unsigned length;
		dma_addr_t dma;

		if (req->request.num_mapped_sgs > 0) {
			struct usb_request *request = &req->request;
			struct scatterlist *sg = request->sg;
			struct scatterlist *s;
			int i;

			for_each_sg(sg, s, request->num_mapped_sgs, i) {
				unsigned chain = true;

				length = sg_dma_len(s);
				dma = sg_dma_address(s);

				if (i == (request->num_mapped_sgs - 1)
						|| sg_is_last(s)) {
					last_one = true;
					chain = false;
				}

				trbs_left--;
				if (!trbs_left)
					last_one = true;

				if (last_one)
					chain = false;

				dwc3_prepare_one_trb(dep, req, dma, length,
						last_one, chain);

				if (last_one)
					break;
			}
		} else {
			dma = req->request.dma;
			length = req->request.length;
			trbs_left--;

			if (!trbs_left)
				last_one = 1;

			/* Is this the last request? */
			if (list_is_last(&req->list, &dep->request_list))
				last_one = 1;

			dwc3_prepare_one_trb(dep, req, dma, length,
					last_one, false);

			if (last_one)
				break;
		}
	}
}

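/*
 * __dwc3_gadget_kick_transfer - prepare TRBs and start (or update) a transfer
 *
 * With start_new set this issues Start Transfer for an idle endpoint and
 * records the returned transfer resource index; otherwise it issues Update
 * Transfer for an already running one. If no request could be mapped to a
 * TRB, the endpoint is flagged DWC3_EP_PENDING_REQUEST and the transfer is
 * kicked later from the queue path.
 */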
static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
		int start_new)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3_request *req;
	struct dwc3 *dwc = dep->dwc;
	int ret;
	u32 cmd;

	if (start_new && (dep->flags & DWC3_EP_BUSY)) {
		dev_vdbg(dwc->dev, "%s: endpoint busy\n", dep->name);
		return -EBUSY;
	}
	dep->flags &= ~DWC3_EP_PENDING_REQUEST;

	/*
	 * If we are getting here after a short-out-packet we don't enqueue any
	 * new requests as we try to set the IOC bit only on the last request.
	 */
	if (start_new) {
		if (list_empty(&dep->req_queued))
			dwc3_prepare_trbs(dep, start_new);

		/* req points to the first request which will be sent */
		req = next_request(&dep->req_queued);
	} else {
		dwc3_prepare_trbs(dep, start_new);

		/*
		 * req points to the first request where HWO changed
		 * from 0 to 1
		 */
		req = next_request(&dep->req_queued);
	}
	if (!req) {
		dep->flags |= DWC3_EP_PENDING_REQUEST;
		return 0;
	}

	memset(&params, 0, sizeof(params));
	params.param0 = upper_32_bits(req->trb_dma);
	params.param1 = lower_32_bits(req->trb_dma);

	if (start_new)
		cmd = DWC3_DEPCMD_STARTTRANSFER;
	else
		cmd = DWC3_DEPCMD_UPDATETRANSFER;

	cmd |= DWC3_DEPCMD_PARAM(cmd_param);
	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
	if (ret < 0) {
		dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");

		/*
		 * FIXME we need to iterate over the list of requests
		 * here and stop, unmap, free and del each of the linked
		 * requests instead of what we do now.
		 */
		dwc3_unmap_buffer_from_dma(req);
		list_del(&req->list);
		return ret;
	}

	dep->flags |= DWC3_EP_BUSY;
	dep->res_trans_idx = dwc3_gadget_ep_get_transfer_index(dwc,
			dep->number);

	WARN_ON_ONCE(!dep->res_trans_idx);

	return 0;
}

static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
{
	req->request.actual = 0;
	req->request.status = -EINPROGRESS;
	req->direction = dep->direction;
	req->epnum = dep->number;

	/*
	 * We only add to our list of requests now and
	 * start consuming the list once we get XferNotReady
	 * IRQ.
	 *
	 * That way, we avoid doing anything that we don't need
	 * to do now and defer it until the point we receive a
	 * particular token from the Host side.
	 *
	 * This will also avoid Host cancelling URBs due to too
	 * many NAKs.
	 */
	dwc3_map_buffer_to_dma(req);
	list_add_tail(&req->list, &dep->request_list);

	/*
	 * There is one special case: XferNotReady with
	 * empty list of requests. We need to kick the
	 * transfer here in that situation, otherwise
	 * we will be NAKing forever.
	 *
	 * If we get XferNotReady before gadget driver
	 * has a chance to queue a request, we will ACK
	 * the IRQ but won't be able to receive the data
	 * until the next request is queued. The following
	 * code is handling exactly that.
	 */
	if (dep->flags & DWC3_EP_PENDING_REQUEST) {
		int ret;
		int start_trans;

		start_trans = 1;
		if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
				(dep->flags & DWC3_EP_BUSY))
			start_trans = 0;

		ret = __dwc3_gadget_kick_transfer(dep, 0, start_trans);
		if (ret && ret != -EBUSY) {
			struct dwc3 *dwc = dep->dwc;

			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
					dep->name);
		}
	}

	return 0;
}

static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
		gfp_t gfp_flags)
{
	struct dwc3_request *req = to_dwc3_request(request);
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	unsigned long flags;
	int ret;

	if (!dep->desc) {
		dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
				request, ep->name);
		return -ESHUTDOWN;
	}

	dev_vdbg(dwc->dev, "queueing request %p to %s length %d\n",
			request, ep->name, request->length);

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_queue(dep, req);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request *req = to_dwc3_request(request);
	struct dwc3_request *r = NULL;
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&dwc->lock, flags);

	list_for_each_entry(r, &dep->request_list, list) {
		if (r == req)
			break;
	}

	if (r != req) {
		list_for_each_entry(r, &dep->req_queued, list) {
			if (r == req)
				break;
		}
		if (r == req) {
			/* wait until it is processed */
			dwc3_stop_active_transfer(dwc, dep->number);
			goto out0;
		}
		dev_err(dwc->dev, "request %p was not queued to %s\n",
				request, ep->name);
		ret = -EINVAL;
		goto out0;
	}

	/* giveback the request */
	dwc3_gadget_giveback(dep, req, -ECONNRESET);

out0:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

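/*
 * __dwc3_gadget_ep_set_halt - set or clear the STALL condition on an endpoint
 *
 * Issues DWC3_DEPCMD_SETSTALL or DWC3_DEPCMD_CLEARSTALL and updates the
 * DWC3_EP_STALL flag accordingly. Stalling ep0 also rewinds the control
 * state machine back to the Setup phase, and a wedged endpoint is never
 * unstalled from here.
 */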
int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3 *dwc = dep->dwc;
	int ret;

	memset(&params, 0x00, sizeof(params));

	if (value) {
		if (dep->number == 0 || dep->number == 1) {
			/*
			 * Whenever EP0 is stalled, we will restart
			 * the state machine, thus moving back to
			 * Setup Phase
			 */
			dwc->ep0state = EP0_SETUP_PHASE;
		}

		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
				DWC3_DEPCMD_SETSTALL, &params);
		if (ret)
			dev_err(dwc->dev, "failed to %s STALL on %s\n",
					value ? "set" : "clear",
					dep->name);
		else
			dep->flags |= DWC3_EP_STALL;
	} else {
		if (dep->flags & DWC3_EP_WEDGE)
			return 0;

		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
				DWC3_DEPCMD_CLEARSTALL, &params);
		if (ret)
			dev_err(dwc->dev, "failed to %s STALL on %s\n",
					value ? "set" : "clear",
					dep->name);
		else
			dep->flags &= ~DWC3_EP_STALL;
	}

	return ret;
}

static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dwc->lock, flags);

	if (usb_endpoint_xfer_isoc(dep->desc)) {
		dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
		ret = -EINVAL;
		goto out;
	}

	ret = __dwc3_gadget_ep_set_halt(dep, value);
out:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);

	dep->flags |= DWC3_EP_WEDGE;

	return dwc3_gadget_ep_set_halt(ep, 1);
}

/* -------------------------------------------------------------------------- */

static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
	.bLength	= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bmAttributes	= USB_ENDPOINT_XFER_CONTROL,
};

static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
	.enable		= dwc3_gadget_ep0_enable,
	.disable	= dwc3_gadget_ep0_disable,
	.alloc_request	= dwc3_gadget_ep_alloc_request,
	.free_request	= dwc3_gadget_ep_free_request,
	.queue		= dwc3_gadget_ep0_queue,
	.dequeue	= dwc3_gadget_ep_dequeue,
	.set_halt	= dwc3_gadget_ep_set_halt,
	.set_wedge	= dwc3_gadget_ep_set_wedge,
};

static const struct usb_ep_ops dwc3_gadget_ep_ops = {
	.enable		= dwc3_gadget_ep_enable,
	.disable	= dwc3_gadget_ep_disable,
	.alloc_request	= dwc3_gadget_ep_alloc_request,
	.free_request	= dwc3_gadget_ep_free_request,
	.queue		= dwc3_gadget_ep_queue,
	.dequeue	= dwc3_gadget_ep_dequeue,
	.set_halt	= dwc3_gadget_ep_set_halt,
	.set_wedge	= dwc3_gadget_ep_set_wedge,
};

/* -------------------------------------------------------------------------- */

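/* Returns the current (micro)frame number from the SOFFN field of DSTS. */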
static int dwc3_gadget_get_frame(struct usb_gadget *g)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
	return DWC3_DSTS_SOFFN(reg);
}

static int dwc3_gadget_wakeup(struct usb_gadget *g)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	unsigned long timeout;
	unsigned long flags;
	u32 reg;
	int ret = 0;
	u8 link_state;
	u8 speed;

	spin_lock_irqsave(&dwc->lock, flags);

	/*
	 * According to the Databook, a Remote Wakeup request should
	 * be issued only when the device is in early suspend state.
	 *
	 * We can check that via USB Link State bits in DSTS register.
	 */
	reg = dwc3_readl(dwc->regs, DWC3_DSTS);

	speed = reg & DWC3_DSTS_CONNECTSPD;
	if (speed == DWC3_DSTS_SUPERSPEED) {
		dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n");
		ret = -EINVAL;
		goto out;
	}

	link_state = DWC3_DSTS_USBLNKST(reg);

	switch (link_state) {
	case DWC3_LINK_STATE_RX_DET:	/* in HS, means Early Suspend */
	case DWC3_LINK_STATE_U3:	/* in HS, means SUSPEND */
		break;
	default:
		dev_dbg(dwc->dev, "can't wakeup from link state %d\n",
				link_state);
		ret = -EINVAL;
		goto out;
	}

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);

	/*
	 * Switch link state to Recovery. In HS/FS/LS this means
	 * RemoteWakeup Request
	 */
	reg |= DWC3_DCTL_ULSTCHNG_RECOVERY;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	/* wait for at least 2000us */
	usleep_range(2000, 2500);

	/* write zeroes to Link Change Request */
	reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	/* poll until Link State changes to ON */
	timeout = jiffies + msecs_to_jiffies(100);

	while (!time_after(jiffies, timeout)) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		/* in HS, means ON */
		if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
			break;
	}

	if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
		dev_err(dwc->dev, "failed to send remote wakeup\n");
		ret = -EINVAL;
	}

out:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
		int is_selfpowered)
{
	struct dwc3 *dwc = gadget_to_dwc(g);

	dwc->is_selfpowered = !!is_selfpowered;

	return 0;
}

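/*
 * dwc3_gadget_run_stop - start or stop the controller's device side
 *
 * Sets or clears DCTL.RUN_STOP and then busy-polls DSTS.DEVCTRLHLT (up to
 * roughly 500us) until the core reports that it has actually started or
 * halted.
 */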
static void dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
{
	u32 reg;
	u32 timeout = 500;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	if (is_on)
		reg |= DWC3_DCTL_RUN_STOP;
	else
		reg &= ~DWC3_DCTL_RUN_STOP;

	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	do {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
		if (is_on) {
			if (!(reg & DWC3_DSTS_DEVCTRLHLT))
				break;
		} else {
			if (reg & DWC3_DSTS_DEVCTRLHLT)
				break;
		}
		timeout--;
		if (!timeout)
			break;
		udelay(1);
	} while (1);

	dev_vdbg(dwc->dev, "gadget %s data soft-%s\n",
			dwc->gadget_driver
			? dwc->gadget_driver->function : "no-function",
			is_on ? "connect" : "disconnect");
}

static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	unsigned long flags;

	is_on = !!is_on;

	spin_lock_irqsave(&dwc->lock, flags);
	dwc3_gadget_run_stop(dwc, is_on);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}

static int dwc3_gadget_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	struct dwc3_ep *dep;
	unsigned long flags;
	int ret = 0;
	u32 reg;

	spin_lock_irqsave(&dwc->lock, flags);

	if (dwc->gadget_driver) {
		dev_err(dwc->dev, "%s is already bound to %s\n",
				dwc->gadget.name,
				dwc->gadget_driver->driver.name);
		ret = -EBUSY;
		goto err0;
	}

	dwc->gadget_driver = driver;
	dwc->gadget.dev.driver = &driver->driver;

	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_SPEED_MASK);
	reg |= dwc->maximum_speed;
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);

	dwc->start_config_issued = false;

	/* Start with SuperSpeed Default */
	dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);

	dep = dwc->eps[0];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		goto err0;
	}

	dep = dwc->eps[1];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		goto err1;
	}

	/* begin to receive SETUP packets */
	dwc->ep0state = EP0_SETUP_PHASE;
	dwc3_ep0_out_start(dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;

err1:
	__dwc3_gadget_ep_disable(dwc->eps[0]);

err0:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_stop(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);

	__dwc3_gadget_ep_disable(dwc->eps[0]);
	__dwc3_gadget_ep_disable(dwc->eps[1]);

	dwc->gadget_driver = NULL;
	dwc->gadget.dev.driver = NULL;

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}

static const struct usb_gadget_ops dwc3_gadget_ops = {
	.get_frame		= dwc3_gadget_get_frame,
	.wakeup			= dwc3_gadget_wakeup,
	.set_selfpowered	= dwc3_gadget_set_selfpowered,
	.pullup			= dwc3_gadget_pullup,
	.udc_start		= dwc3_gadget_start,
	.udc_stop		= dwc3_gadget_stop,
};

/* -------------------------------------------------------------------------- */

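/*
 * dwc3_gadget_init_endpoints - allocate one dwc3_ep per physical endpoint
 *
 * Even endpoint numbers are OUT, odd ones are IN. Physical endpoints 0 and 1
 * form ep0 and use the ep0 ops; all others get the generic ops, are added to
 * the gadget's ep_list and get a TRB pool allocated.
 */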
static int __devinit dwc3_gadget_init_endpoints(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;
	u8 epnum;

	INIT_LIST_HEAD(&dwc->gadget.ep_list);

	for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		dep = kzalloc(sizeof(*dep), GFP_KERNEL);
		if (!dep) {
			dev_err(dwc->dev, "can't allocate endpoint %d\n",
					epnum);
			return -ENOMEM;
		}

		dep->dwc = dwc;
		dep->number = epnum;
		dwc->eps[epnum] = dep;

		snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
				(epnum & 1) ? "in" : "out");
		dep->endpoint.name = dep->name;
		dep->direction = (epnum & 1);

		if (epnum == 0 || epnum == 1) {
			dep->endpoint.maxpacket = 512;
			dep->endpoint.ops = &dwc3_gadget_ep0_ops;
			if (!epnum)
				dwc->gadget.ep0 = &dep->endpoint;
		} else {
			int ret;

			dep->endpoint.maxpacket = 1024;
			dep->endpoint.max_streams = 15;
			dep->endpoint.ops = &dwc3_gadget_ep_ops;
			list_add_tail(&dep->endpoint.ep_list,
					&dwc->gadget.ep_list);

			ret = dwc3_alloc_trb_pool(dep);
			if (ret)
				return ret;
		}

		INIT_LIST_HEAD(&dep->request_list);
		INIT_LIST_HEAD(&dep->req_queued);
	}

	return 0;
}

static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;
	u8 epnum;

	for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		dep = dwc->eps[epnum];
		dwc3_free_trb_pool(dep);

		if (epnum != 0 && epnum != 1)
			list_del(&dep->endpoint.ep_list);

		kfree(dep);
	}
}

static void dwc3_gadget_release(struct device *dev)
{
	dev_dbg(dev, "%s\n", __func__);
}

/* -------------------------------------------------------------------------- */

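/*
 * dwc3_cleanup_done_reqs - give back requests whose TRBs the core completed
 *
 * Walks dep->req_queued, turning each completed TRB back into a request
 * completion. Returns 1 when the transfer is done (so the caller can clear
 * DWC3_EP_BUSY) and 0 when the completion was only an IOC checkpoint on an
 * isochronous ring that keeps running.
 */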
static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event, int status)
{
	struct dwc3_request *req;
	struct dwc3_trb trb;
	unsigned int count;
	unsigned int s_pkt = 0;

	do {
		req = next_request(&dep->req_queued);
		if (!req) {
			WARN_ON_ONCE(1);
			return 1;
		}

		dwc3_trb_to_nat(req->trb, &trb);

		if (trb.hwo && status != -ESHUTDOWN)
			/*
			 * We continue despite the error. There is not much
			 * we can do. If we don't clean it up we loop forever.
			 * If we skip the TRB it gets overwritten and reused
			 * after a while since we use them in a ring buffer.
			 * A BUG() would help. Let's hope that if this occurs,
			 * someone fixes the root cause instead of looking
			 * away :)
			 */
			dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
					dep->name, req->trb);
		count = trb.length;

		if (dep->direction) {
			if (count) {
				dev_err(dwc->dev, "incomplete IN transfer %s\n",
						dep->name);
				status = -ECONNRESET;
			}
		} else {
			if (count && (event->status & DEPEVT_STATUS_SHORT))
				s_pkt = 1;
		}

		/*
		 * We assume here we will always receive the entire data block
		 * which we should receive. Meaning, if we program RX to
		 * receive 4K but we receive only 2K, we assume that's all we
		 * should receive and we simply bounce the request back to the
		 * gadget driver for further processing.
		 */
		req->request.actual += req->request.length - count;
		dwc3_gadget_giveback(dep, req, status);
		if (s_pkt)
			break;
		if ((event->status & DEPEVT_STATUS_LST) && trb.lst)
			break;
		if ((event->status & DEPEVT_STATUS_IOC) && trb.ioc)
			break;
	} while (1);

	if ((event->status & DEPEVT_STATUS_IOC) && trb.ioc)
		return 0;

	return 1;
}

static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
		struct dwc3_ep *dep, const struct dwc3_event_depevt *event,
		int start_new)
{
	unsigned status = 0;
	int clean_busy;

	if (event->status & DEPEVT_STATUS_BUSERR)
		status = -ECONNRESET;

	clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
	if (clean_busy) {
		dep->flags &= ~DWC3_EP_BUSY;
		dep->res_trans_idx = 0;
	}

	/*
	 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
	 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
	 */
	if (dwc->revision < DWC3_REVISION_183A) {
		u32 reg;
		int i;

		for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
			struct dwc3_ep *dep = dwc->eps[i];

			if (!(dep->flags & DWC3_EP_ENABLED))
				continue;

			if (!list_empty(&dep->req_queued))
				return;
		}

		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg |= dwc->u1u2;
		dwc3_writel(dwc->regs, DWC3_DCTL, reg);

		dwc->u1u2 = 0;
	}
}

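/*
 * dwc3_gadget_start_isoc - kick an isochronous transfer on XferNotReady
 *
 * Takes the (micro)frame number reported in the event parameters, rounds it
 * down to the endpoint's interval and schedules the transfer four intervals
 * into the future before issuing Start Transfer.
 */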
static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
		struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
{
	u32 uf;

	if (list_empty(&dep->request_list)) {
		dev_vdbg(dwc->dev, "ISOC ep %s run out of requests.\n",
				dep->name);
		return;
	}

	if (event->parameters) {
		u32 mask;

		mask = ~(dep->interval - 1);
		uf = event->parameters & mask;
		/* 4 micro frames in the future */
		uf += dep->interval * 4;
	} else {
		uf = 0;
	}

	__dwc3_gadget_kick_transfer(dep, uf, 1);
}

static void dwc3_process_ep_cmd_complete(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event)
{
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_event_depevt mod_ev = *event;

	/*
	 * We were asked to remove one request. It is possible that this
	 * request and a few others were started together and have the same
	 * transfer index. Since we stopped the complete endpoint we don't
	 * know how many requests were already completed (and not yet
	 * reported) and how many could still complete later. We purge them
	 * all until the end of the list.
	 */
	mod_ev.status = DEPEVT_STATUS_LST;
	dwc3_cleanup_done_reqs(dwc, dep, &mod_ev, -ESHUTDOWN);
	dep->flags &= ~DWC3_EP_BUSY;
	/* pending requests are ignored and are queued on XferNotReady */
}

static void dwc3_ep_cmd_compl(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event)
{
	u32 param = event->parameters;
	u32 cmd_type = (param >> 8) & ((1 << 5) - 1);

	switch (cmd_type) {
	case DWC3_DEPCMD_ENDTRANSFER:
		dwc3_process_ep_cmd_complete(dep, event);
		break;
	case DWC3_DEPCMD_STARTTRANSFER:
		dep->res_trans_idx = param & 0x7f;
		break;
	default:
		printk(KERN_ERR "%s() unknown/unexpected type: %d\n",
				__func__, cmd_type);
		break;
	}
}

static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_ep *dep;
	u8 epnum = event->endpoint_number;

	dep = dwc->eps[epnum];

	dev_vdbg(dwc->dev, "%s: %s\n", dep->name,
			dwc3_ep_event_string(event->endpoint_event));

	if (epnum == 0 || epnum == 1) {
		dwc3_ep0_interrupt(dwc, event);
		return;
	}

	switch (event->endpoint_event) {
	case DWC3_DEPEVT_XFERCOMPLETE:
		if (usb_endpoint_xfer_isoc(dep->desc)) {
			dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n",
					dep->name);
			return;
		}

		dwc3_endpoint_transfer_complete(dwc, dep, event, 1);
		break;
	case DWC3_DEPEVT_XFERINPROGRESS:
		if (!usb_endpoint_xfer_isoc(dep->desc)) {
			dev_dbg(dwc->dev, "%s is not an Isochronous endpoint\n",
					dep->name);
			return;
		}

		dwc3_endpoint_transfer_complete(dwc, dep, event, 0);
		break;
	case DWC3_DEPEVT_XFERNOTREADY:
		if (usb_endpoint_xfer_isoc(dep->desc)) {
			dwc3_gadget_start_isoc(dwc, dep, event);
		} else {
			int ret;

			dev_vdbg(dwc->dev, "%s: reason %s\n",
					dep->name, event->status
					? "Transfer Active"
					: "Transfer Not Active");

			ret = __dwc3_gadget_kick_transfer(dep, 0, 1);
			if (!ret || ret == -EBUSY)
				return;

			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
					dep->name);
		}

		break;
	case DWC3_DEPEVT_STREAMEVT:
		if (!usb_endpoint_xfer_bulk(dep->desc)) {
			dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
					dep->name);
			return;
		}

		switch (event->status) {
		case DEPEVT_STREAMEVT_FOUND:
			dev_vdbg(dwc->dev, "Stream %d found and started\n",
					event->parameters);
			break;
		case DEPEVT_STREAMEVT_NOTFOUND:
			/* FALLTHROUGH */
		default:
			dev_dbg(dwc->dev, "Couldn't find suitable stream\n");
		}
		break;
	case DWC3_DEPEVT_RXTXFIFOEVT:
		dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name);
		break;
	case DWC3_DEPEVT_EPCMDCMPLT:
		dwc3_ep_cmd_compl(dep, event);
		break;
	}
}

static void dwc3_disconnect_gadget(struct dwc3 *dwc)
{
	if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
		spin_unlock(&dwc->lock);
		dwc->gadget_driver->disconnect(&dwc->gadget);
		spin_lock(&dwc->lock);
	}
}

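/*
 * dwc3_stop_active_transfer - force the core to stop a running transfer
 *
 * Issues End Transfer with ForceRM and CMDIOC set, using the transfer
 * resource index saved when the transfer was started, and clears
 * res_trans_idx afterwards.
 */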
static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum)
{
	struct dwc3_ep *dep;
	struct dwc3_gadget_ep_cmd_params params;
	u32 cmd;
	int ret;

	dep = dwc->eps[epnum];

	WARN_ON(!dep->res_trans_idx);
	if (dep->res_trans_idx) {
		cmd = DWC3_DEPCMD_ENDTRANSFER;
		cmd |= DWC3_DEPCMD_HIPRI_FORCERM | DWC3_DEPCMD_CMDIOC;
		cmd |= DWC3_DEPCMD_PARAM(dep->res_trans_idx);
		memset(&params, 0, sizeof(params));
		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
		WARN_ON_ONCE(ret);
		dep->res_trans_idx = 0;
	}
}
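/*
 * The End Transfer command built above ORs the command number with the
 * ForceRM/HiPri and CMDIOC (interrupt on completion) flags and, via
 * DWC3_DEPCMD_PARAM(), the transfer resource index saved when the matching
 * Start Transfer completed (see dwc3_ep_cmd_compl()). The index is cleared
 * afterwards so a stale value is never reused.
 */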
static void dwc3_stop_active_transfers(struct dwc3 *dwc)
{
	u32 epnum;

	for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		struct dwc3_ep *dep;

		dep = dwc->eps[epnum];
		if (!(dep->flags & DWC3_EP_ENABLED))
			continue;

		dwc3_remove_requests(dwc, dep);
	}
}
static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
{
	u32 epnum;

	for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		struct dwc3_ep *dep;
		struct dwc3_gadget_ep_cmd_params params;
		int ret;

		dep = dwc->eps[epnum];
		if (!(dep->flags & DWC3_EP_STALL))
			continue;

		dep->flags &= ~DWC3_EP_STALL;

		memset(&params, 0, sizeof(params));
		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
				DWC3_DEPCMD_CLEARSTALL, &params);
		WARN_ON_ONCE(ret);
	}
}
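/*
 * Note the different loop bounds of the two helpers above:
 * dwc3_stop_active_transfers() starts at physical endpoint 2, skipping both
 * halves of ep0, whereas dwc3_clear_stall_all_ep() starts at 1 and so only
 * skips physical endpoint 0 when clearing the STALL flag and issuing
 * DWC3_DEPCMD_CLEARSTALL.
 */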
static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
{
	dev_vdbg(dwc->dev, "%s\n", __func__);
#if 0
	XXX
	U1/U2 is a power-save optimization. Skip it for now; we would need to
	enable it before we could disable it anyway.

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_INITU1ENA;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	reg &= ~DWC3_DCTL_INITU2ENA;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
#endif

	dwc3_stop_active_transfers(dwc);
	dwc3_disconnect_gadget(dwc);
	dwc->start_config_issued = false;

	dwc->gadget.speed = USB_SPEED_UNKNOWN;
	dwc->setup_packet_pending = false;
}
static void dwc3_gadget_usb3_phy_power(struct dwc3 *dwc, int on)
{
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));

	if (on)
		reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
	else
		reg |= DWC3_GUSB3PIPECTL_SUSPHY;

	dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
}
static void dwc3_gadget_usb2_phy_power(struct dwc3 *dwc, int on)
{
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));

	if (on)
		reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
	else
		reg |= DWC3_GUSB2PHYCFG_SUSPHY;

	dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
}
static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
{
	u32 reg;

	dev_vdbg(dwc->dev, "%s\n", __func__);

	/*
	 * WORKAROUND: DWC3 revisions <1.88a have an issue which
	 * would cause a missing Disconnect Event if there's a
	 * pending Setup Packet in the FIFO.
	 *
	 * There's no suggested workaround on the official Bug
	 * report, which states that "unless the driver/application
	 * is doing any special handling of a disconnect event,
	 * there is no functional issue".
	 *
	 * Unfortunately, it turns out that we _do_ some special
	 * handling of a disconnect event, namely complete all
	 * pending transfers, notify gadget driver of the
	 * disconnection, and so on.
	 *
	 * Our suggested workaround is to follow the Disconnect
	 * Event steps here, instead, based on a setup_packet_pending
	 * flag. The flag gets set whenever we have an XferNotReady
	 * event on EP0 and gets cleared on XferComplete for the
	 * same endpoint.
	 *
	 * Refers to:
	 *
	 * STAR#9000466709: RTL: Device : Disconnect event not
	 * generated if setup packet pending in FIFO
	 */
	if (dwc->revision < DWC3_REVISION_188A) {
		if (dwc->setup_packet_pending)
			dwc3_gadget_disconnect_interrupt(dwc);
	}

	/* after reset -> Default State */
	dwc->dev_state = DWC3_DEFAULT_STATE;

	/* Enable PHYs */
	dwc3_gadget_usb2_phy_power(dwc, true);
	dwc3_gadget_usb3_phy_power(dwc, true);

	if (dwc->gadget.speed != USB_SPEED_UNKNOWN)
		dwc3_disconnect_gadget(dwc);

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	dwc3_stop_active_transfers(dwc);
	dwc3_clear_stall_all_ep(dwc);
	dwc->start_config_issued = false;

	/* Reset device address to zero */
	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_DEVADDR_MASK);
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
}
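/*
 * Summary of the reset path above: optionally replay the Disconnect handling
 * for pre-1.88a cores with a pending setup packet, move the device state back
 * to Default, make sure both PHYs are powered, clear any test mode bits in
 * DCTL, cancel outstanding transfers, clear endpoint stalls, and finally zero
 * the device address in DCFG so enumeration can restart cleanly.
 */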
static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
{
	u32 reg;
	u32 usb30_clock = DWC3_GCTL_CLK_BUS;

	/*
	 * We change the clock only at SuperSpeed. It is not entirely clear
	 * yet why; this may become part of the power-saving plan.
	 */
	if (speed != DWC3_DSTS_SUPERSPEED)
		return;

	/*
	 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
	 * each time on Connect Done.
	 */
	if (!usb30_clock)
		return;

	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
	reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
}
static void dwc3_gadget_disable_phy(struct dwc3 *dwc, u8 speed)
{
	switch (speed) {
	case USB_SPEED_SUPER:
		dwc3_gadget_usb2_phy_power(dwc, false);
		break;
	case USB_SPEED_HIGH:
	case USB_SPEED_FULL:
	case USB_SPEED_LOW:
		dwc3_gadget_usb3_phy_power(dwc, false);
		break;
	}
}
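/*
 * Only the PHY matching the negotiated speed is kept active: at SuperSpeed
 * the USB2 PHY is put into suspend, while at High/Full/Low Speed the USB3
 * PIPE PHY is suspended instead (the *_phy_power() helpers above toggle the
 * respective SUSPHY bits).
 */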
static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3_ep *dep;
	int ret;
	u32 reg;
	u8 speed;

	dev_vdbg(dwc->dev, "%s\n", __func__);

	memset(&params, 0x00, sizeof(params));

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
	speed = reg & DWC3_DSTS_CONNECTSPD;
	dwc->speed = speed;

	dwc3_update_ram_clk_sel(dwc, speed);

	switch (speed) {
	case DWC3_DCFG_SUPERSPEED:
		/*
		 * WORKAROUND: DWC3 revisions <1.90a have an issue which
		 * would cause a missing USB3 Reset event.
		 *
		 * In such situations, we should force a USB3 Reset
		 * event by calling our dwc3_gadget_reset_interrupt()
		 * routine.
		 *
		 * Refers to:
		 *
		 * STAR#9000483510: RTL: SS : USB3 reset event may
		 * not be generated always when the link enters poll
		 */
		if (dwc->revision < DWC3_REVISION_190A)
			dwc3_gadget_reset_interrupt(dwc);

		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
		dwc->gadget.ep0->maxpacket = 512;
		dwc->gadget.speed = USB_SPEED_SUPER;
		break;
	case DWC3_DCFG_HIGHSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget.ep0->maxpacket = 64;
		dwc->gadget.speed = USB_SPEED_HIGH;
		break;
	case DWC3_DCFG_FULLSPEED2:
	case DWC3_DCFG_FULLSPEED1:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget.ep0->maxpacket = 64;
		dwc->gadget.speed = USB_SPEED_FULL;
		break;
	case DWC3_DCFG_LOWSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
		dwc->gadget.ep0->maxpacket = 8;
		dwc->gadget.speed = USB_SPEED_LOW;
		break;
	}

	/* Disable unneeded PHY */
	dwc3_gadget_disable_phy(dwc, dwc->gadget.speed);

	dep = dwc->eps[0];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}

	dep = dwc->eps[1];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}

	/*
	 * Configure PHY via GUSB3PIPECTLn if required.
	 *
	 * Update GTXFIFOSIZn
	 *
	 * In both cases reset values should be sufficient.
	 */
}
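/*
 * ep0 maxpacket as programmed above on Connect Done:
 *
 *	SuperSpeed		512 bytes
 *	High/Full Speed		 64 bytes
 *	Low Speed		  8 bytes
 *
 * Both halves of ep0 (physical endpoints 0 and 1) are then re-enabled with
 * the updated dwc3_gadget_ep0_desc.
 */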
static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
{
	dev_vdbg(dwc->dev, "%s\n", __func__);

	/*
	 * TODO take core out of low power mode when that's
	 * implemented.
	 */
	dwc->gadget_driver->resume(&dwc->gadget);
}
static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;

	/*
	 * WORKAROUND: DWC3 revisions <1.83a have an issue in which, depending
	 * on the link partner, the USB session might do multiple entries/exits
	 * of low power states before a transfer takes place.
	 *
	 * Due to this problem, we might experience lower throughput. The
	 * suggested workaround is to disable DCTL[12:9] bits if we're
	 * transitioning from U1/U2 to U0 and enable those bits again
	 * after a transfer completes and there are no pending transfers
	 * on any of the enabled endpoints.
	 *
	 * This is the first half of that workaround.
	 *
	 * Refers to:
	 *
	 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
	 * core send LGO_Ux entering U0
	 */
	if (dwc->revision < DWC3_REVISION_183A) {
		if (next == DWC3_LINK_STATE_U0) {
			u32 u1u2;
			u32 reg;

			switch (dwc->link_state) {
			case DWC3_LINK_STATE_U1:
			case DWC3_LINK_STATE_U2:
				reg = dwc3_readl(dwc->regs, DWC3_DCTL);
				u1u2 = reg & (DWC3_DCTL_INITU2ENA
						| DWC3_DCTL_ACCEPTU2ENA
						| DWC3_DCTL_INITU1ENA
						| DWC3_DCTL_ACCEPTU1ENA);

				if (!dwc->u1u2)
					dwc->u1u2 = reg & u1u2;

				reg &= ~u1u2;

				dwc3_writel(dwc->regs, DWC3_DCTL, reg);
				break;
			default:
				/* do nothing */
				break;
			}
		}
	}

	dwc->link_state = next;

	dev_vdbg(dwc->dev, "%s link %d\n", __func__, dwc->link_state);
}
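/*
 * Only the first half of the U1/U2 workaround lives here: on a U1/U2 -> U0
 * transition the four DCTL U1/U2 init/accept enable bits are saved into
 * dwc->u1u2 (if not already saved) and then cleared. As the comment above
 * notes, the saved value is meant to be restored once a transfer completes
 * with no transfers pending; that second half is performed elsewhere in the
 * transfer-completion path.
 */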
static void dwc3_gadget_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_devt *event)
{
	switch (event->type) {
	case DWC3_DEVICE_EVENT_DISCONNECT:
		dwc3_gadget_disconnect_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_RESET:
		dwc3_gadget_reset_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_CONNECT_DONE:
		dwc3_gadget_conndone_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_WAKEUP:
		dwc3_gadget_wakeup_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
		dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
		break;
	case DWC3_DEVICE_EVENT_EOPF:
		dev_vdbg(dwc->dev, "End of Periodic Frame\n");
		break;
	case DWC3_DEVICE_EVENT_SOF:
		dev_vdbg(dwc->dev, "Start of Periodic Frame\n");
		break;
	case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
		dev_vdbg(dwc->dev, "Erratic Error\n");
		break;
	case DWC3_DEVICE_EVENT_CMD_CMPL:
		dev_vdbg(dwc->dev, "Command Complete\n");
		break;
	case DWC3_DEVICE_EVENT_OVERFLOW:
		dev_vdbg(dwc->dev, "Overflow\n");
		break;
	default:
		dev_dbg(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
	}
}
static void dwc3_process_event_entry(struct dwc3 *dwc,
		const union dwc3_event *event)
{
	/* Endpoint IRQ, handle it and return early */
	if (event->type.is_devspec == 0) {
		/* depevt */
		return dwc3_endpoint_interrupt(dwc, &event->depevt);
	}

	switch (event->type.type) {
	case DWC3_EVENT_TYPE_DEV:
		dwc3_gadget_interrupt(dwc, &event->devt);
		break;
	/* REVISIT what to do with Carkit and I2C events ? */
	default:
		dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
	}
}
static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
{
	struct dwc3_event_buffer *evt;
	int left;
	u32 count;

	count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
	count &= DWC3_GEVNTCOUNT_MASK;
	if (!count)
		return IRQ_NONE;

	evt = dwc->ev_buffs[buf];
	left = count;

	while (left > 0) {
		union dwc3_event event;

		memcpy(&event.raw, (evt->buf + evt->lpos), sizeof(event.raw));
		dwc3_process_event_entry(dwc, &event);
		/*
		 * XXX we wrap around correctly to the next entry as almost all
		 * entries are 4 bytes in size. There is one entry which is 12
		 * bytes: a regular entry followed by 8 bytes of data. It is
		 * not clear yet how things are laid out if such an entry ends
		 * up next to the buffer boundary, so we will worry about that
		 * once we try to handle it.
		 */
		evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
		left -= 4;

		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
	}

	return IRQ_HANDLED;
}
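/*
 * Event ring handling in the loop above: GEVNTCOUNT reports how many bytes
 * of events are pending; each entry is consumed as a raw 4-byte word, lpos
 * advances modulo DWC3_EVENT_BUFFERS_SIZE, and writing 4 back to GEVNTCOUNT
 * acknowledges that entry so the core can reuse the slot.
 */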
static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
{
	struct dwc3 *dwc = _dwc;
	int i;
	irqreturn_t ret = IRQ_NONE;

	spin_lock(&dwc->lock);

	for (i = 0; i < dwc->num_event_buffers; i++) {
		irqreturn_t status;

		status = dwc3_process_event_buf(dwc, i);
		if (status == IRQ_HANDLED)
			ret = status;
	}

	spin_unlock(&dwc->lock);

	return ret;
}
/**
 * dwc3_gadget_init - Initializes gadget related registers
 * @dwc: pointer to our controller context structure
 *
 * Returns 0 on success, otherwise negative errno.
 */
int __devinit dwc3_gadget_init(struct dwc3 *dwc)
{
	u32 reg;
	int ret;
	int irq;

	dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
			&dwc->ctrl_req_addr, GFP_KERNEL);
	if (!dwc->ctrl_req) {
		dev_err(dwc->dev, "failed to allocate ctrl request\n");
		ret = -ENOMEM;
		goto err0;
	}

	dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
			&dwc->ep0_trb_addr, GFP_KERNEL);
	if (!dwc->ep0_trb) {
		dev_err(dwc->dev, "failed to allocate ep0 trb\n");
		ret = -ENOMEM;
		goto err1;
	}

	dwc->setup_buf = dma_alloc_coherent(dwc->dev,
			sizeof(*dwc->setup_buf) * 2,
			&dwc->setup_buf_addr, GFP_KERNEL);
	if (!dwc->setup_buf) {
		dev_err(dwc->dev, "failed to allocate setup buffer\n");
		ret = -ENOMEM;
		goto err2;
	}

	dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
			512, &dwc->ep0_bounce_addr, GFP_KERNEL);
	if (!dwc->ep0_bounce) {
		dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
		ret = -ENOMEM;
		goto err3;
	}

	dev_set_name(&dwc->gadget.dev, "gadget");

	dwc->gadget.ops = &dwc3_gadget_ops;
	dwc->gadget.max_speed = USB_SPEED_SUPER;
	dwc->gadget.speed = USB_SPEED_UNKNOWN;
	dwc->gadget.dev.parent = dwc->dev;
	dwc->gadget.sg_supported = true;

	dma_set_coherent_mask(&dwc->gadget.dev, dwc->dev->coherent_dma_mask);

	dwc->gadget.dev.dma_parms = dwc->dev->dma_parms;
	dwc->gadget.dev.dma_mask = dwc->dev->dma_mask;
	dwc->gadget.dev.release = dwc3_gadget_release;
	dwc->gadget.name = "dwc3-gadget";

	/*
	 * REVISIT: Here we should clear all pending IRQs to be
	 * sure we're starting from a well known location.
	 */

	ret = dwc3_gadget_init_endpoints(dwc);
	if (ret)
		goto err4;

	irq = platform_get_irq(to_platform_device(dwc->dev), 0);

	ret = request_irq(irq, dwc3_interrupt, IRQF_SHARED,
			"dwc3", dwc);
	if (ret) {
		dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
				irq, ret);
		goto err5;
	}

	/* Enable all but Start and End of Frame IRQs */
	reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
			DWC3_DEVTEN_EVNTOVERFLOWEN |
			DWC3_DEVTEN_CMDCMPLTEN |
			DWC3_DEVTEN_ERRTICERREN |
			DWC3_DEVTEN_WKUPEVTEN |
			DWC3_DEVTEN_ULSTCNGEN |
			DWC3_DEVTEN_CONNECTDONEEN |
			DWC3_DEVTEN_USBRSTEN |
			DWC3_DEVTEN_DISCONNEVTEN);
	dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);

	ret = device_register(&dwc->gadget.dev);
	if (ret) {
		dev_err(dwc->dev, "failed to register gadget device\n");
		put_device(&dwc->gadget.dev);
		goto err6;
	}

	ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
	if (ret) {
		dev_err(dwc->dev, "failed to register udc\n");
		goto err7;
	}

	return 0;

err7:
	device_unregister(&dwc->gadget.dev);

err6:
	dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
	free_irq(irq, dwc);

err5:
	dwc3_gadget_free_endpoints(dwc);

err4:
	dma_free_coherent(dwc->dev, 512, dwc->ep0_bounce,
			dwc->ep0_bounce_addr);

err3:
	dma_free_coherent(dwc->dev, sizeof(*dwc->setup_buf) * 2,
			dwc->setup_buf, dwc->setup_buf_addr);

err2:
	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
			dwc->ep0_trb, dwc->ep0_trb_addr);

err1:
	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
			dwc->ctrl_req, dwc->ctrl_req_addr);

err0:
	return ret;
}
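/*
 * The error unwind above (err7..err0) mirrors the setup order in reverse:
 * UDC registration, gadget device, DEVTEN/IRQ, endpoints, and then the four
 * coherent DMA buffers (ep0 bounce, setup buffer, ep0 TRB, ctrl request),
 * so a failure at any step releases exactly what was already acquired.
 */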
void dwc3_gadget_exit(struct dwc3 *dwc)
{
	int irq;

	usb_del_gadget_udc(&dwc->gadget);
	irq = platform_get_irq(to_platform_device(dwc->dev), 0);

	dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
	free_irq(irq, dwc);

	dwc3_gadget_free_endpoints(dwc);

	dma_free_coherent(dwc->dev, 512, dwc->ep0_bounce,
			dwc->ep0_bounce_addr);

	dma_free_coherent(dwc->dev, sizeof(*dwc->setup_buf) * 2,
			dwc->setup_buf, dwc->setup_buf_addr);

	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
			dwc->ep0_trb, dwc->ep0_trb_addr);

	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
			dwc->ctrl_req, dwc->ctrl_req_addr);

	device_unregister(&dwc->gadget.dev);
}