musb_gadget.c

/*
 * MUSB OTG driver peripheral support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/stat.h>
#include <linux/dma-mapping.h>

#include "musb_core.h"

/* MUSB PERIPHERAL status 3-mar-2006:
 *
 * - EP0 seems solid.  It passes both USBCV and usbtest control cases.
 *   Minor glitches:
 *
 *     + remote wakeup to Linux hosts work, but saw USBCV failures;
 *       in one test run (operator error?)
 *     + endpoint halt tests -- in both usbtest and usbcv -- seem
 *       to break when dma is enabled ... is something wrongly
 *       clearing SENDSTALL?
 *
 * - Mass storage behaved ok when last tested.  Network traffic patterns
 *   (with lots of short transfers etc) need retesting; they turn up the
 *   worst cases of the DMA, since short packets are typical but are not
 *   required.
 *
 * - TX/IN
 *     + both pio and dma behave with network and g_zero tests
 *     + no cppi throughput issues other than no-hw-queueing
 *     + failed with FLAT_REG (DaVinci)
 *     + seems to behave with double buffering, PIO -and- CPPI
 *     + with gadgetfs + AIO, requests got lost?
 *
 * - RX/OUT
 *     + both pio and dma behave with network and g_zero tests
 *     + dma is slow in typical case (short_not_ok is clear)
 *     + double buffering ok with PIO
 *     + double buffering *FAILS* with CPPI, wrong data bytes sometimes
 *     + request lossage observed with gadgetfs
 *
 * - ISO not tested ... might work, but only weakly isochronous
 *
 * - Gadget driver disabling of softconnect during bind() is ignored; so
 *   drivers can't hold off host requests until userspace is ready.
 *   (Workaround: they can turn it off later.)
 *
 * - PORTABILITY (assumes PIO works):
 *     + DaVinci, basically works with cppi dma
 *     + OMAP 2430, ditto with mentor dma
 *     + TUSB 6010, platform-specific dma in the works
 */

/* ----------------------------------------------------------------------- */

/*
 * Immediately complete a request.
 *
 * @param request the request to complete
 * @param status the status to complete the request with
 * Context: controller locked, IRQs blocked.
 */
void musb_g_giveback(
	struct musb_ep		*ep,
	struct usb_request	*request,
	int			status)
__releases(ep->musb->lock)
__acquires(ep->musb->lock)
{
	struct musb_request	*req;
	struct musb		*musb;
	int			busy = ep->busy;

	req = to_musb_request(request);
	list_del(&request->list);
	if (req->request.status == -EINPROGRESS)
		req->request.status = status;
	musb = req->musb;
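
	/*
	 * The request's ->complete() callback may itself queue or dequeue
	 * requests on this endpoint, so drop the controller lock around it;
	 * ep->busy keeps the queue/restart paths from re-entering the
	 * hardware while the callback runs.
	 */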
	ep->busy = 1;
	spin_unlock(&musb->lock);
	if (is_dma_capable()) {
		if (req->mapped) {
			dma_unmap_single(musb->controller,
					req->request.dma,
					req->request.length,
					req->tx
						? DMA_TO_DEVICE
						: DMA_FROM_DEVICE);
			req->request.dma = DMA_ADDR_INVALID;
			req->mapped = 0;
		} else if (req->request.dma != DMA_ADDR_INVALID)
			dma_sync_single_for_cpu(musb->controller,
					req->request.dma,
					req->request.length,
					req->tx
						? DMA_TO_DEVICE
						: DMA_FROM_DEVICE);
	}
	if (request->status == 0)
		DBG(5, "%s done request %p, %d/%d\n",
				ep->end_point.name, request,
				req->request.actual, req->request.length);
	else
		DBG(2, "%s request %p, %d/%d fault %d\n",
				ep->end_point.name, request,
				req->request.actual, req->request.length,
				request->status);
	req->request.complete(&req->ep->end_point, &req->request);
	spin_lock(&musb->lock);
	ep->busy = busy;
}

/* ----------------------------------------------------------------------- */

/*
 * Abort requests queued to an endpoint using the status. Synchronous.
 * caller locked controller and blocked irqs, and selected this ep.
 */
static void nuke(struct musb_ep *ep, const int status)
{
	struct musb_request	*req = NULL;
	void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;

	ep->busy = 1;

	if (is_dma_capable() && ep->dma) {
		struct dma_controller	*c = ep->musb->dma_controller;
		int value;

		if (ep->is_in) {
			/*
			 * The programming guide says that we must not clear
			 * the DMAMODE bit before DMAENAB, so we only
			 * clear it in the second write...
			 */
			musb_writew(epio, MUSB_TXCSR,
				    MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO);
			musb_writew(epio, MUSB_TXCSR,
					0 | MUSB_TXCSR_FLUSHFIFO);
		} else {
			musb_writew(epio, MUSB_RXCSR,
					0 | MUSB_RXCSR_FLUSHFIFO);
			musb_writew(epio, MUSB_RXCSR,
					0 | MUSB_RXCSR_FLUSHFIFO);
		}

		value = c->channel_abort(ep->dma);
		DBG(value ? 1 : 6, "%s: abort DMA --> %d\n", ep->name, value);
		c->channel_release(ep->dma);
		ep->dma = NULL;
	}
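
	/* then hand back every queued request with the abort status */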
	while (!list_empty(&(ep->req_list))) {
		req = container_of(ep->req_list.next, struct musb_request,
				request.list);
		musb_g_giveback(ep, &req->request, status);
	}
}

/* ----------------------------------------------------------------------- */

/* Data transfers - pure PIO, pure DMA, or mixed mode */

/*
 * This assumes the separate CPPI engine is responding to DMA requests
 * from the usb core ... sequenced a bit differently from mentor dma.
 */

static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
{
	if (can_bulk_split(musb, ep->type))
		return ep->hw_ep->max_packet_sz_tx;
	else
		return ep->packet_sz;
}

#ifdef CONFIG_USB_INVENTRA_DMA

/* Peripheral tx (IN) using Mentor DMA works as follows:
	Only mode 0 is used for transfers <= wPktSize,
	mode 1 is used for larger transfers,

	One of the following happens:
	- Host sends IN token which causes an endpoint interrupt
		-> TxAvail
			-> if DMA is currently busy, exit.
			-> if queue is non-empty, txstate().

	- Request is queued by the gadget driver.
		-> if queue was previously empty, txstate()

	txstate()
		-> start
		  /\	-> setup DMA
		  |	(data is transferred to the FIFO, then sent out when
		  |	IN token(s) are recd from Host.
		  |		-> DMA interrupt on completion
		  |		   calls TxAvail.
		  |		      -> stop DMA, ~DMAENAB,
		  |		      -> set TxPktRdy for last short pkt or zlp
		  |		      -> Complete Request
		  |		      -> Continue next request (call txstate)
		  |___________________________________|

 * Non-Mentor DMA engines can of course work differently, such as by
 * upleveling from irq-per-packet to irq-per-buffer.
 */

#endif

/*
 * An endpoint is transmitting data. This can be called either from
 * the IRQ routine or from ep.queue() to kickstart a request on an
 * endpoint.
 *
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void txstate(struct musb *musb, struct musb_request *req)
{
	u8			epnum = req->epnum;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct usb_request	*request;
	u16			fifo_count = 0, csr;
	int			use_dma = 0;

	musb_ep = req->ep;

	/* we shouldn't get here while DMA is active ... but we do ... */
	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
		DBG(4, "dma pending...\n");
		return;
	}

	/* read TXCSR before */
	csr = musb_readw(epio, MUSB_TXCSR);

	request = &req->request;
	fifo_count = min(max_ep_writesize(musb, musb_ep),
			(int)(request->length - request->actual));

	if (csr & MUSB_TXCSR_TXPKTRDY) {
		DBG(5, "%s old packet still ready, txcsr %03x\n",
				musb_ep->end_point.name, csr);
		return;
	}

	if (csr & MUSB_TXCSR_P_SENDSTALL) {
		DBG(5, "%s stalling, txcsr %03x\n",
				musb_ep->end_point.name, csr);
		return;
	}

	DBG(4, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
			epnum, musb_ep->packet_sz, fifo_count,
			csr);

#ifndef CONFIG_MUSB_PIO_ONLY
	if (is_dma_capable() && musb_ep->dma) {
		struct dma_controller	*c = musb->dma_controller;

		use_dma = (request->dma != DMA_ADDR_INVALID);

		/* MUSB_TXCSR_P_ISO is still set correctly */

#ifdef CONFIG_USB_INVENTRA_DMA
		{
			size_t request_size;

			/* setup DMA, then program endpoint CSR */
			request_size = min(request->length,
						musb_ep->dma->max_len);
			if (request_size < musb_ep->packet_sz)
				musb_ep->dma->desired_mode = 0;
			else
				musb_ep->dma->desired_mode = 1;

			use_dma = use_dma && c->channel_program(
					musb_ep->dma, musb_ep->packet_sz,
					musb_ep->dma->desired_mode,
					request->dma, request_size);
			if (use_dma) {
				if (musb_ep->dma->desired_mode == 0) {
					/*
					 * We must not clear the DMAMODE bit
					 * before the DMAENAB bit -- and the
					 * latter doesn't always get cleared
					 * before we get here...
					 */
					csr &= ~(MUSB_TXCSR_AUTOSET
						| MUSB_TXCSR_DMAENAB);
					musb_writew(epio, MUSB_TXCSR, csr
						| MUSB_TXCSR_P_WZC_BITS);
					csr &= ~MUSB_TXCSR_DMAMODE;
					csr |= (MUSB_TXCSR_DMAENAB |
							MUSB_TXCSR_MODE);
					/* against programming guide */
				} else
					csr |= (MUSB_TXCSR_AUTOSET
							| MUSB_TXCSR_DMAENAB
							| MUSB_TXCSR_DMAMODE
							| MUSB_TXCSR_MODE);

				csr &= ~MUSB_TXCSR_P_UNDERRUN;
				musb_writew(epio, MUSB_TXCSR, csr);
			}
		}

#elif defined(CONFIG_USB_TI_CPPI_DMA)
		/* program endpoint CSR first, then setup DMA */
		csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
		csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE |
		       MUSB_TXCSR_MODE;
		musb_writew(epio, MUSB_TXCSR,
			(MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN)
				| csr);

		/* ensure writebuffer is empty */
		csr = musb_readw(epio, MUSB_TXCSR);

		/* NOTE host side sets DMAENAB later than this; both are
		 * OK since the transfer dma glue (between CPPI and Mentor
		 * fifos) just tells CPPI it could start. Data only moves
		 * to the USB TX fifo when both fifos are ready.
		 */

		/* "mode" is irrelevant here; handle terminating ZLPs like
		 * PIO does, since the hardware RNDIS mode seems unreliable
		 * except for the last-packet-is-already-short case.
		 */
		use_dma = use_dma && c->channel_program(
				musb_ep->dma, musb_ep->packet_sz,
				0,
				request->dma,
				request->length);
		if (!use_dma) {
			c->channel_release(musb_ep->dma);
			musb_ep->dma = NULL;
			csr &= ~MUSB_TXCSR_DMAENAB;
			musb_writew(epio, MUSB_TXCSR, csr);
			/* invariant: request->buf is non-null */
		}
#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
		use_dma = use_dma && c->channel_program(
				musb_ep->dma, musb_ep->packet_sz,
				request->zero,
				request->dma,
				request->length);
#endif
	}
#endif

	if (!use_dma) {
		musb_write_fifo(musb_ep->hw_ep, fifo_count,
				(u8 *) (request->buf + request->actual));
		request->actual += fifo_count;
		csr |= MUSB_TXCSR_TXPKTRDY;
		csr &= ~MUSB_TXCSR_P_UNDERRUN;
		musb_writew(epio, MUSB_TXCSR, csr);
	}

	/* host may already have the data when this message shows... */
	DBG(3, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
			musb_ep->end_point.name, use_dma ? "dma" : "pio",
			request->actual, request->length,
			musb_readw(epio, MUSB_TXCSR),
			fifo_count,
			musb_readw(epio, MUSB_TXMAXP));
}

/*
 * FIFO state update (e.g. data ready).
 * Called from IRQ, with controller locked.
 */
void musb_g_tx(struct musb *musb, u8 epnum)
{
	u16			csr;
	struct usb_request	*request;
	u8 __iomem		*mbase = musb->mregs;
	struct musb_ep		*musb_ep = &musb->endpoints[epnum].ep_in;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct dma_channel	*dma;

	musb_ep_select(mbase, epnum);
	request = next_request(musb_ep);

	csr = musb_readw(epio, MUSB_TXCSR);
	DBG(4, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);

	dma = is_dma_capable() ? musb_ep->dma : NULL;
	do {
		/* REVISIT for high bandwidth, MUSB_TXCSR_P_INCOMPTX
		 * probably rates reporting as a host error
		 */
		if (csr & MUSB_TXCSR_P_SENTSTALL) {
			csr |= MUSB_TXCSR_P_WZC_BITS;
			csr &= ~MUSB_TXCSR_P_SENTSTALL;
			musb_writew(epio, MUSB_TXCSR, csr);
			break;
		}

		if (csr & MUSB_TXCSR_P_UNDERRUN) {
			/* we NAKed, no big deal ... little reason to care */
			csr |= MUSB_TXCSR_P_WZC_BITS;
			csr &= ~(MUSB_TXCSR_P_UNDERRUN
					| MUSB_TXCSR_TXPKTRDY);
			musb_writew(epio, MUSB_TXCSR, csr);
			DBG(20, "underrun on ep%d, req %p\n", epnum, request);
		}

		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			/* SHOULD NOT HAPPEN ... has with cppi though, after
			 * changing SENDSTALL (and other cases); harmless?
			 */
			DBG(5, "%s dma still busy?\n", musb_ep->end_point.name);
			break;
		}

		if (request) {
			u8	is_dma = 0;

			if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
				is_dma = 1;
				csr |= MUSB_TXCSR_P_WZC_BITS;
				csr &= ~(MUSB_TXCSR_DMAENAB
						| MUSB_TXCSR_P_UNDERRUN
						| MUSB_TXCSR_TXPKTRDY);
				musb_writew(epio, MUSB_TXCSR, csr);
				/* ensure writebuffer is empty */
				csr = musb_readw(epio, MUSB_TXCSR);
				request->actual += musb_ep->dma->actual_len;
				DBG(4, "TXCSR%d %04x, dma off, "
						"len %zu, req %p\n",
					epnum, csr,
					musb_ep->dma->actual_len,
					request);
			}

			if (is_dma || request->actual == request->length) {
				/* First, maybe a terminating short packet.
				 * Some DMA engines might handle this by
				 * themselves.
				 */
				if ((request->zero
						&& request->length
						&& (request->length
							% musb_ep->packet_sz)
							== 0)
#ifdef CONFIG_USB_INVENTRA_DMA
					|| (is_dma &&
						((!dma->desired_mode) ||
						    (request->actual &
						    (musb_ep->packet_sz - 1))))
#endif
				) {
					/* on dma completion, fifo may not
					 * be available yet ...
					 */
					if (csr & MUSB_TXCSR_TXPKTRDY)
						break;

					DBG(4, "sending zero pkt\n");
					musb_writew(epio, MUSB_TXCSR,
							MUSB_TXCSR_MODE
							| MUSB_TXCSR_TXPKTRDY);
					request->zero = 0;
				}

				/* ... or if not, then complete it */
				musb_g_giveback(musb_ep, request, 0);

				/* kickstart next transfer if appropriate;
				 * the packet that just completed might not
				 * be transmitted for hours or days.
				 * REVISIT for double buffering...
				 * FIXME revisit for stalls too...
				 */
				musb_ep_select(mbase, epnum);
				csr = musb_readw(epio, MUSB_TXCSR);
				if (csr & MUSB_TXCSR_FIFONOTEMPTY)
					break;
				request = musb_ep->desc
						? next_request(musb_ep)
						: NULL;
				if (!request) {
					DBG(4, "%s idle now\n",
						musb_ep->end_point.name);
					break;
				}
			}

			txstate(musb, to_musb_request(request));
		}

	} while (0);
}

/* ------------------------------------------------------------ */

#ifdef CONFIG_USB_INVENTRA_DMA

/* Peripheral rx (OUT) using Mentor DMA works as follows:
	- Only mode 0 is used.

	- Request is queued by the gadget class driver.
		-> if queue was previously empty, rxstate()

	- Host sends OUT token which causes an endpoint interrupt
	  /\	-> RxReady
	  |	    -> if request queued, call rxstate
	  |	       /\	-> setup DMA
	  |	       |	-> DMA interrupt on completion
	  |	       |	   -> RxReady
	  |	       |	      -> stop DMA
	  |	       |	      -> ack the read
	  |	       |	      -> if data recd = max expected
	  |	       |		 by the request, or host
	  |	       |		 sent a short packet,
	  |	       |		 complete the request,
	  |	       |		 and start the next one.
	  |	       |_____________________________________|
	  |				 else just wait for the host
	  |				 to send the next OUT token.
	  |__________________________________________________|

 * Non-Mentor DMA engines can of course work differently.
 */

#endif

/*
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void rxstate(struct musb *musb, struct musb_request *req)
{
	const u8		epnum = req->epnum;
	struct usb_request	*request = &req->request;
	struct musb_ep		*musb_ep = &musb->endpoints[epnum].ep_out;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	unsigned		fifo_count = 0;
	u16			len = musb_ep->packet_sz;
	u16			csr = musb_readw(epio, MUSB_RXCSR);

	/* We shouldn't get here while DMA is active, but we do... */
	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
		DBG(4, "DMA pending...\n");
		return;
	}

	if (csr & MUSB_RXCSR_P_SENDSTALL) {
		DBG(5, "%s stalling, RXCSR %04x\n",
		    musb_ep->end_point.name, csr);
		return;
	}

	if (is_cppi_enabled() && musb_ep->dma) {
		struct dma_controller	*c = musb->dma_controller;
		struct dma_channel	*channel = musb_ep->dma;

		/* NOTE:  CPPI won't actually stop advancing the DMA
		 * queue after short packet transfers, so this is almost
		 * always going to run as IRQ-per-packet DMA so that
		 * faults will be handled correctly.
		 */
		if (c->channel_program(channel,
				musb_ep->packet_sz,
				!request->short_not_ok,
				request->dma + request->actual,
				request->length - request->actual)) {

			/* make sure that if an rxpkt arrived after the irq,
			 * the cppi engine will be ready to take it as soon
			 * as DMA is enabled
			 */
			csr &= ~(MUSB_RXCSR_AUTOCLEAR
					| MUSB_RXCSR_DMAMODE);
			csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS;
			musb_writew(epio, MUSB_RXCSR, csr);
			return;
		}
	}

	if (csr & MUSB_RXCSR_RXPKTRDY) {
		len = musb_readw(epio, MUSB_RXCOUNT);
		if (request->actual < request->length) {
#ifdef CONFIG_USB_INVENTRA_DMA
			if (is_dma_capable() && musb_ep->dma) {
				struct dma_controller	*c;
				struct dma_channel	*channel;
				int			use_dma = 0;

				c = musb->dma_controller;
				channel = musb_ep->dma;

	/* We use DMA Req mode 0 in rx_csr, and DMA controller operates in
	 * mode 0 only. So we do not get endpoint interrupts due to DMA
	 * completion. We only get interrupts from DMA controller.
	 *
	 * We could operate in DMA mode 1 if we knew the size of the transfer
	 * in advance. For mass storage class, request->length = what the host
	 * sends, so that'd work.  But for pretty much everything else,
	 * request->length is routinely more than what the host sends. For
	 * most of these gadgets, the end of the transfer is signified either
	 * by a short packet, or by filling the last byte of the buffer.
	 * (Sending extra data in that last packet should trigger an overflow
	 * fault.)  But in mode 1, we don't get a DMA completion interrupt
	 * for short packets.
	 *
	 * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
	 * to get endpoint interrupt on every DMA req, but that didn't seem
	 * to work reliably.
	 *
	 * REVISIT an updated g_file_storage can set req->short_not_ok, which
	 * then becomes usable as a runtime "use mode 1" hint...
	 */

				csr |= MUSB_RXCSR_DMAENAB;
#ifdef USE_MODE1
				csr |= MUSB_RXCSR_AUTOCLEAR;
				/* csr |= MUSB_RXCSR_DMAMODE; */

				/* this special sequence (enabling and then
				 * disabling MUSB_RXCSR_DMAMODE) is required
				 * to get DMAReq to activate
				 */
				musb_writew(epio, MUSB_RXCSR,
					csr | MUSB_RXCSR_DMAMODE);
#endif
				musb_writew(epio, MUSB_RXCSR, csr);

				if (request->actual < request->length) {
					int transfer_size = 0;
#ifdef USE_MODE1
					transfer_size = min(request->length,
							channel->max_len);
#else
					transfer_size = len;
#endif
					if (transfer_size <= musb_ep->packet_sz)
						musb_ep->dma->desired_mode = 0;
					else
						musb_ep->dma->desired_mode = 1;

					use_dma = c->channel_program(
							channel,
							musb_ep->packet_sz,
							channel->desired_mode,
							request->dma
							+ request->actual,
							transfer_size);
				}

				if (use_dma)
					return;
			}
#endif	/* Mentor's DMA */

			fifo_count = request->length - request->actual;
			DBG(3, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
					musb_ep->end_point.name,
					len, fifo_count,
					musb_ep->packet_sz);

			fifo_count = min_t(unsigned, len, fifo_count);

#ifdef CONFIG_USB_TUSB_OMAP_DMA
			if (tusb_dma_omap() && musb_ep->dma) {
				struct dma_controller *c = musb->dma_controller;
				struct dma_channel *channel = musb_ep->dma;
				u32 dma_addr = request->dma + request->actual;
				int ret;

				ret = c->channel_program(channel,
						musb_ep->packet_sz,
						channel->desired_mode,
						dma_addr,
						fifo_count);
				if (ret)
					return;
			}
#endif

			musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
					(request->buf + request->actual));
			request->actual += fifo_count;

			/* REVISIT if we left anything in the fifo, flush
			 * it and report -EOVERFLOW
			 */

			/* ack the read! */
			csr |= MUSB_RXCSR_P_WZC_BITS;
			csr &= ~MUSB_RXCSR_RXPKTRDY;
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}

	/* reached the end, or short packet detected */
	if (request->actual == request->length || len < musb_ep->packet_sz)
		musb_g_giveback(musb_ep, request, 0);
}

/*
 * Data ready for a request; called from IRQ
 */
void musb_g_rx(struct musb *musb, u8 epnum)
{
	u16			csr;
	struct usb_request	*request;
	void __iomem		*mbase = musb->mregs;
	struct musb_ep		*musb_ep = &musb->endpoints[epnum].ep_out;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct dma_channel	*dma;

	musb_ep_select(mbase, epnum);

	request = next_request(musb_ep);

	csr = musb_readw(epio, MUSB_RXCSR);
	dma = is_dma_capable() ? musb_ep->dma : NULL;

	DBG(4, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name,
			csr, dma ? " (dma)" : "", request);

	if (csr & MUSB_RXCSR_P_SENTSTALL) {
		csr |= MUSB_RXCSR_P_WZC_BITS;
		csr &= ~MUSB_RXCSR_P_SENTSTALL;
		musb_writew(epio, MUSB_RXCSR, csr);
		return;
	}

	if (csr & MUSB_RXCSR_P_OVERRUN) {
		/* csr |= MUSB_RXCSR_P_WZC_BITS; */
		csr &= ~MUSB_RXCSR_P_OVERRUN;
		musb_writew(epio, MUSB_RXCSR, csr);

		DBG(3, "%s iso overrun on %p\n", musb_ep->name, request);
		if (request && request->status == -EINPROGRESS)
			request->status = -EOVERFLOW;
	}
	if (csr & MUSB_RXCSR_INCOMPRX) {
		/* REVISIT not necessarily an error */
		DBG(4, "%s, incomprx\n", musb_ep->end_point.name);
	}

	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		/* "should not happen"; likely RXPKTRDY pending for DMA */
		DBG((csr & MUSB_RXCSR_DMAENAB) ? 4 : 1,
			"%s busy, csr %04x\n",
			musb_ep->end_point.name, csr);
		return;
	}
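
	/*
	 * If DMAENAB is still set, the transfer that rxstate() handed to the
	 * DMA engine has completed: turn DMA off, account for the bytes it
	 * moved, and decide below whether the request is now finished.
	 */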
	if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
		csr &= ~(MUSB_RXCSR_AUTOCLEAR
				| MUSB_RXCSR_DMAENAB
				| MUSB_RXCSR_DMAMODE);
		musb_writew(epio, MUSB_RXCSR,
			MUSB_RXCSR_P_WZC_BITS | csr);

		request->actual += musb_ep->dma->actual_len;

		DBG(4, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n",
			epnum, csr,
			musb_readw(epio, MUSB_RXCSR),
			musb_ep->dma->actual_len, request);

#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA)
		/* Autoclear doesn't clear RxPktRdy for short packets */
		if ((dma->desired_mode == 0)
				|| (dma->actual_len
					& (musb_ep->packet_sz - 1))) {
			/* ack the read! */
			csr &= ~MUSB_RXCSR_RXPKTRDY;
			musb_writew(epio, MUSB_RXCSR, csr);
		}

		/* incomplete, and not short? wait for next IN packet */
		if ((request->actual < request->length)
				&& (musb_ep->dma->actual_len
					== musb_ep->packet_sz))
			return;
#endif
		musb_g_giveback(musb_ep, request, 0);

		request = next_request(musb_ep);
		if (!request)
			return;
	}

	/* analyze request if the ep is hot */
	if (request)
		rxstate(musb, to_musb_request(request));
	else
		DBG(3, "packet waiting for %s%s request\n",
				musb_ep->desc ? "" : "inactive ",
				musb_ep->end_point.name);
	return;
}

/* ------------------------------------------------------------ */

static int musb_gadget_enable(struct usb_ep *ep,
			const struct usb_endpoint_descriptor *desc)
{
	unsigned long		flags;
	struct musb_ep		*musb_ep;
	struct musb_hw_ep	*hw_ep;
	void __iomem		*regs;
	struct musb		*musb;
	void __iomem	*mbase;
	u8		epnum;
	u16		csr;
	unsigned	tmp;
	int		status = -EINVAL;

	if (!ep || !desc)
		return -EINVAL;

	musb_ep = to_musb_ep(ep);
	hw_ep = musb_ep->hw_ep;
	regs = hw_ep->regs;
	musb = musb_ep->musb;
	mbase = musb->mregs;
	epnum = musb_ep->current_epnum;

	spin_lock_irqsave(&musb->lock, flags);

	if (musb_ep->desc) {
		status = -EBUSY;
		goto fail;
	}
	musb_ep->type = usb_endpoint_type(desc);

	/* check direction and (later) maxpacket size against endpoint */
	if (usb_endpoint_num(desc) != epnum)
		goto fail;

	/* REVISIT this rules out high bandwidth periodic transfers */
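	/*
	 * wMaxPacketSize bits 10:0 carry the packet size proper; bits 12:11
	 * would request additional (high bandwidth) transactions per
	 * microframe, which this code does not support, hence the mask.
	 */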
	tmp = le16_to_cpu(desc->wMaxPacketSize);
	if (tmp & ~0x07ff)
		goto fail;
	musb_ep->packet_sz = tmp;

	/* enable the interrupts for the endpoint, set the endpoint
	 * packet size (or fail), set the mode, clear the fifo
	 */
	musb_ep_select(mbase, epnum);
	if (usb_endpoint_dir_in(desc)) {
		u16 int_txe = musb_readw(mbase, MUSB_INTRTXE);

		if (hw_ep->is_shared_fifo)
			musb_ep->is_in = 1;
		if (!musb_ep->is_in)
			goto fail;
		if (tmp > hw_ep->max_packet_sz_tx)
			goto fail;

		int_txe |= (1 << epnum);
		musb_writew(mbase, MUSB_INTRTXE, int_txe);

		/* REVISIT if can_bulk_split(), use by updating "tmp";
		 * likewise high bandwidth periodic tx
		 */
		musb_writew(regs, MUSB_TXMAXP, tmp);

		csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
		if (musb_readw(regs, MUSB_TXCSR)
				& MUSB_TXCSR_FIFONOTEMPTY)
			csr |= MUSB_TXCSR_FLUSHFIFO;
		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
			csr |= MUSB_TXCSR_P_ISO;

		/* set twice in case of double buffering */
		musb_writew(regs, MUSB_TXCSR, csr);
		/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
		musb_writew(regs, MUSB_TXCSR, csr);

	} else {
		u16 int_rxe = musb_readw(mbase, MUSB_INTRRXE);

		if (hw_ep->is_shared_fifo)
			musb_ep->is_in = 0;
		if (musb_ep->is_in)
			goto fail;
		if (tmp > hw_ep->max_packet_sz_rx)
			goto fail;

		int_rxe |= (1 << epnum);
		musb_writew(mbase, MUSB_INTRRXE, int_rxe);

		/* REVISIT if can_bulk_combine() use by updating "tmp"
		 * likewise high bandwidth periodic rx
		 */
		musb_writew(regs, MUSB_RXMAXP, tmp);

		/* force shared fifo to OUT-only mode */
		if (hw_ep->is_shared_fifo) {
			csr = musb_readw(regs, MUSB_TXCSR);
			csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY);
			musb_writew(regs, MUSB_TXCSR, csr);
		}

		csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG;
		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
			csr |= MUSB_RXCSR_P_ISO;
		else if (musb_ep->type == USB_ENDPOINT_XFER_INT)
			csr |= MUSB_RXCSR_DISNYET;

		/* set twice in case of double buffering */
		musb_writew(regs, MUSB_RXCSR, csr);
		musb_writew(regs, MUSB_RXCSR, csr);
	}

	/* NOTE:  all the I/O code _should_ work fine without DMA, in case
	 * for some reason you run out of channels here.
	 */
	if (is_dma_capable() && musb->dma_controller) {
		struct dma_controller	*c = musb->dma_controller;

		musb_ep->dma = c->channel_alloc(c, hw_ep,
				(desc->bEndpointAddress & USB_DIR_IN));
	} else
		musb_ep->dma = NULL;

	musb_ep->desc = desc;
	musb_ep->busy = 0;
	musb_ep->wedged = 0;
	status = 0;

	pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
			musb_driver_name, musb_ep->end_point.name,
			({ char *s; switch (musb_ep->type) {
			case USB_ENDPOINT_XFER_BULK:	s = "bulk"; break;
			case USB_ENDPOINT_XFER_INT:	s = "int"; break;
			default:			s = "iso"; break;
			}; s; }),
			musb_ep->is_in ? "IN" : "OUT",
			musb_ep->dma ? "dma, " : "",
			musb_ep->packet_sz);

	schedule_work(&musb->irq_work);

fail:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}

/*
 * Disable an endpoint, flushing all requests queued.
 */
static int musb_gadget_disable(struct usb_ep *ep)
{
	unsigned long	flags;
	struct musb	*musb;
	u8		epnum;
	struct musb_ep	*musb_ep;
	void __iomem	*epio;
	int		status = 0;

	musb_ep = to_musb_ep(ep);
	musb = musb_ep->musb;
	epnum = musb_ep->current_epnum;
	epio = musb->endpoints[epnum].regs;

	spin_lock_irqsave(&musb->lock, flags);
	musb_ep_select(musb->mregs, epnum);

	/* zero the endpoint sizes */
	if (musb_ep->is_in) {
		u16 int_txe = musb_readw(musb->mregs, MUSB_INTRTXE);
		int_txe &= ~(1 << epnum);
		musb_writew(musb->mregs, MUSB_INTRTXE, int_txe);
		musb_writew(epio, MUSB_TXMAXP, 0);
	} else {
		u16 int_rxe = musb_readw(musb->mregs, MUSB_INTRRXE);
		int_rxe &= ~(1 << epnum);
		musb_writew(musb->mregs, MUSB_INTRRXE, int_rxe);
		musb_writew(epio, MUSB_RXMAXP, 0);
	}

	musb_ep->desc = NULL;

	/* abort all pending DMA and requests */
	nuke(musb_ep, -ESHUTDOWN);

	schedule_work(&musb->irq_work);

	spin_unlock_irqrestore(&(musb->lock), flags);

	DBG(2, "%s\n", musb_ep->end_point.name);

	return status;
}

/*
 * Allocate a request for an endpoint.
 * Reused by ep0 code.
 */
struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	struct musb_request	*request = NULL;

	request = kzalloc(sizeof *request, gfp_flags);
	if (request) {
		INIT_LIST_HEAD(&request->request.list);
		request->request.dma = DMA_ADDR_INVALID;
		request->epnum = musb_ep->current_epnum;
		request->ep = musb_ep;
	}

	return &request->request;
}

/*
 * Free a request
 * Reused by ep0 code.
 */
void musb_free_request(struct usb_ep *ep, struct usb_request *req)
{
	kfree(to_musb_request(req));
}
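
/*
 * For reference only (not part of this driver): a gadget function driver
 * typically reaches the alloc/queue/free paths above through the generic
 * usb_ep API, roughly as in this sketch; names such as my_ep, my_buf and
 * my_complete are illustrative.
 *
 *	struct usb_request *r = usb_ep_alloc_request(my_ep, GFP_ATOMIC);
 *
 *	r->buf = my_buf;
 *	r->length = my_len;
 *	r->complete = my_complete;	// invoked from musb_g_giveback()
 *	status = usb_ep_queue(my_ep, r, GFP_ATOMIC);
 *	...
 *	usb_ep_free_request(my_ep, r);
 */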

static LIST_HEAD(buffers);

struct free_record {
	struct list_head	list;
	struct device		*dev;
	unsigned		bytes;
	dma_addr_t		dma;
};

/*
 * Context: controller locked, IRQs blocked.
 */
static void musb_ep_restart(struct musb *musb, struct musb_request *req)
{
	DBG(3, "<== %s request %p len %u on hw_ep%d\n",
		req->tx ? "TX/IN" : "RX/OUT",
		&req->request, req->request.length, req->epnum);

	musb_ep_select(musb->mregs, req->epnum);
	if (req->tx)
		txstate(musb, req);
	else
		rxstate(musb, req);
}

static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
			gfp_t gfp_flags)
{
	struct musb_ep		*musb_ep;
	struct musb_request	*request;
	struct musb		*musb;
	int			status = 0;
	unsigned long		lockflags;

	if (!ep || !req)
		return -EINVAL;
	if (!req->buf)
		return -ENODATA;

	musb_ep = to_musb_ep(ep);
	musb = musb_ep->musb;

	request = to_musb_request(req);
	request->musb = musb;

	if (request->ep != musb_ep)
		return -EINVAL;

	DBG(4, "<== to %s request=%p\n", ep->name, req);

	/* request is mine now... */
	request->request.actual = 0;
	request->request.status = -EINPROGRESS;
	request->epnum = musb_ep->current_epnum;
	request->tx = musb_ep->is_in;
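
	/*
	 * Map (or sync) the request buffer for DMA here; musb_g_giveback()
	 * undoes the mapping before calling the completion callback.
	 */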
	if (is_dma_capable() && musb_ep->dma) {
		if (request->request.dma == DMA_ADDR_INVALID) {
			request->request.dma = dma_map_single(
					musb->controller,
					request->request.buf,
					request->request.length,
					request->tx
						? DMA_TO_DEVICE
						: DMA_FROM_DEVICE);
			request->mapped = 1;
		} else {
			dma_sync_single_for_device(musb->controller,
					request->request.dma,
					request->request.length,
					request->tx
						? DMA_TO_DEVICE
						: DMA_FROM_DEVICE);
			request->mapped = 0;
		}
	} else if (!req->buf) {
		return -ENODATA;
	} else
		request->mapped = 0;

	spin_lock_irqsave(&musb->lock, lockflags);

	/* don't queue if the ep is down */
	if (!musb_ep->desc) {
		DBG(4, "req %p queued to %s while ep %s\n",
				req, ep->name, "disabled");
		status = -ESHUTDOWN;
		goto cleanup;
	}

	/* add request to the list */
	list_add_tail(&(request->request.list), &(musb_ep->req_list));

	/* if this is the head of the queue, start i/o ... */
	if (!musb_ep->busy && &request->request.list == musb_ep->req_list.next)
		musb_ep_restart(musb, request);

cleanup:
	spin_unlock_irqrestore(&musb->lock, lockflags);
	return status;
}

static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	struct usb_request	*r;
	unsigned long		flags;
	int			status = 0;
	struct musb		*musb = musb_ep->musb;

	if (!ep || !request || to_musb_request(request)->ep != musb_ep)
		return -EINVAL;

	spin_lock_irqsave(&musb->lock, flags);

	list_for_each_entry(r, &musb_ep->req_list, list) {
		if (r == request)
			break;
	}
	if (r != request) {
		DBG(3, "request %p not queued to %s\n", request, ep->name);
		status = -EINVAL;
		goto done;
	}

	/* if the hardware doesn't have the request, easy ... */
	if (musb_ep->req_list.next != &request->list || musb_ep->busy)
		musb_g_giveback(musb_ep, request, -ECONNRESET);

	/* ... else abort the dma transfer ... */
	else if (is_dma_capable() && musb_ep->dma) {
		struct dma_controller	*c = musb->dma_controller;

		musb_ep_select(musb->mregs, musb_ep->current_epnum);
		if (c->channel_abort)
			status = c->channel_abort(musb_ep->dma);
		else
			status = -EBUSY;
		if (status == 0)
			musb_g_giveback(musb_ep, request, -ECONNRESET);
	} else {
		/* NOTE: by sticking to easily tested hardware/driver states,
		 * we leave counting of in-flight packets imprecise.
		 */
		musb_g_giveback(musb_ep, request, -ECONNRESET);
	}

done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}

/*
 * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx
 * any data but will queue requests.
 *
 * exported to ep0 code
 */
int musb_gadget_set_halt(struct usb_ep *ep, int value)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	u8			epnum = musb_ep->current_epnum;
	struct musb		*musb = musb_ep->musb;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	void __iomem		*mbase;
	unsigned long		flags;
	u16			csr;
	struct musb_request	*request;
	int			status = 0;

	if (!ep)
		return -EINVAL;
	mbase = musb->mregs;

	spin_lock_irqsave(&musb->lock, flags);

	if (USB_ENDPOINT_XFER_ISOC == musb_ep->type) {
		status = -EINVAL;
		goto done;
	}

	musb_ep_select(mbase, epnum);

	request = to_musb_request(next_request(musb_ep));
	if (value) {
		if (request) {
			DBG(3, "request in progress, cannot halt %s\n",
			    ep->name);
			status = -EAGAIN;
			goto done;
		}
		/* Cannot portably stall with non-empty FIFO */
		if (musb_ep->is_in) {
			csr = musb_readw(epio, MUSB_TXCSR);
			if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
				DBG(3, "FIFO busy, cannot halt %s\n", ep->name);
				status = -EAGAIN;
				goto done;
			}
		}
	} else
		musb_ep->wedged = 0;

	/* set/clear the stall and toggle bits */
	DBG(2, "%s: %s stall\n", ep->name, value ? "set" : "clear");
	if (musb_ep->is_in) {
		csr = musb_readw(epio, MUSB_TXCSR);
		csr |= MUSB_TXCSR_P_WZC_BITS
			| MUSB_TXCSR_CLRDATATOG;
		if (value)
			csr |= MUSB_TXCSR_P_SENDSTALL;
		else
			csr &= ~(MUSB_TXCSR_P_SENDSTALL
				| MUSB_TXCSR_P_SENTSTALL);
		csr &= ~MUSB_TXCSR_TXPKTRDY;
		musb_writew(epio, MUSB_TXCSR, csr);
	} else {
		csr = musb_readw(epio, MUSB_RXCSR);
		csr |= MUSB_RXCSR_P_WZC_BITS
			| MUSB_RXCSR_FLUSHFIFO
			| MUSB_RXCSR_CLRDATATOG;
		if (value)
			csr |= MUSB_RXCSR_P_SENDSTALL;
		else
			csr &= ~(MUSB_RXCSR_P_SENDSTALL
				| MUSB_RXCSR_P_SENTSTALL);
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	/* maybe start the first request in the queue */
	if (!musb_ep->busy && !value && request) {
		DBG(3, "restarting the request\n");
		musb_ep_restart(musb, request);
	}

done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}

/*
 * Sets the halt feature with clear-halt requests ignored (wedged).
 */
int musb_gadget_set_wedge(struct usb_ep *ep)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);

	if (!ep)
		return -EINVAL;

	musb_ep->wedged = 1;
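
	/* ep0 code checks this flag and refuses ClearFeature(ENDPOINT_HALT)
	 * from the host while the endpoint is wedged.
	 */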
	return usb_ep_set_halt(ep);
}

static int musb_gadget_fifo_status(struct usb_ep *ep)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	void __iomem		*epio = musb_ep->hw_ep->regs;
	int			retval = -EINVAL;

	if (musb_ep->desc && !musb_ep->is_in) {
		struct musb		*musb = musb_ep->musb;
		int			epnum = musb_ep->current_epnum;
		void __iomem		*mbase = musb->mregs;
		unsigned long		flags;

		spin_lock_irqsave(&musb->lock, flags);

		musb_ep_select(mbase, epnum);
		/* FIXME return zero unless RXPKTRDY is set */
		retval = musb_readw(epio, MUSB_RXCOUNT);

		spin_unlock_irqrestore(&musb->lock, flags);
	}
	return retval;
}

static void musb_gadget_fifo_flush(struct usb_ep *ep)
{
	struct musb_ep	*musb_ep = to_musb_ep(ep);
	struct musb	*musb = musb_ep->musb;
	u8		epnum = musb_ep->current_epnum;
	void __iomem	*epio = musb->endpoints[epnum].regs;
	void __iomem	*mbase;
	unsigned long	flags;
	u16		csr, int_txe;

	mbase = musb->mregs;

	spin_lock_irqsave(&musb->lock, flags);
	musb_ep_select(mbase, (u8) epnum);

	/* disable interrupts */
	int_txe = musb_readw(mbase, MUSB_INTRTXE);
	musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

	if (musb_ep->is_in) {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
			csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
			musb_writew(epio, MUSB_TXCSR, csr);
		}
	} else {
		csr = musb_readw(epio, MUSB_RXCSR);
		csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
		musb_writew(epio, MUSB_RXCSR, csr);
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	/* re-enable interrupt */
	musb_writew(mbase, MUSB_INTRTXE, int_txe);
	spin_unlock_irqrestore(&musb->lock, flags);
}
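
/*
 * Per-endpoint operations exposed to gadget function drivers; the gadget
 * core's usb_ep_*() helpers (usb_ep_enable, usb_ep_queue, usb_ep_set_halt,
 * usb_ep_fifo_flush, ...) dispatch to these.
 */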
static const struct usb_ep_ops musb_ep_ops = {
	.enable		= musb_gadget_enable,
	.disable	= musb_gadget_disable,
	.alloc_request	= musb_alloc_request,
	.free_request	= musb_free_request,
	.queue		= musb_gadget_queue,
	.dequeue	= musb_gadget_dequeue,
	.set_halt	= musb_gadget_set_halt,
	.set_wedge	= musb_gadget_set_wedge,
	.fifo_status	= musb_gadget_fifo_status,
	.fifo_flush	= musb_gadget_fifo_flush
};

/* ----------------------------------------------------------------------- */

static int musb_gadget_get_frame(struct usb_gadget *gadget)
{
	struct musb	*musb = gadget_to_musb(gadget);

	return (int)musb_readw(musb->mregs, MUSB_FRAME);
}

static int musb_gadget_wakeup(struct usb_gadget *gadget)
{
	struct musb	*musb = gadget_to_musb(gadget);
	void __iomem	*mregs = musb->mregs;
	unsigned long	flags;
	int		status = -EINVAL;
	u8		power, devctl;
	int		retries;

	spin_lock_irqsave(&musb->lock, flags);

	switch (musb->xceiv->state) {
	case OTG_STATE_B_PERIPHERAL:
		/* NOTE:  OTG state machine doesn't include B_SUSPENDED;
		 * that's part of the standard usb 1.1 state machine, and
		 * doesn't affect OTG transitions.
		 */
		if (musb->may_wakeup && musb->is_suspended)
			break;
		goto done;
	case OTG_STATE_B_IDLE:
		/* Start SRP ... OTG not required. */
		devctl = musb_readb(mregs, MUSB_DEVCTL);
		DBG(2, "Sending SRP: devctl: %02x\n", devctl);
		devctl |= MUSB_DEVCTL_SESSION;
		musb_writeb(mregs, MUSB_DEVCTL, devctl);
		devctl = musb_readb(mregs, MUSB_DEVCTL);
		retries = 100;
		while (!(devctl & MUSB_DEVCTL_SESSION)) {
			devctl = musb_readb(mregs, MUSB_DEVCTL);
			if (retries-- < 1)
				break;
		}
		retries = 10000;
		while (devctl & MUSB_DEVCTL_SESSION) {
			devctl = musb_readb(mregs, MUSB_DEVCTL);
			if (retries-- < 1)
				break;
		}

		/* Block idling for at least 1s */
		musb_platform_try_idle(musb,
			jiffies + msecs_to_jiffies(1 * HZ));

		status = 0;
		goto done;
	default:
		DBG(2, "Unhandled wake: %s\n", otg_state_string(musb));
		goto done;
	}

	status = 0;

	power = musb_readb(mregs, MUSB_POWER);
	power |= MUSB_POWER_RESUME;
	musb_writeb(mregs, MUSB_POWER, power);
	DBG(2, "issue wakeup\n");

	/* FIXME do this next chunk in a timer callback, no udelay */
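	/* Remote-wakeup resume signaling must be driven for 1-15 ms per the
	 * USB 2.0 spec; the 2 ms busy-wait below stays inside that window.
	 */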
	mdelay(2);

	power = musb_readb(mregs, MUSB_POWER);
	power &= ~MUSB_POWER_RESUME;
	musb_writeb(mregs, MUSB_POWER, power);
done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}

static int
musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
{
	struct musb	*musb = gadget_to_musb(gadget);

	musb->is_self_powered = !!is_selfpowered;
	return 0;
}

static void musb_pullup(struct musb *musb, int is_on)
{
	u8 power;

	power = musb_readb(musb->mregs, MUSB_POWER);
	if (is_on)
		power |= MUSB_POWER_SOFTCONN;
	else
		power &= ~MUSB_POWER_SOFTCONN;

	/* FIXME if on, HdrcStart; if off, HdrcStop */

	DBG(3, "gadget %s D+ pullup %s\n",
		musb->gadget_driver->function, is_on ? "on" : "off");
	musb_writeb(musb->mregs, MUSB_POWER, power);
}

#if 0
static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
{
	DBG(2, "<= %s =>\n", __func__);

	/*
	 * FIXME iff driver's softconnect flag is set (as it is during probe,
	 * though that can clear it), just musb_pullup().
	 */

	return -EINVAL;
}
#endif

static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
{
	struct musb	*musb = gadget_to_musb(gadget);

	if (!musb->xceiv->set_power)
		return -EOPNOTSUPP;
	return otg_set_power(musb->xceiv, mA);
}

static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
{
	struct musb	*musb = gadget_to_musb(gadget);
	unsigned long	flags;

	is_on = !!is_on;

	/* NOTE: this assumes we are sensing vbus; we'd rather
	 * not pullup unless the B-session is active.
	 */
	spin_lock_irqsave(&musb->lock, flags);
	if (is_on != musb->softconnect) {
		musb->softconnect = is_on;
		musb_pullup(musb, is_on);
	}
	spin_unlock_irqrestore(&musb->lock, flags);
	return 0;
}

static const struct usb_gadget_ops musb_gadget_operations = {
	.get_frame		= musb_gadget_get_frame,
	.wakeup			= musb_gadget_wakeup,
	.set_selfpowered	= musb_gadget_set_self_powered,
	/* .vbus_session	= musb_gadget_vbus_session, */
	.vbus_draw		= musb_gadget_vbus_draw,
	.pullup			= musb_gadget_pullup,
};

/* ----------------------------------------------------------------------- */

/* Registration */

/* Only this registration code "knows" the rule (from USB standards)
 * about there being only one external upstream port.  It assumes
 * all peripheral ports are external...
 */
static struct musb *the_gadget;

static void musb_gadget_release(struct device *dev)
{
	/* kref_put(WHAT) */
	dev_dbg(dev, "%s\n", __func__);
}

static void __init
init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
{
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;

	memset(ep, 0, sizeof *ep);

	ep->current_epnum = epnum;
	ep->musb = musb;
	ep->hw_ep = hw_ep;
	ep->is_in = is_in;

	INIT_LIST_HEAD(&ep->req_list);

	sprintf(ep->name, "ep%d%s", epnum,
			(!epnum || hw_ep->is_shared_fifo) ? "" : (
				is_in ? "in" : "out"));
	ep->end_point.name = ep->name;
	INIT_LIST_HEAD(&ep->end_point.ep_list);
	if (!epnum) {
		ep->end_point.maxpacket = 64;
		ep->end_point.ops = &musb_g_ep0_ops;
		musb->g.ep0 = &ep->end_point;
	} else {
		if (is_in)
			ep->end_point.maxpacket = hw_ep->max_packet_sz_tx;
		else
			ep->end_point.maxpacket = hw_ep->max_packet_sz_rx;
		ep->end_point.ops = &musb_ep_ops;
		list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
	}
}
/*
 * Initialize the endpoints exposed to peripheral drivers, with backlinks
 * to the rest of the driver state.
 */
static inline void __init musb_g_init_endpoints(struct musb *musb)
{
	u8			epnum;
	struct musb_hw_ep	*hw_ep;
	unsigned		count = 0;

	/* initialize endpoint list just once */
	INIT_LIST_HEAD(&(musb->g.ep_list));

	for (epnum = 0, hw_ep = musb->endpoints;
			epnum < musb->nr_endpoints;
			epnum++, hw_ep++) {
		if (hw_ep->is_shared_fifo /* || !epnum */) {
			init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
			count++;
		} else {
			if (hw_ep->max_packet_sz_tx) {
				init_peripheral_ep(musb, &hw_ep->ep_in,
							epnum, 1);
				count++;
			}
			if (hw_ep->max_packet_sz_rx) {
				init_peripheral_ep(musb, &hw_ep->ep_out,
							epnum, 0);
				count++;
			}
		}
	}
}
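/*
 * Example (sketch only, not part of this driver): a gadget driver's bind()
 * can walk gadget->ep_list, which the loop above populates, and claim an
 * endpoint by the "ep%d%s" names built in init_peripheral_ep() ("ep1in",
 * "ep2out", or just "ep1" for a shared-FIFO endpoint).
 */
#if 0
static struct usb_ep *example_find_ep(struct usb_gadget *gadget,
		const char *name)
{
	struct usb_ep *ep;

	list_for_each_entry(ep, &gadget->ep_list, ep_list) {
		if (strcmp(ep->name, name) == 0)
			return ep;	/* caller still calls usb_ep_enable() */
	}
	return NULL;
}
#endif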
/* called once during driver setup to initialize and link into
 * the driver model; memory is zeroed.
 */
int __init musb_gadget_setup(struct musb *musb)
{
	int status;

	/* REVISIT minor race: if (erroneously) setting up two
	 * musb peripherals at the same time, only the bus lock
	 * is probably held.
	 */
	if (the_gadget)
		return -EBUSY;
	the_gadget = musb;

	musb->g.ops = &musb_gadget_operations;
	musb->g.is_dualspeed = 1;
	musb->g.speed = USB_SPEED_UNKNOWN;

	/* this "gadget" abstracts/virtualizes the controller */
	dev_set_name(&musb->g.dev, "gadget");
	musb->g.dev.parent = musb->controller;
	musb->g.dev.dma_mask = musb->controller->dma_mask;
	musb->g.dev.release = musb_gadget_release;
	musb->g.name = musb_driver_name;

	if (is_otg_enabled(musb))
		musb->g.is_otg = 1;

	musb_g_init_endpoints(musb);

	musb->is_active = 0;
	musb_platform_try_idle(musb, 0);

	status = device_register(&musb->g.dev);
	if (status != 0)
		the_gadget = NULL;
	return status;
}
void musb_gadget_cleanup(struct musb *musb)
{
	if (musb != the_gadget)
		return;

	device_unregister(&musb->g.dev);
	the_gadget = NULL;
}
/*
 * Register the gadget driver. Used by gadget drivers when
 * registering themselves with the controller.
 *
 * -EINVAL something went wrong (not driver)
 * -EBUSY another gadget is already using the controller
 * -ENOMEM no memory to perform the operation
 *
 * @param driver the gadget driver
 * @return <0 if error, 0 if everything is fine
 */
int usb_gadget_register_driver(struct usb_gadget_driver *driver)
{
	int retval;
	unsigned long flags;
	struct musb *musb = the_gadget;

	if (!driver
			|| driver->speed != USB_SPEED_HIGH
			|| !driver->bind
			|| !driver->setup)
		return -EINVAL;

	/* driver must be initialized to support peripheral mode */
	if (!musb || !(musb->board_mode == MUSB_OTG
			|| musb->board_mode == MUSB_PERIPHERAL)) {
		DBG(1, "%s, no dev??\n", __func__);
		return -ENODEV;
	}

	DBG(3, "registering driver %s\n", driver->function);
	spin_lock_irqsave(&musb->lock, flags);

	if (musb->gadget_driver) {
		DBG(1, "%s is already bound to %s\n",
				musb_driver_name,
				musb->gadget_driver->driver.name);
		retval = -EBUSY;
	} else {
		musb->gadget_driver = driver;
		musb->g.dev.driver = &driver->driver;
		driver->driver.bus = NULL;
		musb->softconnect = 1;
		retval = 0;
	}

	spin_unlock_irqrestore(&musb->lock, flags);

	if (retval == 0) {
		retval = driver->bind(&musb->g);
		if (retval != 0) {
			DBG(3, "bind to driver %s failed --> %d\n",
					driver->driver.name, retval);
			musb->gadget_driver = NULL;
			musb->g.dev.driver = NULL;
		}

		spin_lock_irqsave(&musb->lock, flags);

		otg_set_peripheral(musb->xceiv, &musb->g);
		musb->is_active = 1;

		/* FIXME this ignores the softconnect flag.  Drivers are
		 * allowed to hold the peripheral inactive until, for example,
		 * userspace hooks up printer hardware or DSP codecs, so
		 * hosts only see fully functional devices.
		 */

		if (!is_otg_enabled(musb))
			musb_start(musb);

		otg_set_peripheral(musb->xceiv, &musb->g);

		spin_unlock_irqrestore(&musb->lock, flags);

		if (is_otg_enabled(musb)) {
			DBG(3, "OTG startup...\n");

			/* REVISIT: funcall to other code, which also
			 * handles power budgeting ... this way also
			 * ensures HdrcStart is indirectly called.
			 */
			retval = usb_add_hcd(musb_to_hcd(musb), -1, 0);
			if (retval < 0) {
				DBG(1, "add_hcd failed, %d\n", retval);
				spin_lock_irqsave(&musb->lock, flags);
				otg_set_peripheral(musb->xceiv, NULL);
				musb->gadget_driver = NULL;
				musb->g.dev.driver = NULL;
				spin_unlock_irqrestore(&musb->lock, flags);
			}
		}
	}

	return retval;
}
EXPORT_SYMBOL(usb_gadget_register_driver);
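/*
 * Example (sketch only, hypothetical "example" driver): the minimum a
 * gadget driver must provide so the checks above pass -- high speed plus
 * bind() and setup() callbacks; unbind() is needed later by
 * usb_gadget_unregister_driver().
 */
#if 0
static struct usb_gadget_driver example_driver = {
	.function	= "example",
	.speed		= USB_SPEED_HIGH,
	.bind		= example_bind,		/* allocate eps, descriptors */
	.unbind		= example_unbind,
	.setup		= example_setup,	/* handle control requests */
	.disconnect	= example_disconnect,
	.driver = {
		.name	= "g_example",
		.owner	= THIS_MODULE,
	},
};

static int __init example_init(void)
{
	return usb_gadget_register_driver(&example_driver);
}
module_init(example_init);
#endif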
static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
{
	int			i;
	struct musb_hw_ep	*hw_ep;

	/* don't disconnect if it's not connected */
	if (musb->g.speed == USB_SPEED_UNKNOWN)
		driver = NULL;
	else
		musb->g.speed = USB_SPEED_UNKNOWN;

	/* deactivate the hardware */
	if (musb->softconnect) {
		musb->softconnect = 0;
		musb_pullup(musb, 0);
	}
	musb_stop(musb);

	/* killing any outstanding requests will quiesce the driver;
	 * then report disconnect
	 */
	if (driver) {
		for (i = 0, hw_ep = musb->endpoints;
				i < musb->nr_endpoints;
				i++, hw_ep++) {
			musb_ep_select(musb->mregs, i);
			if (hw_ep->is_shared_fifo /* || !epnum */) {
				nuke(&hw_ep->ep_in, -ESHUTDOWN);
			} else {
				if (hw_ep->max_packet_sz_tx)
					nuke(&hw_ep->ep_in, -ESHUTDOWN);
				if (hw_ep->max_packet_sz_rx)
					nuke(&hw_ep->ep_out, -ESHUTDOWN);
			}
		}

		spin_unlock(&musb->lock);
		driver->disconnect(&musb->g);
		spin_lock(&musb->lock);
	}
}
/*
 * Unregister the gadget driver. Used by gadget drivers when
 * unregistering themselves from the controller.
 *
 * @param driver the gadget driver to unregister
 */
int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
{
	unsigned long	flags;
	int		retval = 0;
	struct musb	*musb = the_gadget;

	if (!driver || !driver->unbind || !musb)
		return -EINVAL;

	/* REVISIT always use otg_set_peripheral() here too;
	 * this needs to shut down the OTG engine.
	 */

	spin_lock_irqsave(&musb->lock, flags);

#ifdef	CONFIG_USB_MUSB_OTG
	musb_hnp_stop(musb);
#endif

	if (musb->gadget_driver == driver) {

		(void) musb_gadget_vbus_draw(&musb->g, 0);

		musb->xceiv->state = OTG_STATE_UNDEFINED;
		stop_activity(musb, driver);
		otg_set_peripheral(musb->xceiv, NULL);

		DBG(3, "unregistering driver %s\n", driver->function);

		spin_unlock_irqrestore(&musb->lock, flags);
		driver->unbind(&musb->g);
		spin_lock_irqsave(&musb->lock, flags);

		musb->gadget_driver = NULL;
		musb->g.dev.driver = NULL;

		musb->is_active = 0;
		musb_platform_try_idle(musb, 0);
	} else
		retval = -EINVAL;
	spin_unlock_irqrestore(&musb->lock, flags);

	if (is_otg_enabled(musb) && retval == 0) {
		usb_remove_hcd(musb_to_hcd(musb));
		/* FIXME we need to be able to register another
		 * gadget driver here and have everything work;
		 * that currently misbehaves.
		 */
	}

	return retval;
}
EXPORT_SYMBOL(usb_gadget_unregister_driver);
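/*
 * Example (sketch only, continuing the hypothetical "example" driver
 * above): module unload mirrors registration; unbind() must be non-NULL
 * for the check at the top of usb_gadget_unregister_driver() to pass.
 */
#if 0
static void __exit example_exit(void)
{
	usb_gadget_unregister_driver(&example_driver);
}
module_exit(example_exit);
#endif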
/* ----------------------------------------------------------------------- */

/* lifecycle operations called through plat_uds.c */

void musb_g_resume(struct musb *musb)
{
	musb->is_suspended = 0;
	switch (musb->xceiv->state) {
	case OTG_STATE_B_IDLE:
		break;
	case OTG_STATE_B_WAIT_ACON:
	case OTG_STATE_B_PERIPHERAL:
		musb->is_active = 1;
		if (musb->gadget_driver && musb->gadget_driver->resume) {
			spin_unlock(&musb->lock);
			musb->gadget_driver->resume(&musb->g);
			spin_lock(&musb->lock);
		}
		break;
	default:
		WARNING("unhandled RESUME transition (%s)\n",
				otg_state_string(musb));
	}
}
/* called when SOF packets stop for 3+ msec */
void musb_g_suspend(struct musb *musb)
{
	u8	devctl;

	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
	DBG(3, "devctl %02x\n", devctl);

	switch (musb->xceiv->state) {
	case OTG_STATE_B_IDLE:
		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
			musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
		break;
	case OTG_STATE_B_PERIPHERAL:
		musb->is_suspended = 1;
		if (musb->gadget_driver && musb->gadget_driver->suspend) {
			spin_unlock(&musb->lock);
			musb->gadget_driver->suspend(&musb->g);
			spin_lock(&musb->lock);
		}
		break;
	default:
		/* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
		 * A_PERIPHERAL may need care too
		 */
		WARNING("unhandled SUSPEND transition (%s)\n",
				otg_state_string(musb));
	}
}
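/*
 * Example (sketch only, hypothetical names): the optional .suspend and
 * .resume hooks of a gadget driver are what the two functions above call
 * with musb->lock dropped.  A typical function driver just pauses and
 * restarts its request queuing around bus suspend.
 */
#if 0
static void example_suspend(struct usb_gadget *gadget)
{
	/* host stopped sending SOFs: stop queuing new usb_requests */
}

static void example_resume(struct usb_gadget *gadget)
{
	/* bus activity resumed: requeue pending transfers */
}
/* hooked up via .suspend / .resume in struct usb_gadget_driver */
#endif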
/* Called during SRP */
void musb_g_wakeup(struct musb *musb)
{
	musb_gadget_wakeup(&musb->g);
}
/* called when VBUS drops below session threshold, and in other cases */
void musb_g_disconnect(struct musb *musb)
{
	void __iomem	*mregs = musb->mregs;
	u8	devctl = musb_readb(mregs, MUSB_DEVCTL);

	DBG(3, "devctl %02x\n", devctl);

	/* clear HR */
	musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION);

	/* don't draw vbus until new b-default session */
	(void) musb_gadget_vbus_draw(&musb->g, 0);

	musb->g.speed = USB_SPEED_UNKNOWN;
	if (musb->gadget_driver && musb->gadget_driver->disconnect) {
		spin_unlock(&musb->lock);
		musb->gadget_driver->disconnect(&musb->g);
		spin_lock(&musb->lock);
	}

	switch (musb->xceiv->state) {
	default:
#ifdef	CONFIG_USB_MUSB_OTG
		DBG(2, "Unhandled disconnect %s, setting a_idle\n",
			otg_state_string(musb));
		musb->xceiv->state = OTG_STATE_A_IDLE;
		MUSB_HST_MODE(musb);
		break;
	case OTG_STATE_A_PERIPHERAL:
		musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
		MUSB_HST_MODE(musb);
		break;
	case OTG_STATE_B_WAIT_ACON:
	case OTG_STATE_B_HOST:
#endif
	case OTG_STATE_B_PERIPHERAL:
	case OTG_STATE_B_IDLE:
		musb->xceiv->state = OTG_STATE_B_IDLE;
		break;
	case OTG_STATE_B_SRP_INIT:
		break;
	}

	musb->is_active = 0;
}
void musb_g_reset(struct musb *musb)
__releases(musb->lock)
__acquires(musb->lock)
{
	void __iomem	*mbase = musb->mregs;
	u8		devctl = musb_readb(mbase, MUSB_DEVCTL);
	u8		power;

	DBG(3, "<== %s addr=%x driver '%s'\n",
			(devctl & MUSB_DEVCTL_BDEVICE)
				? "B-Device" : "A-Device",
			musb_readb(mbase, MUSB_FADDR),
			musb->gadget_driver
				? musb->gadget_driver->driver.name
				: NULL
			);

	/* report disconnect, if we didn't already (flushing EP state) */
	if (musb->g.speed != USB_SPEED_UNKNOWN)
		musb_g_disconnect(musb);

	/* clear HR */
	else if (devctl & MUSB_DEVCTL_HR)
		musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);

	/* what speed did we negotiate? */
	power = musb_readb(mbase, MUSB_POWER);
	musb->g.speed = (power & MUSB_POWER_HSMODE)
			? USB_SPEED_HIGH : USB_SPEED_FULL;

	/* start in USB_STATE_DEFAULT */
	musb->is_active = 1;
	musb->is_suspended = 0;
	MUSB_DEV_MODE(musb);
	musb->address = 0;
	musb->ep0_state = MUSB_EP0_STAGE_SETUP;

	musb->may_wakeup = 0;
	musb->g.b_hnp_enable = 0;
	musb->g.a_alt_hnp_support = 0;
	musb->g.a_hnp_support = 0;

	/* Normal reset, as B-Device;
	 * or else after HNP, as A-Device
	 */
	if (devctl & MUSB_DEVCTL_BDEVICE) {
		musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
		musb->g.is_a_peripheral = 0;
	} else if (is_otg_enabled(musb)) {
		musb->xceiv->state = OTG_STATE_A_PERIPHERAL;
		musb->g.is_a_peripheral = 1;
	} else
		WARN_ON(1);

	/* start with default limits on VBUS power draw */
	(void) musb_gadget_vbus_draw(&musb->g,
			is_otg_enabled(musb) ? 8 : 100);
}
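/*
 * Example (sketch only, hypothetical names): the negotiated speed latched
 * into musb->g.speed above is what a gadget driver sees as gadget->speed,
 * e.g. when choosing a bulk endpoint's wMaxPacketSize while answering
 * GET_DESCRIPTOR in its setup() callback.
 */
#if 0
static unsigned example_bulk_maxpacket(struct usb_gadget *gadget)
{
	/* high speed bulk uses 512-byte packets, full speed uses 64 */
	return (gadget->speed == USB_SPEED_HIGH) ? 512 : 64;
}
#endif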