/*
 * MUSB OTG driver host support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/list.h>

#include "musb_core.h"
#include "musb_host.h"

/* MUSB HOST status 22-mar-2006
 *
 * - There's still lots of partial code duplication for fault paths, so
 *   they aren't handled as consistently as they need to be.
 *
 * - PIO mostly behaved when last tested.
 *     + including ep0, with all usbtest cases 9, 10
 *     + usbtest 14 (ep0out) doesn't seem to run at all
 *     + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
 *       configurations, but otherwise double buffering passes basic tests.
 *     + for 2.6.N, for N > ~10, needs API changes for hcd framework.
 *
 * - DMA (CPPI) ... partially behaves, not currently recommended
 *     + about 1/15 the speed of typical EHCI implementations (PCI)
 *     + RX, all too often reqpkt seems to misbehave after tx
 *     + TX, no known issues (other than evident silicon issue)
 *
 * - DMA (Mentor/OMAP) ...has at least toggle update problems
 *
 * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet
 *   starvation ... nothing yet for TX, interrupt, or bulk.
 *
 * - Not tested with HNP, but some SRP paths seem to behave.
 *
 * NOTE 24-August-2006:
 *
 * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
 *   extra endpoint for periodic use enabling hub + keybd + mouse.  That
 *   mostly works, except that with "usbnet" it's easy to trigger cases
 *   with "ping" where RX loses.  (a) ping to davinci, even "ping -f",
 *   fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
 *   although ARP RX wins.  (That test was done with a full speed link.)
 */

/*
 * NOTE on endpoint usage:
 *
 * CONTROL transfers all go through ep0.  BULK ones go through dedicated IN
 * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
 * (Yes, bulk _could_ use more of the endpoints than that, and would even
 * benefit from it.)
 *
 * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
 * So far that scheduling is both dumb and optimistic: the endpoint will be
 * "claimed" until its software queue is no longer refilled.  No multiplexing
 * of transfers between endpoints, or anything clever.
 */


static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len);

/*
 * Clear TX fifo. Needed to avoid BABBLE errors.
 */
static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{
	void __iomem	*epio = ep->regs;
	u16		csr;
	u16		lastcsr = 0;
	int		retries = 1000;

	csr = musb_readw(epio, MUSB_TXCSR);
	while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
		if (csr != lastcsr)
			DBG(3, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
		lastcsr = csr;
		csr |= MUSB_TXCSR_FLUSHFIFO;
		musb_writew(epio, MUSB_TXCSR, csr);
		csr = musb_readw(epio, MUSB_TXCSR);
		if (WARN(retries-- < 1,
				"Could not flush host TX%d fifo: csr: %04x\n",
				ep->epnum, csr))
			return;
		mdelay(1);
	}
}

static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
{
	void __iomem	*epio = ep->regs;
	u16		csr;
	int		retries = 5;
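
	/* ep0 repurposes the TX register offsets, so CSR0 is read and
	 * written via MUSB_TXCSR here
	 */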
	/* scrub any data left in the fifo */
	do {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY)))
			break;
		musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO);
		csr = musb_readw(epio, MUSB_TXCSR);
		udelay(10);
	} while (--retries);

	WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n",
			ep->epnum, csr);

	/* and reset for the next transfer */
	musb_writew(epio, MUSB_TXCSR, 0);
}

/*
 * Start transmit. Caller is responsible for locking shared resources.
 * musb must be locked.
 */
static inline void musb_h_tx_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	if (ep->epnum) {
		txcsr = musb_readw(ep->regs, MUSB_TXCSR);
		txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
		musb_writew(ep->regs, MUSB_TXCSR, txcsr);
	} else {
		txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
		musb_writew(ep->regs, MUSB_CSR0, txcsr);
	}
}

static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	txcsr = musb_readw(ep->regs, MUSB_TXCSR);
	txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
	if (is_cppi_enabled())
		txcsr |= MUSB_TXCSR_DMAMODE;
	musb_writew(ep->regs, MUSB_TXCSR, txcsr);
}
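
/*
 * A shared-FIFO endpoint serves only one direction at a time, so both
 * queue heads are kept in step for it; otherwise each direction keeps
 * its own.
 */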
static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
{
	if (is_in != 0 || ep->is_shared_fifo)
		ep->in_qh = qh;
	if (is_in == 0 || ep->is_shared_fifo)
		ep->out_qh = qh;
}

static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
{
	return is_in ? ep->in_qh : ep->out_qh;
}

/*
 * Start the URB at the front of an endpoint's queue
 * end must be claimed from the caller.
 *
 * Context: controller locked, irqs blocked
 */
static void
musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
{
	u16			frame;
	u32			len;
	void __iomem		*mbase = musb->mregs;
	struct urb		*urb = next_urb(qh);
	void			*buf = urb->transfer_buffer;
	u32			offset = 0;
	struct musb_hw_ep	*hw_ep = qh->hw_ep;
	unsigned		pipe = urb->pipe;
	u8			address = usb_pipedevice(pipe);
	int			epnum = hw_ep->epnum;

	/* initialize software qh state */
	qh->offset = 0;
	qh->segsize = 0;

	/* gather right source of data */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		/* control transfers always start with SETUP */
		is_in = 0;
		musb->ep0_stage = MUSB_EP0_START;
		buf = urb->setup_packet;
		len = 8;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		qh->iso_idx = 0;
		qh->frame = 0;
		offset = urb->iso_frame_desc[0].offset;
		len = urb->iso_frame_desc[0].length;
		break;
	default:		/* bulk, interrupt */
		/* actual_length may be nonzero on retry paths */
		buf = urb->transfer_buffer + urb->actual_length;
		len = urb->transfer_buffer_length - urb->actual_length;
	}

	DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
			qh, urb, address, qh->epnum,
			is_in ? "in" : "out",
			({char *s; switch (qh->type) {
			case USB_ENDPOINT_XFER_CONTROL:	s = ""; break;
			case USB_ENDPOINT_XFER_BULK:	s = "-bulk"; break;
			case USB_ENDPOINT_XFER_ISOC:	s = "-iso"; break;
			default:			s = "-intr"; break;
			}; s; }),
			epnum, buf + offset, len);

	/* Configure endpoint */
	musb_ep_set_qh(hw_ep, is_in, qh);
	musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);

	/* transmit may have more work: start it when it is time */
	if (is_in)
		return;

	/* determine if the time is right for a periodic transfer */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		DBG(3, "check whether there's still time for periodic Tx\n");
		frame = musb_readw(mbase, MUSB_FRAME);
		/* FIXME this doesn't implement that scheduling policy ...
		 * or handle framecounter wrapping
		 */
		if ((urb->transfer_flags & URB_ISO_ASAP)
				|| (frame >= urb->start_frame)) {
			/* REVISIT the SOF irq handler shouldn't duplicate
			 * this code; and we don't init urb->start_frame...
			 */
			qh->frame = 0;
			goto start;
		} else {
			qh->frame = urb->start_frame;
			/* enable SOF interrupt so we can count down */
			DBG(1, "SOF for %d\n", epnum);
#if 1 /* ifndef	CONFIG_ARCH_DAVINCI */
			musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
#endif
		}
		break;
	default:
start:
		DBG(4, "Start TX%d %s\n", epnum,
			hw_ep->tx_channel ? "dma" : "pio");

		if (!hw_ep->tx_channel)
			musb_h_tx_start(hw_ep);
		else if (is_cppi_enabled() || tusb_dma_omap())
			musb_h_tx_dma_start(hw_ep);
	}
}

/* Context: caller owns controller lock, IRQs are blocked */
static void musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{
	DBG(({ int level; switch (status) {
				case 0:
					level = 4;
					break;
				/* common/boring faults */
				case -EREMOTEIO:
				case -ESHUTDOWN:
				case -ECONNRESET:
				case -EPIPE:
					level = 3;
					break;
				default:
					level = 2;
					break;
				}; level; }),
			"complete %p %pF (%d), dev%d ep%d%s, %d/%d\n",
			urb, urb->complete, status,
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			usb_pipein(urb->pipe) ? "in" : "out",
			urb->actual_length, urb->transfer_buffer_length
			);

	usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
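
	/* drop the lock around the giveback; the completion handler
	 * may resubmit or unlink this URB
	 */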
	spin_unlock(&musb->lock);
	usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status);
	spin_lock(&musb->lock);
}

/* For bulk/interrupt endpoints only */
static inline void musb_save_toggle(struct musb_qh *qh, int is_in,
				    struct urb *urb)
{
	void __iomem		*epio = qh->hw_ep->regs;
	u16			csr;

	/*
	 * FIXME: the current Mentor DMA code seems to have
	 * problems getting toggle correct.
	 */

	if (is_in)
		csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
	else
		csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;
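	/* usb_settoggle()'s third argument is "is_out", hence the inversion */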
	usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0);
}

/*
 * Advance this hardware endpoint's queue, completing the specified URB and
 * advancing to either the next URB queued to that qh, or else invalidating
 * that qh and advancing to the next qh scheduled after the current one.
 *
 * Context: caller owns controller lock, IRQs are blocked
 */
static void musb_advance_schedule(struct musb *musb, struct urb *urb,
				  struct musb_hw_ep *hw_ep, int is_in)
{
	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, is_in);
	struct musb_hw_ep	*ep = qh->hw_ep;
	int			ready = qh->is_ready;
	int			status;

	status = (urb->status == -EINPROGRESS) ? 0 : urb->status;

	/* save toggle eagerly, for paranoia */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		musb_save_toggle(qh, is_in, urb);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (status == 0 && urb->error_count)
			status = -EXDEV;
		break;
	}

	qh->is_ready = 0;
	musb_giveback(musb, urb, status);
	qh->is_ready = ready;

	/* reclaim resources (and bandwidth) ASAP; deschedule it, and
	 * invalidate qh as soon as list_empty(&hep->urb_list)
	 */
	if (list_empty(&qh->hep->urb_list)) {
		struct list_head	*head;

		if (is_in)
			ep->rx_reinit = 1;
		else
			ep->tx_reinit = 1;

		/* Clobber old pointers to this qh */
		musb_ep_set_qh(ep, is_in, NULL);
		qh->hep->hcpriv = NULL;

		switch (qh->type) {

		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_BULK:
			/* fifo policy for these lists, except that NAKing
			 * should rotate a qh to the end (for fairness).
			 */
			if (qh->mux == 1) {
				head = qh->ring.prev;
				list_del(&qh->ring);
				kfree(qh);
				qh = first_qh(head);
				break;
			}
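			/* else: fall through and free the unmuxed qh */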
		case USB_ENDPOINT_XFER_ISOC:
		case USB_ENDPOINT_XFER_INT:
			/* this is where periodic bandwidth should be
			 * de-allocated if it's tracked and allocated;
			 * and where we'd update the schedule tree...
			 */
			kfree(qh);
			qh = NULL;
			break;
		}
	}

	if (qh != NULL && qh->is_ready) {
		DBG(4, "... next ep%d %cX urb %p\n",
		    hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
		musb_start_urb(musb, is_in, qh);
	}
}

static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
{
	/* we don't want fifo to fill itself again;
	 * ignore dma (various models),
	 * leave toggle alone (may not have been saved yet)
	 */
	csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
	csr &= ~(MUSB_RXCSR_H_REQPKT
		| MUSB_RXCSR_H_AUTOREQ
		| MUSB_RXCSR_AUTOCLEAR);

	/* write 2x to allow double buffering */
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);

	/* flush writebuffer */
	return musb_readw(hw_ep->regs, MUSB_RXCSR);
}

/*
 * PIO RX for a packet (or part of it).
 */
static bool
musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
{
	u16			rx_count;
	u8			*buf;
	u16			csr;
	bool			done = false;
	u32			length;
	int			do_flush = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	int			pipe = urb->pipe;
	void			*buffer = urb->transfer_buffer;

	/* musb_ep_select(mbase, epnum); */
	rx_count = musb_readw(epio, MUSB_RXCOUNT);
	DBG(3, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
			urb->transfer_buffer, qh->offset,
			urb->transfer_buffer_length);

	/* unload FIFO */
	if (usb_pipeisoc(pipe)) {
		int					status = 0;
		struct usb_iso_packet_descriptor	*d;

		if (iso_err) {
			status = -EILSEQ;
			urb->error_count++;
		}

		d = urb->iso_frame_desc + qh->iso_idx;
		buf = buffer + d->offset;
		length = d->length;
		if (rx_count > length) {
			if (status == 0) {
				status = -EOVERFLOW;
				urb->error_count++;
			}
			DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		d->actual_length = length;

		d->status = status;

		/* see if we are done */
		done = (++qh->iso_idx >= urb->number_of_packets);
	} else {
		/* non-isoch */
		buf = buffer + qh->offset;
		length = urb->transfer_buffer_length - qh->offset;
		if (rx_count > length) {
			if (urb->status == -EINPROGRESS)
				urb->status = -EOVERFLOW;
			DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		qh->offset += length;

		/* see if we are done */
		done = (urb->actual_length == urb->transfer_buffer_length)
			|| (rx_count < qh->maxpacket)
			|| (urb->status != -EINPROGRESS);
		if (done
				&& (urb->status == -EINPROGRESS)
				&& (urb->transfer_flags & URB_SHORT_NOT_OK)
				&& (urb->actual_length
					< urb->transfer_buffer_length))
			urb->status = -EREMOTEIO;
	}

	musb_read_fifo(hw_ep, length, buf);

	csr = musb_readw(epio, MUSB_RXCSR);
	csr |= MUSB_RXCSR_H_WZC_BITS;
	if (unlikely(do_flush))
		musb_h_flush_rxfifo(hw_ep, csr);
	else {
		/* REVISIT this assumes AUTOCLEAR is never set */
		csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
		if (!done)
			csr |= MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	return done;
}

/* we don't always need to reinit a given side of an endpoint...
 * when we do, use tx/rx reinit routine and then construct a new CSR
 * to address data toggle, NYET, and DMA or PIO.
 *
 * it's possible that driver bugs (especially for DMA) or aborting a
 * transfer might have left the endpoint busier than it should be.
 * the busy/not-empty tests are basically paranoia.
 */
static void
musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
{
	u16	csr;

	/* NOTE:  we know the "rx" fifo reinit never triggers for ep0.
	 * That always uses tx_reinit since ep0 repurposes TX register
	 * offsets; the initial SETUP packet is also a kind of OUT.
	 */

	/* if programmed for Tx, put it in RX mode */
	if (ep->is_shared_fifo) {
		csr = musb_readw(ep->regs, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_MODE) {
			musb_h_tx_flush_fifo(ep);
			csr = musb_readw(ep->regs, MUSB_TXCSR);
			musb_writew(ep->regs, MUSB_TXCSR,
				    csr | MUSB_TXCSR_FRCDATATOG);
		}

		/*
		 * Clear the MODE bit (and everything else) to enable Rx.
		 * NOTE: we mustn't clear the DMAMODE bit before DMAENAB.
		 */
		if (csr & MUSB_TXCSR_DMAMODE)
			musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE);
		musb_writew(ep->regs, MUSB_TXCSR, 0);

	/* scrub all previous state, clearing toggle */
	} else {
		csr = musb_readw(ep->regs, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_RXPKTRDY)
			WARNING("rx%d, packet/%d ready?\n", ep->epnum,
				musb_readw(ep->regs, MUSB_RXCOUNT));

		musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
	}

	/* target addr and (for multipoint) hub addr/port */
	if (musb->is_multipoint) {
		musb_write_rxfunaddr(ep->target_regs, qh->addr_reg);
		musb_write_rxhubaddr(ep->target_regs, qh->h_addr_reg);
		musb_write_rxhubport(ep->target_regs, qh->h_port_reg);

	} else
		musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);

	/* protocol/endpoint, interval/NAKlimit, i/o size */
	musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
	musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
	/* NOTE: bulk combining rewrites high bits of maxpacket */
	/* Set RXMAXP with the FIFO size of the endpoint
	 * to disable double buffer mode.
	 */
	if (musb->hwvers < MUSB_HWVERS_2000)
		musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx);
	else
		musb_writew(ep->regs, MUSB_RXMAXP,
				qh->maxpacket | ((qh->hb_mult - 1) << 11));

	ep->rx_reinit = 0;
}
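
/*
 * Returns true if the DMA controller accepted the transfer; on failure
 * the channel is released so the caller can fall back to PIO.
 */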
static bool musb_tx_dma_program(struct dma_controller *dma,
		struct musb_hw_ep *hw_ep, struct musb_qh *qh,
		struct urb *urb, u32 offset, u32 length)
{
	struct dma_channel	*channel = hw_ep->tx_channel;
	void __iomem		*epio = hw_ep->regs;
	u16			pkt_size = qh->maxpacket;
	u16			csr;
	u8			mode;

#ifdef	CONFIG_USB_INVENTRA_DMA
	if (length > channel->max_len)
		length = channel->max_len;

	csr = musb_readw(epio, MUSB_TXCSR);
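	/* mode 1 moves multiple packets per request; mode 0 exactly one */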
	if (length > pkt_size) {
		mode = 1;
		csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
		/* autoset shouldn't be set in high bandwidth */
		if (qh->hb_mult == 1)
			csr |= MUSB_TXCSR_AUTOSET;
	} else {
		mode = 0;
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
		csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
	}
	channel->desired_mode = mode;
	musb_writew(epio, MUSB_TXCSR, csr);
#else
	if (!is_cppi_enabled() && !tusb_dma_omap())
		return false;

	channel->actual_len = 0;

	/*
	 * TX uses "RNDIS" mode automatically but needs help
	 * to identify the zero-length-final-packet case.
	 */
	mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
#endif

	qh->segsize = length;

	/*
	 * Ensure the data reaches to main memory before starting
	 * DMA transfer
	 */
	wmb();

	if (!dma->channel_program(channel, pkt_size, mode,
			urb->transfer_dma + offset, length)) {
		dma->channel_release(channel);
		hw_ep->tx_channel = NULL;

		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
		musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS);
		return false;
	}
	return true;
}

/*
 * Program an HDRC endpoint as per the given URB
 * Context: irqs blocked, controller lock held
 */
static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len)
{
	struct dma_controller	*dma_controller;
	struct dma_channel	*dma_channel;
	u8			dma_ok;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, !is_out);
	u16			packet_sz = qh->maxpacket;

	DBG(3, "%s hw%d urb %p spd%d dev%d ep%d%s "
				"h_addr%02x h_port%02x bytes %d\n",
			is_out ? "-->" : "<--",
			epnum, urb, urb->dev->speed,
			qh->addr_reg, qh->epnum, is_out ? "out" : "in",
			qh->h_addr_reg, qh->h_port_reg,
			len);

	musb_ep_select(mbase, epnum);

	/* candidate for DMA? */
	dma_controller = musb->dma_controller;
	if (is_dma_capable() && epnum && dma_controller) {
		dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
		if (!dma_channel) {
			dma_channel = dma_controller->channel_alloc(
					dma_controller, hw_ep, is_out);
			if (is_out)
				hw_ep->tx_channel = dma_channel;
			else
				hw_ep->rx_channel = dma_channel;
		}
	} else
		dma_channel = NULL;

	/* make sure we clear DMAEnab, autoSet bits from previous run */

	/* OUT/transmit/EP0 or IN/receive? */
	if (is_out) {
		u16	csr;
		u16	int_txe;
		u16	load_count;

		csr = musb_readw(epio, MUSB_TXCSR);

		/* disable interrupt in case we flush */
		int_txe = musb_readw(mbase, MUSB_INTRTXE);
		musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

		/* general endpoint setup */
		if (epnum) {
			/* flush all old state, set default */
			musb_h_tx_flush_fifo(hw_ep);

			/*
			 * We must not clear the DMAMODE bit before or in
			 * the same cycle with the DMAENAB bit, so we clear
			 * the latter first...
			 */
			csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
					| MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAENAB
					| MUSB_TXCSR_FRCDATATOG
					| MUSB_TXCSR_H_RXSTALL
					| MUSB_TXCSR_H_ERROR
					| MUSB_TXCSR_TXPKTRDY
					);
			csr |= MUSB_TXCSR_MODE;

			if (usb_gettoggle(urb->dev, qh->epnum, 1))
				csr |= MUSB_TXCSR_H_WR_DATATOGGLE
					| MUSB_TXCSR_H_DATATOGGLE;
			else
				csr |= MUSB_TXCSR_CLRDATATOG;

			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may need to clear FLUSHFIFO ... */
			csr &= ~MUSB_TXCSR_DMAMODE;
			musb_writew(epio, MUSB_TXCSR, csr);
			csr = musb_readw(epio, MUSB_TXCSR);
		} else {
			/* endpoint 0: just flush */
			musb_h_ep0_flush_fifo(hw_ep);
		}

		/* target addr and (for multipoint) hub addr/port */
		if (musb->is_multipoint) {
			musb_write_txfunaddr(mbase, epnum, qh->addr_reg);
			musb_write_txhubaddr(mbase, epnum, qh->h_addr_reg);
			musb_write_txhubport(mbase, epnum, qh->h_port_reg);
/* FIXME if !epnum, do the same for RX ... */
		} else
			musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);

		/* protocol/endpoint/interval/NAKlimit */
		if (epnum) {
			musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
			if (can_bulk_split(musb, qh->type))
				musb_writew(epio, MUSB_TXMAXP,
					packet_sz
					| ((hw_ep->max_packet_sz_tx /
						packet_sz) - 1) << 11);
			else
				musb_writew(epio, MUSB_TXMAXP,
					packet_sz);
			musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
		} else {
			musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
			if (musb->is_multipoint)
				musb_writeb(epio, MUSB_TYPE0,
						qh->type_reg);
		}
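
		/* PIO loads at most one packet (or one bulk-combined FIFO's
		 * worth); a successful DMA program below takes over instead
		 */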
		if (can_bulk_split(musb, qh->type))
			load_count = min((u32) hw_ep->max_packet_sz_tx,
						len);
		else
			load_count = min((u32) packet_sz, len);

		if (dma_channel && musb_tx_dma_program(dma_controller,
					hw_ep, qh, urb, offset, len))
			load_count = 0;

		if (load_count) {
			/* PIO to load FIFO */
			qh->segsize = load_count;
			musb_write_fifo(hw_ep, load_count, buf);
		}

		/* re-enable interrupt */
		musb_writew(mbase, MUSB_INTRTXE, int_txe);

	/* IN/receive */
	} else {
		u16	csr;

		if (hw_ep->rx_reinit) {
			musb_rx_reinit(musb, qh, hw_ep);

			/* init new state: toggle and NYET, maybe DMA later */
			if (usb_gettoggle(urb->dev, qh->epnum, 0))
				csr = MUSB_RXCSR_H_WR_DATATOGGLE
					| MUSB_RXCSR_H_DATATOGGLE;
			else
				csr = 0;
			if (qh->type == USB_ENDPOINT_XFER_INT)
				csr |= MUSB_RXCSR_DISNYET;

		} else {
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			if (csr & (MUSB_RXCSR_RXPKTRDY
					| MUSB_RXCSR_DMAENAB
					| MUSB_RXCSR_H_REQPKT))
				ERR("broken !rx_reinit, ep%d csr %04x\n",
						hw_ep->epnum, csr);

			/* scrub any stale state, leaving toggle alone */
			csr &= MUSB_RXCSR_DISNYET;
		}

		/* kick things off */

		if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
			/* candidate for DMA */
			if (dma_channel) {
				dma_channel->actual_len = 0L;
				qh->segsize = len;

				/* AUTOREQ is in a DMA register */
				musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
				csr = musb_readw(hw_ep->regs,
						MUSB_RXCSR);

				/* unless caller treats short rx transfers as
				 * errors, we dare not queue multiple transfers.
				 */
				dma_ok = dma_controller->channel_program(
						dma_channel, packet_sz,
						!(urb->transfer_flags
							& URB_SHORT_NOT_OK),
						urb->transfer_dma + offset,
						qh->segsize);
				if (!dma_ok) {
					dma_controller->channel_release(
							dma_channel);
					hw_ep->rx_channel = NULL;
					dma_channel = NULL;
				} else
					csr |= MUSB_RXCSR_DMAENAB;
			}
		}

		csr |= MUSB_RXCSR_H_REQPKT;
		DBG(7, "RXCSR%d := %04x\n", epnum, csr);
		musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	}
}

/*
 * Service the default endpoint (ep0) as host.
 * Return true until it's time to start the status stage.
 */
static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
{
	bool			 more = false;
	u8			*fifo_dest = NULL;
	u16			fifo_count = 0;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	struct musb_qh		*qh = hw_ep->in_qh;
	struct usb_ctrlrequest	*request;

	switch (musb->ep0_stage) {
	case MUSB_EP0_IN:
		fifo_dest = urb->transfer_buffer + urb->actual_length;
		fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count < len)
			urb->status = -EOVERFLOW;

		musb_read_fifo(hw_ep, fifo_count, fifo_dest);

		urb->actual_length += fifo_count;
		if (len < qh->maxpacket) {
			/* always terminate on short read; it's
			 * rarely reported as an error.
			 */
		} else if (urb->actual_length <
				urb->transfer_buffer_length)
			more = true;
		break;
	case MUSB_EP0_START:
		request = (struct usb_ctrlrequest *) urb->setup_packet;

		if (!request->wLength) {
			DBG(4, "start no-DATA\n");
			break;
		} else if (request->bRequestType & USB_DIR_IN) {
			DBG(4, "start IN-DATA\n");
			musb->ep0_stage = MUSB_EP0_IN;
			more = true;
			break;
		} else {
			DBG(4, "start OUT-DATA\n");
			musb->ep0_stage = MUSB_EP0_OUT;
			more = true;
		}
		/* FALLTHROUGH */
	case MUSB_EP0_OUT:
		fifo_count = min_t(size_t, qh->maxpacket,
				   urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count) {
			fifo_dest = (u8 *) (urb->transfer_buffer
					+ urb->actual_length);
			DBG(3, "Sending %d byte%s to ep0 fifo %p\n",
					fifo_count,
					(fifo_count == 1) ? "" : "s",
					fifo_dest);
			musb_write_fifo(hw_ep, fifo_count, fifo_dest);

			urb->actual_length += fifo_count;
			more = true;
		}
		break;
	default:
		ERR("bogus ep0 stage %d\n", musb->ep0_stage);
		break;
	}

	return more;
}

/*
 * Handle default endpoint interrupt as host. Only called in IRQ time
 * from musb_interrupt().
 *
 * called with controller irqlocked
 */
irqreturn_t musb_h_ep0_irq(struct musb *musb)
{
	struct urb		*urb;
	u16			csr, len;
	int			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	bool			complete = false;
	irqreturn_t		retval = IRQ_NONE;

	/* ep0 only has one queue, "in" */
	urb = next_urb(qh);

	musb_ep_select(mbase, 0);
	csr = musb_readw(epio, MUSB_CSR0);
	len = (csr & MUSB_CSR0_RXPKTRDY)
			? musb_readb(epio, MUSB_COUNT0)
			: 0;

	DBG(4, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
		csr, qh, len, urb, musb->ep0_stage);

	/* if we just did status stage, we are done */
	if (MUSB_EP0_STATUS == musb->ep0_stage) {
		retval = IRQ_HANDLED;
		complete = true;
	}

	/* prepare status */
	if (csr & MUSB_CSR0_H_RXSTALL) {
		DBG(6, "STALLING ENDPOINT\n");
		status = -EPIPE;

	} else if (csr & MUSB_CSR0_H_ERROR) {
		DBG(2, "no response, csr0 %04x\n", csr);
		status = -EPROTO;

	} else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
		DBG(2, "control NAK timeout\n");

		/* NOTE:  this code path would be a good place to PAUSE a
		 * control transfer, if another one is queued, so that
		 * ep0 is more likely to stay busy.  That's already done
		 * for bulk RX transfers.
		 *
		 * if (qh->ring.next != &musb->control), then
		 * we have a candidate... NAKing is *NOT* an error
		 */
		musb_writew(epio, MUSB_CSR0, 0);
		retval = IRQ_HANDLED;
	}

	if (status) {
		DBG(6, "aborting\n");
		retval = IRQ_HANDLED;
		if (urb)
			urb->status = status;
		complete = true;

		/* use the proper sequence to abort the transfer */
		if (csr & MUSB_CSR0_H_REQPKT) {
			csr &= ~MUSB_CSR0_H_REQPKT;
			musb_writew(epio, MUSB_CSR0, csr);
			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
			musb_writew(epio, MUSB_CSR0, csr);
		} else {
			musb_h_ep0_flush_fifo(hw_ep);
		}

		musb_writeb(epio, MUSB_NAKLIMIT0, 0);

		/* clear it */
		musb_writew(epio, MUSB_CSR0, 0);
	}

	if (unlikely(!urb)) {
		/* stop endpoint since we have no place for its data, this
		 * SHOULD NEVER HAPPEN! */
		ERR("no URB for end 0\n");

		musb_h_ep0_flush_fifo(hw_ep);
		goto done;
	}

	if (!complete) {
		/* call common logic and prepare response */
		if (musb_h_ep0_continue(musb, len, urb)) {
			/* more packets required */
			csr = (MUSB_EP0_IN == musb->ep0_stage)
				?  MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
		} else {
			/* data transfer complete; perform status phase */
			if (usb_pipeout(urb->pipe)
					|| !urb->transfer_buffer_length)
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_H_REQPKT;
			else
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_TXPKTRDY;

			/* flag status stage */
			musb->ep0_stage = MUSB_EP0_STATUS;

			DBG(5, "ep0 STATUS, csr %04x\n", csr);
		}
		musb_writew(epio, MUSB_CSR0, csr);
		retval = IRQ_HANDLED;
	} else
		musb->ep0_stage = MUSB_EP0_IDLE;

	/* call completion handler if done */
	if (complete)
		musb_advance_schedule(musb, urb, hw_ep, 1);
done:
	return retval;
}


#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side TX (OUT) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, Program Endpoint
		- ... which starts DMA to fifo in mode 1 or 0

	DMA Isr (transfer complete) -> TxAvail()
		- Stop DMA (~DmaEnab)	(<--- Alert ... currently happens
					only in musb_cleanup_urb)
		- TxPktRdy has to be set in mode 0 or for
			short packets in mode 1.
*/

#endif

/* Service a Tx-Available or dma completion irq for the endpoint */
void musb_host_tx(struct musb *musb, u8 epnum)
{
	int			pipe;
	bool			done = false;
	u16			tx_csr;
	size_t			length = 0;
	size_t			offset = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->out_qh;
	struct urb		*urb = next_urb(qh);
	u32			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct dma_channel	*dma;

	musb_ep_select(mbase, epnum);
	tx_csr = musb_readw(epio, MUSB_TXCSR);

	/* with CPPI, DMA sometimes triggers "extra" irqs */
	if (!urb) {
		DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
		return;
	}

	pipe = urb->pipe;
	dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
	DBG(4, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr,
			dma ? ", dma" : "");

	/* check for errors */
	if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
		/* dma was disabled, fifo flushed */
		DBG(3, "TX end %d stall\n", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (tx_csr & MUSB_TXCSR_H_ERROR) {
		/* (NON-ISO) dma was disabled, fifo flushed */
		DBG(3, "TX 3strikes on ep=%d\n", epnum);

		status = -ETIMEDOUT;

	} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
		DBG(6, "TX end=%d device not responding\n", epnum);

		/* NOTE:  this code path would be a good place to PAUSE a
		 * transfer, if there's some other (nonperiodic) tx urb
		 * that could use this fifo.  (dma complicates it...)
		 * That's already done for bulk RX transfers.
		 *
		 * if (bulk && qh->ring.next != &musb->out_bulk), then
		 * we have a candidate... NAKing is *NOT* an error
		 */
		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR,
				MUSB_TXCSR_H_WZC_BITS
				| MUSB_TXCSR_TXPKTRDY);
		return;
	}

	if (status) {
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
		}

		/* do the proper sequence to abort the transfer in the
		 * usb core; the dma engine should already be stopped.
		 */
		musb_h_tx_flush_fifo(hw_ep);
		tx_csr &= ~(MUSB_TXCSR_AUTOSET
				| MUSB_TXCSR_DMAENAB
				| MUSB_TXCSR_H_ERROR
				| MUSB_TXCSR_H_RXSTALL
				| MUSB_TXCSR_H_NAKTIMEOUT
				);

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		/* REVISIT may need to clear FLUSHFIFO ... */
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		musb_writeb(epio, MUSB_TXINTERVAL, 0);

		done = true;
	}

	/* second cppi case */
	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
		return;
	}

	if (is_dma_capable() && dma && !status) {
		/*
		 * DMA has completed.  But if we're using DMA mode 1 (multi
		 * packet DMA), we need a terminal TXPKTRDY interrupt before
		 * we can consider this transfer completed, lest we trash
		 * its last packet when writing the next URB's data.  So we
		 * switch back to mode 0 to get that interrupt; we'll come
		 * back here once it happens.
		 */
		if (tx_csr & MUSB_TXCSR_DMAMODE) {
			/*
			 * We shouldn't clear DMAMODE with DMAENAB set; so
			 * clear them in a safe order.  That should be OK
			 * once TXPKTRDY has been set (and I've never seen
			 * it being 0 at this moment -- DMA interrupt latency
			 * is significant) but if it hasn't been then we have
			 * no choice but to stop being polite and ignore the
			 * programmer's guide... :-)
			 *
			 * Note that we must write TXCSR with TXPKTRDY cleared
			 * in order not to re-trigger the packet send (this bit
			 * can't be cleared by CPU), and there's another caveat:
			 * TXPKTRDY may be set shortly and then cleared in the
			 * double-buffered FIFO mode, so we do an extra TXCSR
			 * read for debouncing...
			 */
			tx_csr &= musb_readw(epio, MUSB_TXCSR);
			if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
				tx_csr &= ~(MUSB_TXCSR_DMAENAB |
					    MUSB_TXCSR_TXPKTRDY);
				musb_writew(epio, MUSB_TXCSR,
					    tx_csr | MUSB_TXCSR_H_WZC_BITS);
			}
			tx_csr &= ~(MUSB_TXCSR_DMAMODE |
				    MUSB_TXCSR_TXPKTRDY);
			musb_writew(epio, MUSB_TXCSR,
				    tx_csr | MUSB_TXCSR_H_WZC_BITS);

			/*
			 * There is no guarantee that we'll get an interrupt
			 * after clearing DMAMODE as we might have done this
			 * too late (after TXPKTRDY was cleared by controller).
			 * Re-read TXCSR as we have spoiled its previous value.
			 */
			tx_csr = musb_readw(epio, MUSB_TXCSR);
		}

		/*
		 * We may get here from a DMA completion or TXPKTRDY interrupt.
		 * In any case, we must check the FIFO status here and bail out
		 * only if the FIFO still has data -- that should prevent the
		 * "missed" TXPKTRDY interrupts and deal with double-buffered
		 * FIFO mode too...
		 */
		if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
			DBG(2, "DMA complete but packet still in FIFO, "
			    "CSR %04x\n", tx_csr);
			return;
		}
	}
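
	/* update transfer accounting and decide whether this URB is done
	 * or needs another segment queued
	 */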
	if (!status || dma || usb_pipeisoc(pipe)) {
		if (dma)
			length = dma->actual_len;
		else
			length = qh->segsize;
		qh->offset += length;

		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor	*d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = length;
			d->status = status;
			if (++qh->iso_idx >= urb->number_of_packets) {
				done = true;
			} else {
				d++;
				offset = d->offset;
				length = d->length;
			}
		} else if (dma) {
			done = true;
		} else {
			/* see if we need to send more data, or ZLP */
			if (qh->segsize < qh->maxpacket)
				done = true;
			else if (qh->offset == urb->transfer_buffer_length
					&& !(urb->transfer_flags
						& URB_ZERO_PACKET))
				done = true;
			if (!done) {
				offset = qh->offset;
				length = urb->transfer_buffer_length - offset;
			}
		}
	}

	/* urb->status != -EINPROGRESS means request has been faulted,
	 * so we must abort this transfer after cleanup
	 */
	if (urb->status != -EINPROGRESS) {
		done = true;
		if (status == 0)
			status = urb->status;
	}

	if (done) {
		/* set status */
		urb->status = status;
		urb->actual_length = qh->offset;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
		return;
	} else	if (usb_pipeisoc(pipe) && dma) {
		if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
				offset, length)) {
			if (is_cppi_enabled() || tusb_dma_omap())
				musb_h_tx_dma_start(hw_ep);
			return;
		}
	} else	if (tx_csr & MUSB_TXCSR_DMAENAB) {
		DBG(1, "not complete, but DMA enabled?\n");
		return;
	}

	/*
	 * PIO: start next packet in this URB.
	 *
	 * REVISIT: some docs say that when hw_ep->tx_double_buffered,
	 * (and presumably, FIFO is not half-full) we should write *two*
	 * packets before updating TXCSR; other docs disagree...
	 */
	if (length > qh->maxpacket)
		length = qh->maxpacket;
	musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
	qh->segsize = length;

	musb_ep_select(mbase, epnum);
	musb_writew(epio, MUSB_TXCSR,
			MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
}


#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side RX (IN) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, ProgramEndpoint
		- first IN token is sent out (by setting ReqPkt)
	LinuxIsr -> RxReady()
	/\	=> first packet is received
	|	- Set in mode 0 (DmaEnab, ~ReqPkt)
	|		-> DMA Isr (transfer complete) -> RxReady()
	|		    - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
	|		    - if urb not complete, send next IN token (ReqPkt)
	|			   |		else complete urb.
	|			   |
	---------------------------
 *
 * Nuances of mode 1:
 *	For short packets, no ack (+RxPktRdy) is sent automatically
 *	(even if AutoClear is ON)
 *	For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent
 *	automatically => major problem, as collecting the next packet becomes
 *	difficult. Hence mode 1 is not used.
 *
 * REVISIT
 *	All we care about at this driver level is that
 *       (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
 *       (b) termination conditions are: short RX, or buffer full;
 *       (c) fault modes include
 *           - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
 *             (and that endpoint's dma queue stops immediately)
 *           - overflow (full, PLUS more bytes in the terminal packet)
 *
 *	So for example, usb-storage sets URB_SHORT_NOT_OK, and would
 *	thus be a great candidate for using mode 1 ... for all but the
 *	last packet of one URB's transfer.
 */

#endif

/* Schedule next QH from musb->in_bulk and move the current qh to
 * the end; avoids starvation for other endpoints.
 */
static void musb_bulk_rx_nak_timeout(struct musb *musb, struct musb_hw_ep *ep)
{
	struct dma_channel	*dma;
	struct urb		*urb;
	void __iomem		*mbase = musb->mregs;
	void __iomem		*epio = ep->regs;
	struct musb_qh		*cur_qh, *next_qh;
	u16			rx_csr;

	musb_ep_select(mbase, ep->epnum);
	dma = is_dma_capable() ? ep->rx_channel : NULL;

	/* clear nak timeout bit */
	rx_csr = musb_readw(epio, MUSB_RXCSR);
	rx_csr |= MUSB_RXCSR_H_WZC_BITS;
	rx_csr &= ~MUSB_RXCSR_DATAERROR;
	musb_writew(epio, MUSB_RXCSR, rx_csr);

	cur_qh = first_qh(&musb->in_bulk);
	if (cur_qh) {
		urb = next_urb(cur_qh);
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
			urb->actual_length += dma->actual_len;
			dma->actual_len = 0L;
		}
		musb_save_toggle(cur_qh, 1, urb);

		/* move cur_qh to end of queue */
		list_move_tail(&cur_qh->ring, &musb->in_bulk);

		/* get the next qh from musb->in_bulk */
		next_qh = first_qh(&musb->in_bulk);

		/* set rx_reinit and schedule the next qh */
		ep->rx_reinit = 1;
		musb_start_urb(musb, 1, next_qh);
	}
}
  1249. /*
  1250. * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
  1251. * and high-bandwidth IN transfer cases.
  1252. */
  1253. void musb_host_rx(struct musb *musb, u8 epnum)
  1254. {
  1255. struct urb *urb;
  1256. struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
  1257. void __iomem *epio = hw_ep->regs;
  1258. struct musb_qh *qh = hw_ep->in_qh;
  1259. size_t xfer_len;
  1260. void __iomem *mbase = musb->mregs;
  1261. int pipe;
  1262. u16 rx_csr, val;
  1263. bool iso_err = false;
  1264. bool done = false;
  1265. u32 status;
  1266. struct dma_channel *dma;
  1267. musb_ep_select(mbase, epnum);
  1268. urb = next_urb(qh);
  1269. dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
  1270. status = 0;
  1271. xfer_len = 0;
  1272. rx_csr = musb_readw(epio, MUSB_RXCSR);
  1273. val = rx_csr;
  1274. if (unlikely(!urb)) {
  1275. /* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
  1276. * usbtest #11 (unlinks) triggers it regularly, sometimes
  1277. * with fifo full. (Only with DMA??)
  1278. */
  1279. DBG(3, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
  1280. musb_readw(epio, MUSB_RXCOUNT));
  1281. musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
  1282. return;
  1283. }
  1284. pipe = urb->pipe;
  1285. DBG(5, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
  1286. epnum, rx_csr, urb->actual_length,
  1287. dma ? dma->actual_len : 0);
	/* check for errors; concurrent stall & unlink is not really
	 * handled yet!
	 */
	if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
		DBG(3, "RX end %d STALL\n", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (rx_csr & MUSB_RXCSR_H_ERROR) {
		DBG(3, "end %d RX proto error\n", epnum);

		status = -EPROTO;
		musb_writeb(epio, MUSB_RXINTERVAL, 0);

	} else if (rx_csr & MUSB_RXCSR_DATAERROR) {

		if (USB_ENDPOINT_XFER_ISOC != qh->type) {
			DBG(6, "RX end %d NAK timeout\n", epnum);

			/* NOTE: NAKing is *NOT* an error, so we want to
			 * continue.  Except ... if there's a request for
			 * another QH, use that instead of starving it.
			 *
			 * Devices like Ethernet and serial adapters keep
			 * reads posted at all times, which will starve
			 * other devices without this logic.
			 */
			if (usb_pipebulk(urb->pipe)
					&& qh->mux == 1
					&& !list_is_singular(&musb->in_bulk)) {
				musb_bulk_rx_nak_timeout(musb, hw_ep);
				return;
			}
			musb_ep_select(mbase, epnum);
			rx_csr |= MUSB_RXCSR_H_WZC_BITS;
			rx_csr &= ~MUSB_RXCSR_DATAERROR;
			musb_writew(epio, MUSB_RXCSR, rx_csr);

			goto finish;
		} else {
			DBG(4, "RX end %d ISO data error\n", epnum);
			/* packet error reported later */
			iso_err = true;
		}
	} else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
		DBG(3, "end %d high bandwidth incomplete ISO packet RX\n",
				epnum);
		status = -EPROTO;
	}
	/* faults abort the transfer */
	if (status) {
		/* clean up dma and collect transfer count */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
		}
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		musb_writeb(epio, MUSB_RXINTERVAL, 0);
		done = true;
		goto finish;
	}

	if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
		/* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
		ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
		goto finish;
	}
	/* thorough shutdown for now ... given more precise fault handling
	 * and better queueing support, we might keep a DMA pipeline going
	 * while processing this irq for earlier completions.
	 */

	/* FIXME this is _way_ too much in-line logic for Mentor DMA */
#ifndef CONFIG_USB_INVENTRA_DMA
	if (rx_csr & MUSB_RXCSR_H_REQPKT) {
		/* REVISIT this happened for a while on some short reads...
		 * the cleanup still needs investigation... looks bad...
		 * and also duplicates dma cleanup code above ... plus,
		 * shouldn't this be the "half full" double buffer case?
		 */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
			done = true;
		}

		DBG(2, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr,
				xfer_len, dma ? ", dma" : "");
		rx_csr &= ~MUSB_RXCSR_H_REQPKT;

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | rx_csr);
	}
#endif
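	/* No hard fault was recorded above, so what remains is either a
	 * DMA completion (DMAENAB still set in the CSR) or a freshly
	 * arrived packet waiting in the FIFO.
	 */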
	if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
		xfer_len = dma->actual_len;

		val &= ~(MUSB_RXCSR_DMAENAB
			| MUSB_RXCSR_H_AUTOREQ
			| MUSB_RXCSR_AUTOCLEAR
			| MUSB_RXCSR_RXPKTRDY);
		musb_writew(hw_ep->regs, MUSB_RXCSR, val);

#ifdef CONFIG_USB_INVENTRA_DMA
		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor *d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = xfer_len;

			/* even if there was an error, we did the dma
			 * for iso_frame_desc->length
			 */
			if (d->status != -EILSEQ && d->status != -EOVERFLOW)
				d->status = 0;

			if (++qh->iso_idx >= urb->number_of_packets)
				done = true;
			else
				done = false;

		} else {
			/* done if urb buffer is full or short packet is recd */
			done = (urb->actual_length + xfer_len >=
					urb->transfer_buffer_length
				|| dma->actual_len < qh->maxpacket);
		}

		/* send IN token for next packet, without AUTOREQ */
		if (!done) {
			val |= MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | val);
		}

		DBG(4, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum,
			done ? "off" : "reset",
			musb_readw(epio, MUSB_RXCSR),
			musb_readw(epio, MUSB_RXCOUNT));
#else
		done = true;
#endif
	} else if (urb->status == -EINPROGRESS) {
		/* if no errors, be sure a packet is ready for unloading */
		if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
			status = -EPROTO;
			ERR("Rx interrupt with no errors or packet!\n");

			/* FIXME this is another "SHOULD NEVER HAPPEN" */

			/* SCRUB (RX) */
			/* do the proper sequence to abort the transfer */
			musb_ep_select(mbase, epnum);
			val &= ~MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR, val);
			goto finish;
		}
		/* we are expecting IN packets */
#ifdef CONFIG_USB_INVENTRA_DMA
		if (dma) {
			struct dma_controller	*c;
			u16			rx_count;
			int			ret, length;
			dma_addr_t		buf;

			rx_count = musb_readw(epio, MUSB_RXCOUNT);

			DBG(2, "RX%d count %d, buffer 0x%x len %d/%d\n",
					epnum, rx_count,
					urb->transfer_dma
						+ urb->actual_length,
					qh->offset,
					urb->transfer_buffer_length);

			c = musb->dma_controller;

			if (usb_pipeisoc(pipe)) {
				int d_status = 0;
				struct usb_iso_packet_descriptor *d;

				d = urb->iso_frame_desc + qh->iso_idx;

				if (iso_err) {
					d_status = -EILSEQ;
					urb->error_count++;
				}
				if (rx_count > d->length) {
					if (d_status == 0) {
						d_status = -EOVERFLOW;
						urb->error_count++;
					}
					DBG(2, "** OVERFLOW %d into %d\n",
						rx_count, d->length);

					length = d->length;
				} else
					length = rx_count;
				d->status = d_status;
				buf = urb->transfer_dma + d->offset;
			} else {
				length = rx_count;
				buf = urb->transfer_dma +
						urb->actual_length;
			}

			dma->desired_mode = 0;
#ifdef USE_MODE1
			/* because of the issue below, mode 1 will
			 * only rarely behave with correct semantics.
			 */
			if ((urb->transfer_flags &
						URB_SHORT_NOT_OK)
				&& (urb->transfer_buffer_length -
						urb->actual_length)
					> qh->maxpacket)
				dma->desired_mode = 1;
			if (rx_count < hw_ep->max_packet_sz_rx) {
				length = rx_count;
				dma->desired_mode = 0;
			} else {
				length = urb->transfer_buffer_length;
			}
#endif

/* Disadvantage of using mode 1:
 *	It's basically usable only for mass storage class; essentially all
 *	other protocols also terminate transfers on short packets.
 *
 * Details:
 *	An extra IN token is sent at the end of the transfer (due to AUTOREQ).
 *	If you try to use mode 1 for (transfer_buffer_length - 512), and try
 *	to use the extra IN token to grab the last packet using mode 0, then
 *	the problem is that you cannot be sure when the device will send the
 *	last packet and RxPktRdy set.  Sometimes the packet is recd too soon
 *	such that it gets lost when RxCSR is re-set at the end of the mode 1
 *	transfer, while sometimes it is recd just a little late so that if you
 *	try to configure for mode 0 soon after the mode 1 transfer is
 *	completed, you will find rxcount 0.  Okay, so you might think why not
 *	wait for an interrupt when the pkt is recd.  Well, you won't get any!
 */
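			/* In short: mode 0 moves one packet per REQPKT and
			 * is safe for protocols that terminate transfers on
			 * short packets; mode 1 lets AUTOREQ stream packets
			 * back to back and is only predictable when the
			 * total length is known up front.
			 */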
			val = musb_readw(epio, MUSB_RXCSR);
			val &= ~MUSB_RXCSR_H_REQPKT;

			if (dma->desired_mode == 0)
				val &= ~MUSB_RXCSR_H_AUTOREQ;
			else
				val |= MUSB_RXCSR_H_AUTOREQ;
			val |= MUSB_RXCSR_DMAENAB;

			/* autoclear shouldn't be set in high bandwidth */
			if (qh->hb_mult == 1)
				val |= MUSB_RXCSR_AUTOCLEAR;

			musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | val);

			/* REVISIT: when actual_length != 0,
			 * transfer_buffer_length needs to be
			 * adjusted first...
			 */
			ret = c->channel_program(
				dma, qh->maxpacket,
				dma->desired_mode, buf, length);

			if (!ret) {
				c->channel_release(dma);
				hw_ep->rx_channel = NULL;
				dma = NULL;
				/* REVISIT reset CSR */
			}
		}
#endif	/* Mentor DMA */

		if (!dma) {
			done = musb_host_packet_rx(musb, urb,
					epnum, iso_err);
			DBG(6, "read %spacket\n", done ? "last " : "");
		}
	}
finish:
	urb->actual_length += xfer_len;
	qh->offset += xfer_len;
	if (done) {
		if (urb->status == -EINPROGRESS)
			urb->status = status;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
	}
}
/* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
 * the software schedule associates multiple such nodes with a given
 * host side hardware endpoint + direction; scheduling may activate
 * that hardware endpoint.
 */
static int musb_schedule(
	struct musb		*musb,
	struct musb_qh		*qh,
	int			is_in)
{
	int			idle;
	int			best_diff;
	int			best_end, epnum;
	struct musb_hw_ep	*hw_ep = NULL;
	struct list_head	*head = NULL;
	u8			toggle;
	u8			txtype;
	struct urb		*urb = next_urb(qh);

	/* use fixed hardware for control and bulk */
	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
		head = &musb->control;
		hw_ep = musb->control_ep;
		goto success;
	}

	/* else, periodic transfers get muxed to other endpoints */

	/*
	 * We know this qh hasn't been scheduled, so all we need to do
	 * is choose which hardware endpoint to put it on ...
	 *
	 * REVISIT what we really want here is a regular schedule tree
	 * like e.g. OHCI uses.
	 */
	best_diff = 4096;
	best_end = -1;

	for (epnum = 1, hw_ep = musb->endpoints + 1;
			epnum < musb->nr_endpoints;
			epnum++, hw_ep++) {
		int	diff;

		if (musb_ep_get_qh(hw_ep, is_in) != NULL)
			continue;

		if (hw_ep == musb->bulk_ep)
			continue;

		if (is_in)
			diff = hw_ep->max_packet_sz_rx;
		else
			diff = hw_ep->max_packet_sz_tx;
		diff -= (qh->maxpacket * qh->hb_mult);
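		/* best-fit search: e.g. a 512 byte bulk qh on an endpoint
		 * with a 4096 byte FIFO leaves diff 3584; the free endpoint
		 * with the smallest nonnegative diff (tightest FIFO fit)
		 * wins.
		 */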
		if (diff >= 0 && best_diff > diff) {

			/*
			 * Mentor controller has a bug in that if we schedule
			 * a BULK Tx transfer on an endpoint that had earlier
			 * handled ISOC then the BULK transfer has to start on
			 * a zero toggle.  If the BULK transfer starts on a 1
			 * toggle then this transfer will fail as the mentor
			 * controller starts the Bulk transfer on a 0 toggle
			 * irrespective of the programming of the toggle bits
			 * in the TXCSR register.  Check for this condition
			 * while allocating the EP for a Tx Bulk transfer.  If
			 * so skip this EP.
			 */
			hw_ep = musb->endpoints + epnum;
			toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in);
			txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE)
					>> 4) & 0x3;
			if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) &&
				toggle && (txtype == USB_ENDPOINT_XFER_ISOC))
				continue;

			best_diff = diff;
			best_end = epnum;
		}
	}
	/* use bulk reserved ep1 if no other ep is free */
	if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
		hw_ep = musb->bulk_ep;
		if (is_in)
			head = &musb->in_bulk;
		else
			head = &musb->out_bulk;

		/* Enable bulk RX NAK timeout scheme when bulk requests are
		 * multiplexed.  This scheme doesn't work in the high speed
		 * to full speed scenario, as NAK interrupts don't come from
		 * a full speed device connected behind a high speed hub.
		 * NAK timeout interval is 8 (128 uframe or 16ms) for HS and
		 * 4 (8 frame or 8ms) for FS device.
		 */
		if (is_in && qh->dev)
			qh->intv_reg =
				(USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
		goto success;
	} else if (best_end < 0) {
		return -ENOSPC;
	}
	idle = 1;
	qh->mux = 0;
	hw_ep = musb->endpoints + best_end;
	DBG(4, "qh %p periodic slot %d\n", qh, best_end);
success:
	if (head) {
		idle = list_empty(head);
		list_add_tail(&qh->ring, head);
		qh->mux = 1;
	}
	qh->hw_ep = hw_ep;
	qh->hep->hcpriv = qh;
	if (idle)
		musb_start_urb(musb, is_in, qh);
	return 0;
}
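/* usbcore entry point: if the endpoint already owns a live qh, the new urb
 * just rides its queue; otherwise allocate a qh, precompute the endpoint
 * register values, and hand it to musb_schedule().
 */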
static int musb_urb_enqueue(
	struct usb_hcd			*hcd,
	struct urb			*urb,
	gfp_t				mem_flags)
{
	unsigned long			flags;
	struct musb			*musb = hcd_to_musb(hcd);
	struct usb_host_endpoint	*hep = urb->ep;
	struct musb_qh			*qh;
	struct usb_endpoint_descriptor	*epd = &hep->desc;
	int				ret;
	unsigned			type_reg;
	unsigned			interval;

	/* host role must be active */
	if (!is_host_active(musb) || !musb->is_active)
		return -ENODEV;

	spin_lock_irqsave(&musb->lock, flags);
	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	qh = ret ? NULL : hep->hcpriv;
	if (qh)
		urb->hcpriv = qh;
	spin_unlock_irqrestore(&musb->lock, flags);

	/* DMA mapping was already done, if needed, and this urb is on
	 * hep->urb_list now ... so we're done, unless hep wasn't yet
	 * scheduled onto a live qh.
	 *
	 * REVISIT best to keep hep->hcpriv valid until the endpoint gets
	 * disabled, testing for empty qh->ring and avoiding qh setup costs
	 * except for the first urb queued after a config change.
	 */
	if (qh || ret)
		return ret;

	/* Allocate and initialize qh, minimizing the work done each time
	 * hw_ep gets reprogrammed, or with irqs blocked.  Then schedule it.
	 *
	 * REVISIT consider a dedicated qh kmem_cache, so it's harder
	 * for bugs in other kernel code to break this driver...
	 */
	qh = kzalloc(sizeof *qh, mem_flags);
	if (!qh) {
		spin_lock_irqsave(&musb->lock, flags);
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&musb->lock, flags);
		return -ENOMEM;
	}

	qh->hep = hep;
	qh->dev = urb->dev;
	INIT_LIST_HEAD(&qh->ring);
	qh->is_ready = 1;

	qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize);
	qh->type = usb_endpoint_type(epd);

	/* Bits 11 & 12 of wMaxPacketSize encode high bandwidth multiplier.
	 * Some musb cores don't support high bandwidth ISO transfers; and
	 * we don't (yet!) support high bandwidth interrupt transfers.
	 */
	qh->hb_mult = 1 + ((qh->maxpacket >> 11) & 0x03);
	if (qh->hb_mult > 1) {
		int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);

		if (ok)
			ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx)
				|| (usb_pipeout(urb->pipe) && musb->hb_iso_tx);
		if (!ok) {
			ret = -EMSGSIZE;
			goto done;
		}
		qh->maxpacket &= 0x7ff;
	}
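	/* e.g. wMaxPacketSize 0x1400 decodes as hb_mult 3 (bits 12:11 plus
	 * one) and a 1024 byte maxpacket (bits 10:0): three transactions
	 * of 1024 bytes per microframe.
	 */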
	qh->epnum = usb_endpoint_num(epd);

	/* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
	qh->addr_reg = (u8) usb_pipedevice(urb->pipe);

	/* precompute rxtype/txtype/type0 register */
	type_reg = (qh->type << 4) | qh->epnum;
	switch (urb->dev->speed) {
	case USB_SPEED_LOW:
		type_reg |= 0xc0;
		break;
	case USB_SPEED_FULL:
		type_reg |= 0x80;
		break;
	default:
		type_reg |= 0x40;
	}
	qh->type_reg = type_reg;
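	/* e.g. a full speed interrupt endpoint 3 yields type_reg 0xb3:
	 * speed in bits 7:6, transfer type in bits 5:4, and the target
	 * endpoint number in the low nibble.
	 */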
	/* Precompute RXINTERVAL/TXINTERVAL register */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_INT:
		/*
		 * Full/low speeds use the linear encoding,
		 * high speed uses the logarithmic encoding.
		 */
		if (urb->dev->speed <= USB_SPEED_FULL) {
			interval = max_t(u8, epd->bInterval, 1);
			break;
		}
		/* FALLTHROUGH */
	case USB_ENDPOINT_XFER_ISOC:
		/* ISO always uses logarithmic encoding */
		interval = min_t(u8, epd->bInterval, 16);
		break;
	default:
		/* REVISIT we actually want to use NAK limits, hinting to the
		 * transfer scheduling logic to try some other qh, e.g. try
		 * for 2 msec first:
		 *
		 * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
		 *
		 * The downside of disabling this is that transfer scheduling
		 * gets VERY unfair for nonperiodic transfers; a misbehaving
		 * peripheral could make that hurt.  That's perfectly normal
		 * for reads from network or serial adapters ... so we have
		 * partial NAKlimit support for bulk RX.
		 *
		 * The upside of disabling it is simpler transfer scheduling.
		 */
		interval = 0;
	}
	qh->intv_reg = interval;
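	/* In the logarithmic encoding the period is 2^(interval-1)
	 * (micro)frames, so e.g. a high speed bInterval of 4 means one
	 * poll every 8 uframes, i.e. once per millisecond.
	 */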
	/* precompute addressing for external hub/tt ports */
	if (musb->is_multipoint) {
		struct usb_device	*parent = urb->dev->parent;

		if (parent != hcd->self.root_hub) {
			qh->h_addr_reg = (u8) parent->devnum;

			/* set up tt info if needed */
			if (urb->dev->tt) {
				qh->h_port_reg = (u8) urb->dev->ttport;
				if (urb->dev->tt->hub)
					qh->h_addr_reg =
						(u8) urb->dev->tt->hub->devnum;
				if (urb->dev->tt->multi)
					qh->h_addr_reg |= 0x80;
			}
		}
	}
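	/* e.g. a full speed device on port 2 of a high speed hub at
	 * address 5 gets h_addr_reg 5 and h_port_reg 2, which the core
	 * uses to address the hub's transaction translator.
	 */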
	/* invariant: hep->hcpriv is null OR the qh that's already scheduled.
	 * until we get real dma queues (with an entry for each urb/buffer),
	 * we only have work to do in the former case.
	 */
	spin_lock_irqsave(&musb->lock, flags);
	if (hep->hcpriv) {
		/* some concurrent activity submitted another urb to hep...
		 * odd, rare, error prone, but legal.
		 */
		kfree(qh);
		qh = NULL;
		ret = 0;
	} else
		ret = musb_schedule(musb, qh,
				epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);

	if (ret == 0) {
		urb->hcpriv = qh;
		/* FIXME set urb->start_frame for iso/intr, it's tested in
		 * musb_start_urb(), but otherwise only konicawc cares ...
		 */
	}
	spin_unlock_irqrestore(&musb->lock, flags);

done:
	if (ret != 0) {
		spin_lock_irqsave(&musb->lock, flags);
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&musb->lock, flags);
		kfree(qh);
	}
	return ret;
}
/*
 * Abort a transfer that's at the head of a hardware queue.
 * Called with controller locked, irqs blocked.
 * The hardware queue then advances to the next transfer, unless prevented.
 */
static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
{
	struct musb_hw_ep	*ep = qh->hw_ep;
	void __iomem		*epio = ep->regs;
	unsigned		hw_end = ep->epnum;
	void __iomem		*regs = ep->musb->mregs;
	int			is_in = usb_pipein(urb->pipe);
	int			status = 0;
	u16			csr;

	musb_ep_select(regs, hw_end);
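	/* abort any DMA first, so the bytes it already moved are folded
	 * into urb->actual_length before the FIFO state is discarded
	 */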
	if (is_dma_capable()) {
		struct dma_channel	*dma;

		dma = is_in ? ep->rx_channel : ep->tx_channel;
		if (dma) {
			status = ep->musb->dma_controller->channel_abort(dma);
			DBG(status ? 1 : 3,
				"abort %cX%d DMA for urb %p --> %d\n",
				is_in ? 'R' : 'T', ep->epnum,
				urb, status);
			urb->actual_length += dma->actual_len;
		}
	}

	/* turn off DMA requests, discard state, stop polling ... */
	if (is_in) {
		/* giveback saves bulk toggle */
		csr = musb_h_flush_rxfifo(ep, 0);

		/* REVISIT we still get an irq; should likely clear the
		 * endpoint's irq status here to avoid bogus irqs.
		 * clearing that status is platform-specific...
		 */
	} else if (ep->epnum) {
		musb_h_tx_flush_fifo(ep);
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET
			| MUSB_TXCSR_DMAENAB
			| MUSB_TXCSR_H_RXSTALL
			| MUSB_TXCSR_H_NAKTIMEOUT
			| MUSB_TXCSR_H_ERROR
			| MUSB_TXCSR_TXPKTRDY);
		musb_writew(epio, MUSB_TXCSR, csr);
		/* REVISIT may need to clear FLUSHFIFO ... */
		musb_writew(epio, MUSB_TXCSR, csr);
		/* flush cpu writebuffer */
		csr = musb_readw(epio, MUSB_TXCSR);
	} else {
		musb_h_ep0_flush_fifo(ep);
	}
	if (status == 0)
		musb_advance_schedule(ep->musb, urb, ep, is_in);
	return status;
}
static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct musb		*musb = hcd_to_musb(hcd);
	struct musb_qh		*qh;
	unsigned long		flags;
	int			is_in = usb_pipein(urb->pipe);
	int			ret;

	DBG(4, "urb=%p, dev%d ep%d%s\n", urb,
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			is_in ? "in" : "out");

	spin_lock_irqsave(&musb->lock, flags);
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret)
		goto done;

	qh = urb->hcpriv;
	if (!qh)
		goto done;

	/*
	 * Any URB not actively programmed into endpoint hardware can be
	 * immediately given back; that's any URB not at the head of an
	 * endpoint queue, unless someday we get real DMA queues.  And even
	 * if it's at the head, it might not be known to the hardware...
	 *
	 * Otherwise abort current transfer, pending DMA, etc.; urb->status
	 * has already been updated.  This is a synchronous abort; it'd be
	 * OK to hold off until after some IRQ, though.
	 *
	 * NOTE: qh is invalid unless !list_empty(&hep->urb_list)
	 */
	if (!qh->is_ready
			|| urb->urb_list.prev != &qh->hep->urb_list
			|| musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
		int	ready = qh->is_ready;

		qh->is_ready = 0;
		musb_giveback(musb, urb, 0);
		qh->is_ready = ready;

		/* If nothing else (usually musb_giveback) is using it
		 * and its URB list has emptied, recycle this qh.
		 */
		if (ready && list_empty(&qh->hep->urb_list)) {
			qh->hep->hcpriv = NULL;
			list_del(&qh->ring);
			kfree(qh);
		}
	} else
		ret = musb_cleanup_urb(urb, qh);
done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return ret;
}
/* disable an endpoint */
static void
musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{
	u8			is_in = hep->desc.bEndpointAddress & USB_DIR_IN;
	unsigned long		flags;
	struct musb		*musb = hcd_to_musb(hcd);
	struct musb_qh		*qh;
	struct urb		*urb;

	spin_lock_irqsave(&musb->lock, flags);

	qh = hep->hcpriv;
	if (qh == NULL)
		goto exit;

	/* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */

	/* Kick the first URB off the hardware, if needed */
	qh->is_ready = 0;
	if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
		urb = next_urb(qh);

		/* make software (then hardware) stop ASAP */
		if (!urb->unlinked)
			urb->status = -ESHUTDOWN;

		/* cleanup */
		musb_cleanup_urb(urb, qh);

		/* Then nuke all the others ... and advance the
		 * queue on hw_ep (e.g. bulk ring) when we're done.
		 */
		while (!list_empty(&hep->urb_list)) {
			urb = next_urb(qh);
			urb->status = -ESHUTDOWN;
			musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
		}
	} else {
		/* Just empty the queue; the hardware is busy with
		 * other transfers, and since !qh->is_ready nothing
		 * will activate any of these as it advances.
		 */
		while (!list_empty(&hep->urb_list))
			musb_giveback(musb, next_urb(qh), -ESHUTDOWN);

		hep->hcpriv = NULL;
		list_del(&qh->ring);
		kfree(qh);
	}
exit:
	spin_unlock_irqrestore(&musb->lock, flags);
}
static int musb_h_get_frame_number(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);

	return musb_readw(musb->mregs, MUSB_FRAME);
}

static int musb_h_start(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);

	/* NOTE: musb_start() is called when the hub driver turns
	 * on port power, or when (OTG) peripheral starts.
	 */
	hcd->state = HC_STATE_RUNNING;
	musb->port1_status = 0;
	return 0;
}

static void musb_h_stop(struct usb_hcd *hcd)
{
	musb_stop(hcd_to_musb(hcd));
	hcd->state = HC_STATE_HALT;
}
static int musb_bus_suspend(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);
	u8		devctl;

	if (!is_host_active(musb))
		return 0;

	switch (musb->xceiv->state) {
	case OTG_STATE_A_SUSPEND:
		return 0;
	case OTG_STATE_A_WAIT_VRISE:
		/* ID could be grounded even if there's no device
		 * on the other end of the cable.  NOTE that the
		 * A_WAIT_VRISE timers are messy with MUSB...
		 */
		devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
			musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
		break;
	default:
		break;
	}

	if (musb->is_active) {
		WARNING("trying to suspend as %s while active\n",
				otg_state_string(musb));
		return -EBUSY;
	} else
		return 0;
}

static int musb_bus_resume(struct usb_hcd *hcd)
{
	/* resuming child port does the work */
	return 0;
}
const struct hc_driver musb_hc_driver = {
	.description		= "musb-hcd",
	.product_desc		= "MUSB HDRC host driver",
	.hcd_priv_size		= sizeof(struct musb),
	.flags			= HCD_USB2 | HCD_MEMORY,

	/* not using irq handler or reset hooks from usbcore, since
	 * those must be shared with peripheral code for OTG configs
	 */

	.start			= musb_h_start,
	.stop			= musb_h_stop,

	.get_frame_number	= musb_h_get_frame_number,

	.urb_enqueue		= musb_urb_enqueue,
	.urb_dequeue		= musb_urb_dequeue,
	.endpoint_disable	= musb_h_disable,

	.hub_status_data	= musb_hub_status_data,
	.hub_control		= musb_hub_control,
	.bus_suspend		= musb_bus_suspend,
	.bus_resume		= musb_bus_resume,
	/* .start_port_reset	= NULL, */
	/* .hub_irq_enable	= NULL, */
};