ehci-q.c

/*
 * Copyright (C) 2001-2004 by David Brownell
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/* this file is part of ehci-hcd.c */

/*-------------------------------------------------------------------------*/

/*
 * EHCI hardware queue manipulation ... the core.  QH/QTD manipulation.
 *
 * Control, bulk, and interrupt traffic all use "qh" lists.  They list "qtd"
 * entries describing USB transactions, max 16-20kB/entry (with 4kB-aligned
 * buffers needed for the larger number).  We use one QH per endpoint, queue
 * multiple urbs (all three types) per endpoint.  URBs may need several qtds.
 *
 * ISO traffic uses "ISO TD" (itd, and sitd) records, and (along with
 * interrupts) needs careful scheduling.  Performance improvements can be
 * an ongoing challenge.  That's in "ehci-sched.c".
 *
 * USB 1.1 devices are handled (a) by "companion" OHCI or UHCI root hubs,
 * or otherwise through transaction translators (TTs) in USB 2.0 hubs using
 * (b) special fields in qh entries or (c) split iso entries.  TTs will
 * buffer low/full speed data so the host collects it at high speed.
 */
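
/*
 * Illustrative sketch (not part of the original file): how the structures
 * used below relate.  Field names are the ones this code uses; the layout
 * is simplified.
 *
 *	ehci->async --> qh --> qh --> ...              (software async list)
 *	                |
 *	                +-- qh->qtd_list: qtd -> qtd -> ... -> qh->dummy
 *	                |        (transactions from one or more URBs)
 *	                +-- qh->hw: hardware overlay (hw_qtd_next,
 *	                        hw_alt_next, hw_token, hw_current, ...)
 *
 * The controller advances through the qtds via qh->hw->hw_qtd_next;
 * software only rewrites that overlay through qh_update()/qh_refresh(),
 * and only while the QH is idle.
 */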

/*-------------------------------------------------------------------------*/

/* fill a qtd, returning how much of the buffer we were able to queue up */

static int
qtd_fill(struct ehci_hcd *ehci, struct ehci_qtd *qtd, dma_addr_t buf,
		size_t len, int token, int maxpacket)
{
	int	i, count;
	u64	addr = buf;

	/* one buffer entry per 4K ... first might be short or unaligned */
	qtd->hw_buf[0] = cpu_to_hc32(ehci, (u32)addr);
	qtd->hw_buf_hi[0] = cpu_to_hc32(ehci, (u32)(addr >> 32));
	count = 0x1000 - (buf & 0x0fff);	/* rest of that page */
	if (likely (len < count))		/* ... iff needed */
		count = len;
	else {
		buf +=  0x1000;
		buf &= ~0x0fff;

		/* per-qtd limit: from 16K to 20K (best alignment) */
		for (i = 1; count < len && i < 5; i++) {
			addr = buf;
			qtd->hw_buf[i] = cpu_to_hc32(ehci, (u32)addr);
			qtd->hw_buf_hi[i] = cpu_to_hc32(ehci,
					(u32)(addr >> 32));
			buf += 0x1000;
			if ((count + 0x1000) < len)
				count += 0x1000;
			else
				count = len;
		}

		/* short packets may only terminate transfers */
		if (count != len)
			count -= (count % maxpacket);
	}
	qtd->hw_token = cpu_to_hc32(ehci, (count << 16) | token);
	qtd->length = count;

	return count;
}
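
/*
 * Worked example (illustrative only, not from the original source): with
 * 4 KB pages a qtd has five buffer pointers, so it can map from about 16 KB
 * (arbitrary alignment) up to 20 KB (page-aligned buffer), matching the
 * "16-20kB/entry" note above.  A hypothetical call
 *
 *	count = qtd_fill(ehci, qtd, buf, 21000, token, 512);
 *
 * with a page-aligned buf fills hw_buf[0..4] with five consecutive pages
 * and returns 20480: the leftover is trimmed to a maxpacket multiple
 * (20480 already is one), and the caller must queue the remaining 520
 * bytes in a following qtd, as qh_urb_transaction() does below.
 */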

/*-------------------------------------------------------------------------*/

static inline void
qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
{
	struct ehci_qh_hw *hw = qh->hw;

	/* writes to an active overlay are unsafe */
	BUG_ON(qh->qh_state != QH_STATE_IDLE);

	hw->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
	hw->hw_alt_next = EHCI_LIST_END(ehci);

	/* Except for control endpoints, we make hardware maintain data
	 * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
	 * and set the pseudo-toggle in udev.  Only usb_clear_halt() will
	 * ever clear it.
	 */
	if (!(hw->hw_info1 & cpu_to_hc32(ehci, 1 << 14))) {
		unsigned	is_out, epnum;

		is_out = qh->is_out;
		epnum = (hc32_to_cpup(ehci, &hw->hw_info1) >> 8) & 0x0f;
		if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) {
			hw->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
			usb_settoggle (qh->dev, epnum, is_out, 1);
		}
	}

	hw->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING);
}

/* if it weren't for a common silicon quirk (writing the dummy into the qh
 * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
 * recovery (including urb dequeue) would need software changes to a QH...
 */
static void
qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	struct ehci_qtd *qtd;

	if (list_empty (&qh->qtd_list))
		qtd = qh->dummy;
	else {
		qtd = list_entry (qh->qtd_list.next,
				struct ehci_qtd, qtd_list);
		/* first qtd may already be partially processed */
		if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw->hw_current)
			qtd = NULL;
	}

	if (qtd)
		qh_update (ehci, qh, qtd);
}

/*-------------------------------------------------------------------------*/

static void qh_link_async(struct ehci_hcd *ehci, struct ehci_qh *qh);

static void ehci_clear_tt_buffer_complete(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep)
{
	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
	struct ehci_qh		*qh = ep->hcpriv;
	unsigned long		flags;

	spin_lock_irqsave(&ehci->lock, flags);
	qh->clearing_tt = 0;
	if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list)
			&& ehci->rh_state == EHCI_RH_RUNNING)
		qh_link_async(ehci, qh);
	spin_unlock_irqrestore(&ehci->lock, flags);
}

static void ehci_clear_tt_buffer(struct ehci_hcd *ehci, struct ehci_qh *qh,
		struct urb *urb, u32 token)
{
	/* If an async split transaction gets an error or is unlinked,
	 * the TT buffer may be left in an indeterminate state.  We
	 * have to clear the TT buffer.
	 *
	 * Note: this routine is never called for Isochronous transfers.
	 */
	if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) {
#ifdef DEBUG
		struct usb_device *tt = urb->dev->tt->hub;
		dev_dbg(&tt->dev,
			"clear tt buffer port %d, a%d ep%d t%08x\n",
			urb->dev->ttport, urb->dev->devnum,
			usb_pipeendpoint(urb->pipe), token);
#endif /* DEBUG */
		if (!ehci_is_TDI(ehci)
				|| urb->dev->tt->hub !=
				   ehci_to_hcd(ehci)->self.root_hub) {
			if (usb_hub_clear_tt_buffer(urb) == 0)
				qh->clearing_tt = 1;
		} else {

			/* REVISIT ARC-derived cores don't clear the root
			 * hub TT buffer in this way...
			 */
		}
	}
}

static int qtd_copy_status (
	struct ehci_hcd *ehci,
	struct urb *urb,
	size_t length,
	u32 token
)
{
	int	status = -EINPROGRESS;

	/* count IN/OUT bytes, not SETUP (even short packets) */
	if (likely (QTD_PID (token) != 2))
		urb->actual_length += length - QTD_LENGTH (token);

	/* don't modify error codes */
	if (unlikely(urb->unlinked))
		return status;

	/* force cleanup after short read; not always an error */
	if (unlikely (IS_SHORT_READ (token)))
		status = -EREMOTEIO;

	/* serious "can't proceed" faults reported by the hardware */
	if (token & QTD_STS_HALT) {
		if (token & QTD_STS_BABBLE) {
			/* FIXME "must" disable babbling device's port too */
			status = -EOVERFLOW;
		/* CERR nonzero + halt --> stall */
		} else if (QTD_CERR(token)) {
			status = -EPIPE;

		/* In theory, more than one of the following bits can be set
		 * since they are sticky and the transaction is retried.
		 * Which to test first is rather arbitrary.
		 */
		} else if (token & QTD_STS_MMF) {
			/* fs/ls interrupt xfer missed the complete-split */
			status = -EPROTO;
		} else if (token & QTD_STS_DBE) {
			status = (QTD_PID (token) == 1) /* IN ? */
				? -ENOSR	/* hc couldn't read data */
				: -ECOMM;	/* hc couldn't write data */
		} else if (token & QTD_STS_XACT) {
			/* timeout, bad CRC, wrong PID, etc */
			ehci_dbg(ehci, "devpath %s ep%d%s 3strikes\n",
				urb->dev->devpath,
				usb_pipeendpoint(urb->pipe),
				usb_pipein(urb->pipe) ? "in" : "out");
			status = -EPROTO;
		} else {	/* unknown */
			status = -EPROTO;
		}

		ehci_vdbg (ehci,
			"dev%d ep%d%s qtd token %08x --> status %d\n",
			usb_pipedevice (urb->pipe),
			usb_pipeendpoint (urb->pipe),
			usb_pipein (urb->pipe) ? "in" : "out",
			token, status);
	}

	return status;
}

static void
ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status)
__releases(ehci->lock)
__acquires(ehci->lock)
{
	if (likely (urb->hcpriv != NULL)) {
		struct ehci_qh	*qh = (struct ehci_qh *) urb->hcpriv;

		/* S-mask in a QH means it's an interrupt urb */
		if ((qh->hw->hw_info2 & cpu_to_hc32(ehci, QH_SMASK)) != 0) {

			/* ... update hc-wide periodic stats (for usbfs) */
			ehci_to_hcd(ehci)->self.bandwidth_int_reqs--;
		}
		qh_put (qh);
	}

	if (unlikely(urb->unlinked)) {
		COUNT(ehci->stats.unlink);
	} else {
		/* report non-error and short read status as zero */
		if (status == -EINPROGRESS || status == -EREMOTEIO)
			status = 0;
		COUNT(ehci->stats.complete);
	}

#ifdef EHCI_URB_TRACE
	ehci_dbg (ehci,
		"%s %s urb %p ep%d%s status %d len %d/%d\n",
		__func__, urb->dev->devpath, urb,
		usb_pipeendpoint (urb->pipe),
		usb_pipein (urb->pipe) ? "in" : "out",
		status,
		urb->actual_length, urb->transfer_buffer_length);
#endif

	/* complete() can reenter this HCD */
	usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
	spin_unlock (&ehci->lock);
	usb_hcd_giveback_urb(ehci_to_hcd(ehci), urb, status);
	spin_lock (&ehci->lock);
}

static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh);
static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh);

static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);

/*
 * Process and free completed qtds for a qh, returning URBs to drivers.
 * Chases up to qh->hw_current.  Returns number of completions called,
 * indicating how much "real" work we did.
 */
static unsigned
qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	struct ehci_qtd		*last, *end = qh->dummy;
	struct list_head	*entry, *tmp;
	int			last_status;
	int			stopped;
	unsigned		count = 0;
	u8			state;
	struct ehci_qh_hw	*hw = qh->hw;

	if (unlikely (list_empty (&qh->qtd_list)))
		return count;

	/* completions (or tasks on other cpus) must never clobber HALT
	 * till we've gone through and cleaned everything up, even when
	 * they add urbs to this qh's queue or mark them for unlinking.
	 *
	 * NOTE:  unlinking expects to be done in queue order.
	 *
	 * It's a bug for qh->qh_state to be anything other than
	 * QH_STATE_IDLE, unless our caller is scan_async() or
	 * scan_periodic().
	 */
	state = qh->qh_state;
	qh->qh_state = QH_STATE_COMPLETING;
	stopped = (state == QH_STATE_IDLE);

 rescan:
	last = NULL;
	last_status = -EINPROGRESS;
	qh->needs_rescan = 0;

	/* remove de-activated QTDs from front of queue.
	 * after faults (including short reads), cleanup this urb
	 * then let the queue advance.
	 * if queue is stopped, handles unlinks.
	 */
	list_for_each_safe (entry, tmp, &qh->qtd_list) {
		struct ehci_qtd	*qtd;
		struct urb	*urb;
		u32		token = 0;

		qtd = list_entry (entry, struct ehci_qtd, qtd_list);
		urb = qtd->urb;

		/* clean up any state from previous QTD ...*/
		if (last) {
			if (likely (last->urb != urb)) {
				ehci_urb_done(ehci, last->urb, last_status);
				count++;
				last_status = -EINPROGRESS;
			}
			ehci_qtd_free (ehci, last);
			last = NULL;
		}

		/* ignore urbs submitted during completions we reported */
		if (qtd == end)
			break;

		/* hardware copies qtd out of qh overlay */
		rmb ();
		token = hc32_to_cpu(ehci, qtd->hw_token);

		/* always clean up qtds the hc de-activated */
 retry_xacterr:
		if ((token & QTD_STS_ACTIVE) == 0) {

			/* Report Data Buffer Error: non-fatal but useful */
			if (token & QTD_STS_DBE)
				ehci_dbg(ehci,
					"detected DataBufferErr for urb %p ep%d%s len %d, qtd %p [qh %p]\n",
					urb,
					usb_endpoint_num(&urb->ep->desc),
					usb_endpoint_dir_in(&urb->ep->desc) ? "in" : "out",
					urb->transfer_buffer_length,
					qtd,
					qh);

			/* on STALL, error, and short reads this urb must
			 * complete and all its qtds must be recycled.
			 */
			if ((token & QTD_STS_HALT) != 0) {

				/* retry transaction errors until we
				 * reach the software xacterr limit
				 */
				if ((token & QTD_STS_XACT) &&
						QTD_CERR(token) == 0 &&
						++qh->xacterrs < QH_XACTERR_MAX &&
						!urb->unlinked) {
					ehci_dbg(ehci,
						"detected XactErr len %zu/%zu retry %d\n",
						qtd->length - QTD_LENGTH(token),
						qtd->length, qh->xacterrs);

					/* reset the token in the qtd and the
					 * qh overlay (which still contains
					 * the qtd) so that we pick up from
					 * where we left off
					 */
					token &= ~QTD_STS_HALT;
					token |= QTD_STS_ACTIVE |
							(EHCI_TUNE_CERR << 10);
					qtd->hw_token = cpu_to_hc32(ehci,
							token);
					wmb();
					hw->hw_token = cpu_to_hc32(ehci,
							token);
					goto retry_xacterr;
				}
				stopped = 1;

			/* magic dummy for some short reads; qh won't advance.
			 * that silicon quirk can kick in with this dummy too.
			 *
			 * other short reads won't stop the queue, including
			 * control transfers (status stage handles that) or
			 * most other single-qtd reads ... the queue stops if
			 * URB_SHORT_NOT_OK was set so the driver submitting
			 * the urbs could clean it up.
			 */
			} else if (IS_SHORT_READ (token)
					&& !(qtd->hw_alt_next
						& EHCI_LIST_END(ehci))) {
				stopped = 1;
			}

		/* stop scanning when we reach qtds the hc is using */
		} else if (likely (!stopped
				&& ehci->rh_state == EHCI_RH_RUNNING)) {
			break;

		/* scan the whole queue for unlinks whenever it stops */
		} else {
			stopped = 1;

			/* cancel everything if we halt, suspend, etc */
			if (ehci->rh_state != EHCI_RH_RUNNING)
				last_status = -ESHUTDOWN;

			/* this qtd is active; skip it unless a previous qtd
			 * for its urb faulted, or its urb was canceled.
			 */
			else if (last_status == -EINPROGRESS && !urb->unlinked)
				continue;

			/* qh unlinked; token in overlay may be most current */
			if (state == QH_STATE_IDLE
					&& cpu_to_hc32(ehci, qtd->qtd_dma)
						== hw->hw_current) {
				token = hc32_to_cpu(ehci, hw->hw_token);

				/* An unlink may leave an incomplete
				 * async transaction in the TT buffer.
				 * We have to clear it.
				 */
				ehci_clear_tt_buffer(ehci, qh, urb, token);
			}
		}

		/* unless we already know the urb's status, collect qtd status
		 * and update count of bytes transferred.  in common short read
		 * cases with only one data qtd (including control transfers),
		 * queue processing won't halt.  but with two or more qtds (for
		 * example, with a 32 KB transfer), when the first qtd gets a
		 * short read the second must be removed by hand.
		 */
		if (last_status == -EINPROGRESS) {
			last_status = qtd_copy_status(ehci, urb,
					qtd->length, token);
			if (last_status == -EREMOTEIO
					&& (qtd->hw_alt_next
						& EHCI_LIST_END(ehci)))
				last_status = -EINPROGRESS;

			/* As part of low/full-speed endpoint-halt processing
			 * we must clear the TT buffer (11.17.5).
			 */
			if (unlikely(last_status != -EINPROGRESS &&
					last_status != -EREMOTEIO)) {
				/* The TT's in some hubs malfunction when they
				 * receive this request following a STALL (they
				 * stop sending isochronous packets).  Since a
				 * STALL can't leave the TT buffer in a busy
				 * state (if you believe Figures 11-48 - 11-51
				 * in the USB 2.0 spec), we won't clear the TT
				 * buffer in this case.  Strictly speaking this
				 * is a violation of the spec.
				 */
				if (last_status != -EPIPE)
					ehci_clear_tt_buffer(ehci, qh, urb,
							token);
			}
		}

		/* if we're removing something not at the queue head,
		 * patch the hardware queue pointer.
		 */
		if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
			last = list_entry (qtd->qtd_list.prev,
					struct ehci_qtd, qtd_list);
			last->hw_next = qtd->hw_next;
		}

		/* remove qtd; it's recycled after possible urb completion */
		list_del (&qtd->qtd_list);
		last = qtd;

		/* reinit the xacterr counter for the next qtd */
		qh->xacterrs = 0;
	}

	/* last urb's completion might still need calling */
	if (likely (last != NULL)) {
		ehci_urb_done(ehci, last->urb, last_status);
		count++;
		ehci_qtd_free (ehci, last);
	}

	/* Do we need to rescan for URBs dequeued during a giveback? */
	if (unlikely(qh->needs_rescan)) {
		/* If the QH is already unlinked, do the rescan now. */
		if (state == QH_STATE_IDLE)
			goto rescan;

		/* Otherwise we have to wait until the QH is fully unlinked.
		 * Our caller will start an unlink if qh->needs_rescan is
		 * set.  But if an unlink has already started, nothing needs
		 * to be done.
		 */
		if (state != QH_STATE_LINKED)
			qh->needs_rescan = 0;
	}

	/* restore original state; caller must unlink or relink */
	qh->qh_state = state;

	/* be sure the hardware's done with the qh before refreshing
	 * it after fault cleanup, or recovering from silicon wrongly
	 * overlaying the dummy qtd (which reduces DMA chatter).
	 */
	if (stopped != 0 || hw->hw_qtd_next == EHCI_LIST_END(ehci)) {
		switch (state) {
		case QH_STATE_IDLE:
			qh_refresh(ehci, qh);
			break;
		case QH_STATE_LINKED:
			/* We won't refresh a QH that's linked (after the HC
			 * stopped the queue).  That avoids a race:
			 *  - HC reads first part of QH;
			 *  - CPU updates that first part and the token;
			 *  - HC reads rest of that QH, including token
			 * Result: HC gets an inconsistent image, and then
			 * DMAs to/from the wrong memory (corrupting it).
			 *
			 * That should be rare for interrupt transfers,
			 * except maybe high bandwidth ...
			 */

			/* Tell the caller to start an unlink */
			qh->needs_rescan = 1;
			break;
		/* otherwise, unlink already started */
		}
	}

	return count;
}

/*-------------------------------------------------------------------------*/

// high bandwidth multiplier, as encoded in highspeed endpoint descriptors
#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
// ... and packet size, for any kind of endpoint descriptor
#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
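
// Illustrative example (not from the original source): a high-bandwidth
// interrupt endpoint reporting wMaxPacketSize 0x1400 decodes as
// max_packet(0x1400) = 1024 bytes and hb_mult(0x1400) = 3, i.e. up to
// three 1024-byte transactions per microframe.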

/*
 * reverse of qh_urb_transaction:  free a list of TDs.
 * used for cleanup after errors, before HC sees an URB's TDs.
 */
static void qtd_list_free (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list
) {
	struct list_head	*entry, *temp;

	list_for_each_safe (entry, temp, qtd_list) {
		struct ehci_qtd	*qtd;

		qtd = list_entry (entry, struct ehci_qtd, qtd_list);
		list_del (&qtd->qtd_list);
		ehci_qtd_free (ehci, qtd);
	}
}

/*
 * create a list of filled qtds for this URB; won't link into qh.
 */
static struct list_head *
qh_urb_transaction (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*head,
	gfp_t			flags
) {
	struct ehci_qtd		*qtd, *qtd_prev;
	dma_addr_t		buf;
	int			len, this_sg_len, maxpacket;
	int			is_input;
	u32			token;
	int			i;
	struct scatterlist	*sg;

	/*
	 * URBs map to sequences of QTDs:  one logical transaction
	 */
	qtd = ehci_qtd_alloc (ehci, flags);
	if (unlikely (!qtd))
		return NULL;
	list_add_tail (&qtd->qtd_list, head);
	qtd->urb = urb;

	token = QTD_STS_ACTIVE;
	token |= (EHCI_TUNE_CERR << 10);
	/* for split transactions, SplitXState initialized to zero */

	len = urb->transfer_buffer_length;
	is_input = usb_pipein (urb->pipe);
	if (usb_pipecontrol (urb->pipe)) {
		/* SETUP pid */
		qtd_fill(ehci, qtd, urb->setup_dma,
				sizeof (struct usb_ctrlrequest),
				token | (2 /* "setup" */ << 8), 8);

		/* ... and always at least one more pid */
		token ^= QTD_TOGGLE;
		qtd_prev = qtd;
		qtd = ehci_qtd_alloc (ehci, flags);
		if (unlikely (!qtd))
			goto cleanup;
		qtd->urb = urb;
		qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
		list_add_tail (&qtd->qtd_list, head);

		/* for zero length DATA stages, STATUS is always IN */
		if (len == 0)
			token |= (1 /* "in" */ << 8);
	}

	/*
	 * data transfer stage:  buffer setup
	 */
	i = urb->num_mapped_sgs;
	if (len > 0 && i > 0) {
		sg = urb->sg;
		buf = sg_dma_address(sg);

		/* urb->transfer_buffer_length may be smaller than the
		 * size of the scatterlist (or vice versa)
		 */
		this_sg_len = min_t(int, sg_dma_len(sg), len);
	} else {
		sg = NULL;
		buf = urb->transfer_dma;
		this_sg_len = len;
	}

	if (is_input)
		token |= (1 /* "in" */ << 8);
	/* else it's already initted to "out" pid (0 << 8) */

	maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));

	/*
	 * buffer gets wrapped in one or more qtds;
	 * last one may be "short" (including zero len)
	 * and may serve as a control status ack
	 */
	for (;;) {
		int this_qtd_len;

		this_qtd_len = qtd_fill(ehci, qtd, buf, this_sg_len, token,
				maxpacket);
		this_sg_len -= this_qtd_len;
		len -= this_qtd_len;
		buf += this_qtd_len;

		/*
		 * short reads advance to a "magic" dummy instead of the next
		 * qtd ... that forces the queue to stop, for manual cleanup.
		 * (this will usually be overridden later.)
		 */
		if (is_input)
			qtd->hw_alt_next = ehci->async->hw->hw_alt_next;

		/* qh makes control packets use qtd toggle; maybe switch it */
		if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
			token ^= QTD_TOGGLE;

		if (likely(this_sg_len <= 0)) {
			if (--i <= 0 || len <= 0)
				break;
			sg = sg_next(sg);
			buf = sg_dma_address(sg);
			this_sg_len = min_t(int, sg_dma_len(sg), len);
		}

		qtd_prev = qtd;
		qtd = ehci_qtd_alloc (ehci, flags);
		if (unlikely (!qtd))
			goto cleanup;
		qtd->urb = urb;
		qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
		list_add_tail (&qtd->qtd_list, head);
	}

	/*
	 * unless the caller requires manual cleanup after short reads,
	 * have the alt_next mechanism keep the queue running after the
	 * last data qtd (the only one, for control and most other cases).
	 */
	if (likely ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
				|| usb_pipecontrol (urb->pipe)))
		qtd->hw_alt_next = EHCI_LIST_END(ehci);

	/*
	 * control requests may need a terminating data "status" ack;
	 * other OUT ones may need a terminating short packet
	 * (zero length).
	 */
	if (likely (urb->transfer_buffer_length != 0)) {
		int	one_more = 0;

		if (usb_pipecontrol (urb->pipe)) {
			one_more = 1;
			token ^= 0x0100;	/* "in" <--> "out"  */
			token |= QTD_TOGGLE;	/* force DATA1 */
		} else if (usb_pipeout(urb->pipe)
				&& (urb->transfer_flags & URB_ZERO_PACKET)
				&& !(urb->transfer_buffer_length % maxpacket)) {
			one_more = 1;
		}
		if (one_more) {
			qtd_prev = qtd;
			qtd = ehci_qtd_alloc (ehci, flags);
			if (unlikely (!qtd))
				goto cleanup;
			qtd->urb = urb;
			qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
			list_add_tail (&qtd->qtd_list, head);

			/* never any data in such packets */
			qtd_fill(ehci, qtd, 0, 0, token, 0);
		}
	}

	/* by default, enable interrupt on urb completion */
	if (likely (!(urb->transfer_flags & URB_NO_INTERRUPT)))
		qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC);
	return head;

cleanup:
	qtd_list_free (ehci, urb, head);
	return NULL;
}
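
/*
 * Typical caller pattern (illustrative sketch, not code from this file):
 * the async enqueue path builds the qtd list on the stack and then hands
 * it to submit_async(), roughly:
 *
 *	struct list_head qtd_list;
 *
 *	INIT_LIST_HEAD(&qtd_list);
 *	if (!qh_urb_transaction(ehci, urb, &qtd_list, mem_flags))
 *		return -ENOMEM;
 *	return submit_async(ehci, urb, &qtd_list, mem_flags);
 *
 * submit_async() (below) frees the list via qtd_list_free() if it never
 * manages to append the qtds to a QH, so the caller must not free it again.
 */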

/*-------------------------------------------------------------------------*/

// Would be best to create all qh's from config descriptors,
// when each interface/altsetting is established.  Unlink
// any previous qh and cancel its urbs first; endpoints are
// implicitly reset then (data toggle too).
// That'd mean updating how usbcore talks to HCDs. (2.7?)

/*
 * Each QH holds a qtd list; a QH is used for everything except iso.
 *
 * For interrupt urbs, the scheduler must set the microframe scheduling
 * mask(s) each time the QH gets scheduled.  For highspeed, that's
 * just one microframe in the s-mask.  For split interrupt transactions
 * there are additional complications: c-mask, maybe FSTNs.
 */
static struct ehci_qh *
qh_make (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	gfp_t			flags
) {
	struct ehci_qh		*qh = ehci_qh_alloc (ehci, flags);
	u32			info1 = 0, info2 = 0;
	int			is_input, type;
	int			maxp = 0;
	struct usb_tt		*tt = urb->dev->tt;
	struct ehci_qh_hw	*hw;

	if (!qh)
		return qh;

	/*
	 * init endpoint/device data for this QH
	 */
	info1 |= usb_pipeendpoint (urb->pipe) << 8;
	info1 |= usb_pipedevice (urb->pipe) << 0;

	is_input = usb_pipein (urb->pipe);
	type = usb_pipetype (urb->pipe);
	maxp = usb_maxpacket (urb->dev, urb->pipe, !is_input);

	/* 1024 byte maxpacket is a hardware ceiling.  High bandwidth
	 * acts like up to 3KB, but is built from smaller packets.
	 */
	if (max_packet(maxp) > 1024) {
		ehci_dbg(ehci, "bogus qh maxpacket %d\n", max_packet(maxp));
		goto done;
	}

	/* Compute interrupt scheduling parameters just once, and save.
	 * - allowing for high bandwidth, how many nsec/uframe are used?
	 * - split transactions need a second CSPLIT uframe; same question
	 * - splits also need a schedule gap (for full/low speed I/O)
	 * - qh has a polling interval
	 *
	 * For control/bulk requests, the HC or TT handles these.
	 */
	if (type == PIPE_INTERRUPT) {
		qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
				is_input, 0,
				hb_mult(maxp) * max_packet(maxp)));
		qh->start = NO_FRAME;
		qh->stamp = ehci->periodic_stamp;

		if (urb->dev->speed == USB_SPEED_HIGH) {
			qh->c_usecs = 0;
			qh->gap_uf = 0;

			qh->period = urb->interval >> 3;
			if (qh->period == 0 && urb->interval != 1) {
				/* NOTE interval 2 or 4 uframes could work.
				 * But interval 1 scheduling is simpler, and
				 * includes high bandwidth.
				 */
				urb->interval = 1;
			} else if (qh->period > ehci->periodic_size) {
				qh->period = ehci->periodic_size;
				urb->interval = qh->period << 3;
			}
		} else {
			int		think_time;

			/* gap is f(FS/LS transfer times) */
			qh->gap_uf = 1 + usb_calc_bus_time (urb->dev->speed,
					is_input, 0, maxp) / (125 * 1000);

			/* FIXME this just approximates SPLIT/CSPLIT times */
			if (is_input) {		// SPLIT, gap, CSPLIT+DATA
				qh->c_usecs = qh->usecs + HS_USECS (0);
				qh->usecs = HS_USECS (1);
			} else {		// SPLIT+DATA, gap, CSPLIT
				qh->usecs += HS_USECS (1);
				qh->c_usecs = HS_USECS (0);
			}

			think_time = tt ? tt->think_time : 0;
			qh->tt_usecs = NS_TO_US (think_time +
					usb_calc_bus_time (urb->dev->speed,
						is_input, 0, max_packet (maxp)));
			qh->period = urb->interval;
			if (qh->period > ehci->periodic_size) {
				qh->period = ehci->periodic_size;
				urb->interval = qh->period;
			}
		}
	}

	/* support for tt scheduling, and access to toggles */
	qh->dev = urb->dev;

	/* using TT? */
	switch (urb->dev->speed) {
	case USB_SPEED_LOW:
		info1 |= (1 << 12);	/* EPS "low" */
		/* FALL THROUGH */

	case USB_SPEED_FULL:
		/* EPS 0 means "full" */
		if (type != PIPE_INTERRUPT)
			info1 |= (EHCI_TUNE_RL_TT << 28);
		if (type == PIPE_CONTROL) {
			info1 |= (1 << 27);	/* for TT */
			info1 |= 1 << 14;	/* toggle from qtd */
		}
		info1 |= maxp << 16;

		info2 |= (EHCI_TUNE_MULT_TT << 30);

		/* Some Freescale processors have an erratum in which the
		 * port number in the queue head was 0..N-1 instead of 1..N.
		 */
		if (ehci_has_fsl_portno_bug(ehci))
			info2 |= (urb->dev->ttport-1) << 23;
		else
			info2 |= urb->dev->ttport << 23;

		/* set the address of the TT; for TDI's integrated
		 * root hub tt, leave it zeroed.
		 */
		if (tt && tt->hub != ehci_to_hcd(ehci)->self.root_hub)
			info2 |= tt->hub->devnum << 16;

		/* NOTE:  if (PIPE_INTERRUPT) { scheduler sets c-mask } */

		break;

	case USB_SPEED_HIGH:		/* no TT involved */
		info1 |= (2 << 12);	/* EPS "high" */
		if (type == PIPE_CONTROL) {
			info1 |= (EHCI_TUNE_RL_HS << 28);
			info1 |= 64 << 16;	/* usb2 fixed maxpacket */
			info1 |= 1 << 14;	/* toggle from qtd */
			info2 |= (EHCI_TUNE_MULT_HS << 30);
		} else if (type == PIPE_BULK) {
			info1 |= (EHCI_TUNE_RL_HS << 28);
			/* The USB spec says that high speed bulk endpoints
			 * always use 512 byte maxpacket.  But some device
			 * vendors decided to ignore that, and MSFT is happy
			 * to help them do so.  So now people expect to use
			 * such nonconformant devices with Linux too; sigh.
			 */
			info1 |= max_packet(maxp) << 16;
			info2 |= (EHCI_TUNE_MULT_HS << 30);
		} else {		/* PIPE_INTERRUPT */
			info1 |= max_packet (maxp) << 16;
			info2 |= hb_mult (maxp) << 30;
		}
		break;
	default:
		dbg ("bogus dev %p speed %d", urb->dev, urb->dev->speed);
done:
		qh_put (qh);
		return NULL;
	}

	/* NOTE:  if (PIPE_INTERRUPT) { scheduler sets s-mask } */

	/* init as live, toggle clear, advance to dummy */
	qh->qh_state = QH_STATE_IDLE;
	hw = qh->hw;
	hw->hw_info1 = cpu_to_hc32(ehci, info1);
	hw->hw_info2 = cpu_to_hc32(ehci, info2);
	qh->is_out = !is_input;
	usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
	qh_refresh (ehci, qh);
	return qh;
}

/*-------------------------------------------------------------------------*/

/* move qh (and its qtds) onto async queue; maybe enable queue.  */

static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	__hc32		dma = QH_NEXT(ehci, qh->qh_dma);
	struct ehci_qh	*head;

	/* Don't link a QH if there's a Clear-TT-Buffer pending */
	if (unlikely(qh->clearing_tt))
		return;

	WARN_ON(qh->qh_state != QH_STATE_IDLE);

	/* (re)start the async schedule? */
	head = ehci->async;
	timer_action_done (ehci, TIMER_ASYNC_OFF);
	if (!head->qh_next.qh) {
		u32	cmd = ehci_readl(ehci, &ehci->regs->command);

		if (!(cmd & CMD_ASE)) {
			/* in case a clear of CMD_ASE didn't take yet */
			(void)handshake(ehci, &ehci->regs->status,
					STS_ASS, 0, 150);
			cmd |= CMD_ASE;
			ehci_writel(ehci, cmd, &ehci->regs->command);
			/* posted write need not be known to HC yet ... */
		}
	}

	/* clear halt and/or toggle; and maybe recover from silicon quirk */
	qh_refresh(ehci, qh);

	/* splice right after start */
	qh->qh_next = head->qh_next;
	qh->hw->hw_next = head->hw->hw_next;
	wmb ();

	head->qh_next.qh = qh;
	head->hw->hw_next = dma;

	qh_get(qh);
	qh->xacterrs = 0;
	qh->qh_state = QH_STATE_LINKED;
	/* qtd completions reported later by interrupt */
}

/*-------------------------------------------------------------------------*/

/*
 * For control/bulk/interrupt, return QH with these TDs appended.
 * Allocates and initializes the QH if necessary.
 * Returns null if it can't allocate a QH it needs to.
 * If the QH has TDs (urbs) already, that's great.
 */
static struct ehci_qh *qh_append_tds (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list,
	int			epnum,
	void			**ptr
)
{
	struct ehci_qh		*qh = NULL;
	__hc32			qh_addr_mask = cpu_to_hc32(ehci, 0x7f);

	qh = (struct ehci_qh *) *ptr;
	if (unlikely (qh == NULL)) {
		/* can't sleep here, we have ehci->lock... */
		qh = qh_make (ehci, urb, GFP_ATOMIC);
		*ptr = qh;
	}
	if (likely (qh != NULL)) {
		struct ehci_qtd	*qtd;

		if (unlikely (list_empty (qtd_list)))
			qtd = NULL;
		else
			qtd = list_entry (qtd_list->next, struct ehci_qtd,
					qtd_list);

		/* control qh may need patching ... */
		if (unlikely (epnum == 0)) {

			/* usb_reset_device() briefly reverts to address 0 */
			if (usb_pipedevice (urb->pipe) == 0)
				qh->hw->hw_info1 &= ~qh_addr_mask;
		}

		/* just one way to queue requests: swap with the dummy qtd.
		 * only hc or qh_refresh() ever modify the overlay.
		 */
		if (likely (qtd != NULL)) {
			struct ehci_qtd		*dummy;
			dma_addr_t		dma;
			__hc32			token;

			/* to avoid racing the HC, use the dummy td instead of
			 * the first td of our list (becomes new dummy).  both
			 * tds stay deactivated until we're done, when the
			 * HC is allowed to fetch the old dummy (4.10.2).
			 */
			token = qtd->hw_token;
			qtd->hw_token = HALT_BIT(ehci);

			dummy = qh->dummy;

			dma = dummy->qtd_dma;
			*dummy = *qtd;
			dummy->qtd_dma = dma;

			list_del (&qtd->qtd_list);
			list_add (&dummy->qtd_list, qtd_list);
			list_splice_tail(qtd_list, &qh->qtd_list);

			ehci_qtd_init(ehci, qtd, qtd->qtd_dma);
			qh->dummy = qtd;

			/* hc must see the new dummy at list end */
			dma = qtd->qtd_dma;
			qtd = list_entry (qh->qtd_list.prev,
					struct ehci_qtd, qtd_list);
			qtd->hw_next = QTD_NEXT(ehci, dma);

			/* let the hc process these next qtds */
			wmb ();
			dummy->hw_token = token;

			urb->hcpriv = qh_get (qh);
		}
	}
	return qh;
}

/*-------------------------------------------------------------------------*/

static int
submit_async (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list,
	gfp_t			mem_flags
) {
	int			epnum;
	unsigned long		flags;
	struct ehci_qh		*qh = NULL;
	int			rc;

	epnum = urb->ep->desc.bEndpointAddress;

#ifdef EHCI_URB_TRACE
	{
		struct ehci_qtd *qtd;
		qtd = list_entry(qtd_list->next, struct ehci_qtd, qtd_list);
		ehci_dbg(ehci,
			 "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
			 __func__, urb->dev->devpath, urb,
			 epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
			 urb->transfer_buffer_length,
			 qtd, urb->ep->hcpriv);
	}
#endif

	spin_lock_irqsave (&ehci->lock, flags);
	if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
		rc = -ESHUTDOWN;
		goto done;
	}
	rc = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
	if (unlikely(rc))
		goto done;

	qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
	if (unlikely(qh == NULL)) {
		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
		rc = -ENOMEM;
		goto done;
	}

	/* Control/bulk operations through TTs don't need scheduling,
	 * the HC and TT handle it when the TT has a buffer ready.
	 */
	if (likely (qh->qh_state == QH_STATE_IDLE))
		qh_link_async(ehci, qh);
 done:
	spin_unlock_irqrestore (&ehci->lock, flags);
	if (unlikely (qh == NULL))
		qtd_list_free (ehci, urb, qtd_list);
	return rc;
}

/*-------------------------------------------------------------------------*/

/* the async qh for the qtds being reclaimed are now unlinked from the HC */

static void end_unlink_async (struct ehci_hcd *ehci)
{
	struct ehci_qh		*qh = ehci->reclaim;
	struct ehci_qh		*next;

	iaa_watchdog_done(ehci);

	// qh->hw_next = cpu_to_hc32(qh->qh_dma);
	qh->qh_state = QH_STATE_IDLE;
	qh->qh_next.qh = NULL;
	qh_put (qh);			// refcount from reclaim

	/* other unlink(s) may be pending (in QH_STATE_UNLINK_WAIT) */
	next = qh->reclaim;
	ehci->reclaim = next;
	qh->reclaim = NULL;

	qh_completions (ehci, qh);

	if (!list_empty(&qh->qtd_list) && ehci->rh_state == EHCI_RH_RUNNING) {
		qh_link_async (ehci, qh);
	} else {
		/* it's not free to turn the async schedule on/off; leave it
		 * active but idle for a while once it empties.
		 */
		if (ehci->rh_state == EHCI_RH_RUNNING
				&& ehci->async->qh_next.qh == NULL)
			timer_action (ehci, TIMER_ASYNC_OFF);
	}
	qh_put(qh);			/* refcount from async list */

	if (next) {
		ehci->reclaim = NULL;
		start_unlink_async (ehci, next);
	}

	if (ehci->has_synopsys_hc_bug)
		ehci_writel(ehci, (u32) ehci->async->qh_dma,
			    &ehci->regs->async_next);
}

/* makes sure the async qh will become idle */
/* caller must own ehci->lock */

static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	int		cmd = ehci_readl(ehci, &ehci->regs->command);
	struct ehci_qh	*prev;

#ifdef DEBUG
	assert_spin_locked(&ehci->lock);
	if (ehci->reclaim
			|| (qh->qh_state != QH_STATE_LINKED
				&& qh->qh_state != QH_STATE_UNLINK_WAIT)
			)
		BUG ();
#endif

	/* stop async schedule right now? */
	if (unlikely (qh == ehci->async)) {
		/* can't get here without STS_ASS set */
		if (ehci->rh_state != EHCI_RH_HALTED
				&& !ehci->reclaim) {
			/* ... and CMD_IAAD clear */
			ehci_writel(ehci, cmd & ~CMD_ASE,
				    &ehci->regs->command);
			wmb ();
			// handshake later, if we need to
			timer_action_done (ehci, TIMER_ASYNC_OFF);
		}
		return;
	}

	qh->qh_state = QH_STATE_UNLINK;
	ehci->reclaim = qh = qh_get (qh);

	prev = ehci->async;
	while (prev->qh_next.qh != qh)
		prev = prev->qh_next.qh;

	prev->hw->hw_next = qh->hw->hw_next;
	prev->qh_next = qh->qh_next;
	if (ehci->qh_scan_next == qh)
		ehci->qh_scan_next = qh->qh_next.qh;
	wmb ();

	/* If the controller isn't running, we don't have to wait for it */
	if (unlikely(ehci->rh_state != EHCI_RH_RUNNING)) {
		/* if (unlikely (qh->reclaim != 0))
		 *	this will recurse, probably not much
		 */
		end_unlink_async (ehci);
		return;
	}

	cmd |= CMD_IAAD;
	ehci_writel(ehci, cmd, &ehci->regs->command);
	(void)ehci_readl(ehci, &ehci->regs->command);
	iaa_watchdog_start(ehci);
}

/*-------------------------------------------------------------------------*/

static void scan_async (struct ehci_hcd *ehci)
{
	bool			stopped;
	struct ehci_qh		*qh;
	enum ehci_timer_action	action = TIMER_IO_WATCHDOG;

	timer_action_done (ehci, TIMER_ASYNC_SHRINK);
	stopped = (ehci->rh_state != EHCI_RH_RUNNING);

	ehci->qh_scan_next = ehci->async->qh_next.qh;
	while (ehci->qh_scan_next) {
		qh = ehci->qh_scan_next;
		ehci->qh_scan_next = qh->qh_next.qh;
 rescan:
		/* clean any finished work for this qh */
		if (!list_empty(&qh->qtd_list)) {
			int temp;

			/*
			 * Unlinks could happen here; completion reporting
			 * drops the lock.  That's why ehci->qh_scan_next
			 * always holds the next qh to scan; if the next qh
			 * gets unlinked then ehci->qh_scan_next is adjusted
			 * in start_unlink_async().
			 */
			qh = qh_get(qh);
			temp = qh_completions(ehci, qh);
			if (qh->needs_rescan)
				unlink_async(ehci, qh);
			qh->unlink_time = jiffies + EHCI_SHRINK_JIFFIES;
			qh_put(qh);
			if (temp != 0)
				goto rescan;
		}

		/* unlink idle entries, reducing DMA usage as well
		 * as HCD schedule-scanning costs.  delay for any qh
		 * we just scanned, there's a not-unusual case that it
		 * doesn't stay idle for long.
		 * (plus, avoids some kind of re-activation race.)
		 */
		if (list_empty(&qh->qtd_list)
				&& qh->qh_state == QH_STATE_LINKED) {
			if (!ehci->reclaim && (stopped ||
					time_after_eq(jiffies, qh->unlink_time)))
				start_unlink_async(ehci, qh);
			else
				action = TIMER_ASYNC_SHRINK;
		}
	}
	if (action == TIMER_ASYNC_SHRINK)
		timer_action (ehci, TIMER_ASYNC_SHRINK);
}