/*
 * Copyright (c) 2001-2004 by David Brownell
 * Copyright (c) 2003 Michal Sojka, for high-speed iso transfers
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/* this file is part of ehci-hcd.c */

/*-------------------------------------------------------------------------*/

/*
 * EHCI scheduled transaction support:  interrupt, iso, split iso
 * These are called "periodic" transactions in the EHCI spec.
 *
 * Note that for interrupt transfers, the QH/QTD manipulation is shared
 * with the "asynchronous" transaction support (control/bulk transfers).
 * The only real difference is in how interrupt transfers are scheduled.
 *
 * For ISO, we make an "iso_stream" head to serve the same role as a QH.
 * It keeps track of every ITD (or SITD) that's linked, and holds enough
 * pre-calculated schedule data to make appending to the queue be quick.
 */

static int ehci_get_frame (struct usb_hcd *hcd);

/*-------------------------------------------------------------------------*/

/*
 * periodic_next_shadow - return "next" pointer on shadow list
 * @periodic: host pointer to qh/itd/sitd
 * @tag: hardware tag for type of this record
 */
static union ehci_shadow *
periodic_next_shadow (union ehci_shadow *periodic, __le32 tag)
{
	switch (tag) {
	case Q_TYPE_QH:
		return &periodic->qh->qh_next;
	case Q_TYPE_FSTN:
		return &periodic->fstn->fstn_next;
	case Q_TYPE_ITD:
		return &periodic->itd->itd_next;
	// case Q_TYPE_SITD:
	default:
		return &periodic->sitd->sitd_next;
	}
}

/* caller must hold ehci->lock */
static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr)
{
	union ehci_shadow	*prev_p = &ehci->pshadow [frame];
	__le32			*hw_p = &ehci->periodic [frame];
	union ehci_shadow	here = *prev_p;

	/* find predecessor of "ptr"; hw and shadow lists are in sync */
	while (here.ptr && here.ptr != ptr) {
		prev_p = periodic_next_shadow (prev_p, Q_NEXT_TYPE (*hw_p));
		hw_p = here.hw_next;
		here = *prev_p;
	}
	/* an interrupt entry (at list end) could have been shared */
	if (!here.ptr)
		return;

	/* update shadow and hardware lists ... the old "next" pointers
	 * from ptr may still be in use, the caller updates them.
	 */
	*prev_p = *periodic_next_shadow (&here, Q_NEXT_TYPE (*hw_p));
	*hw_p = *here.hw_next;
}

/* how many of the uframe's 125 usecs are allocated? */
static unsigned short
periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe)
{
	__le32			*hw_p = &ehci->periodic [frame];
	union ehci_shadow	*q = &ehci->pshadow [frame];
	unsigned		usecs = 0;

	while (q->ptr) {
		switch (Q_NEXT_TYPE (*hw_p)) {
		case Q_TYPE_QH:
			/* is it in the S-mask? */
			if (q->qh->hw_info2 & cpu_to_le32 (1 << uframe))
				usecs += q->qh->usecs;
			/* ... or C-mask? */
			if (q->qh->hw_info2 & cpu_to_le32 (1 << (8 + uframe)))
				usecs += q->qh->c_usecs;
			hw_p = &q->qh->hw_next;
			q = &q->qh->qh_next;
			break;
		// case Q_TYPE_FSTN:
		default:
			/* for "save place" FSTNs, count the relevant INTR
			 * bandwidth from the previous frame
			 */
			if (q->fstn->hw_prev != EHCI_LIST_END) {
				ehci_dbg (ehci, "ignoring FSTN cost ...\n");
			}
			hw_p = &q->fstn->hw_next;
			q = &q->fstn->fstn_next;
			break;
		case Q_TYPE_ITD:
			usecs += q->itd->usecs [uframe];
			hw_p = &q->itd->hw_next;
			q = &q->itd->itd_next;
			break;
		case Q_TYPE_SITD:
			/* is it in the S-mask?  (count SPLIT, DATA) */
			if (q->sitd->hw_uframe & cpu_to_le32 (1 << uframe)) {
				if (q->sitd->hw_fullspeed_ep &
						__constant_cpu_to_le32 (1<<31))
					usecs += q->sitd->stream->usecs;
				else	/* worst case for OUT start-split */
					usecs += HS_USECS_ISO (188);
			}
			/* ... C-mask?  (count CSPLIT, DATA) */
			if (q->sitd->hw_uframe &
					cpu_to_le32 (1 << (8 + uframe))) {
				/* worst case for IN complete-split */
				usecs += q->sitd->stream->c_usecs;
			}
			hw_p = &q->sitd->hw_next;
			q = &q->sitd->sitd_next;
			break;
		}
	}
#ifdef	DEBUG
	if (usecs > 100)
		ehci_err (ehci, "uframe %d sched overrun: %d usecs\n",
			frame * 8 + uframe, usecs);
#endif
	return usecs;
}
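
/* Worked example (illustration only, not driver code): under the 80%
 * periodic rule, each 125 usec uframe offers at most 100 usec to periodic
 * transfers.  A hypothetical caller asking "do N more usecs fit in
 * (frame, uframe)?" would compare against what periodic_usecs() reports:
 *
 *	// sketch only; uframe_fits() is not a real helper in this driver
 *	static int uframe_fits (struct ehci_hcd *ehci,
 *			unsigned frame, unsigned uframe, unsigned need)
 *	{
 *		return periodic_usecs (ehci, frame, uframe) + need <= 100;
 *	}
 *
 * check_period() below makes the same comparison, but inverted
 * ("max already claimed" = 100 - need) so the subtraction is hoisted
 * out of its scan loop.
 */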

/*-------------------------------------------------------------------------*/

static int same_tt (struct usb_device *dev1, struct usb_device *dev2)
{
	if (!dev1->tt || !dev2->tt)
		return 0;
	if (dev1->tt != dev2->tt)
		return 0;
	if (dev1->tt->multi)
		return dev1->ttport == dev2->ttport;
	else
		return 1;
}

/* return true iff the device's transaction translator is available
 * for a periodic transfer starting at the specified frame, using
 * all the uframes in the mask.
 */
static int tt_no_collision (
	struct ehci_hcd		*ehci,
	unsigned		period,
	struct usb_device	*dev,
	unsigned		frame,
	u32			uf_mask
)
{
	if (period == 0)	/* error */
		return 0;

	/* note bandwidth wastage:  split never follows csplit
	 * (different dev or endpoint) until the next uframe.
	 * calling convention doesn't make that distinction.
	 */
	for (; frame < ehci->periodic_size; frame += period) {
		union ehci_shadow	here;
		__le32			type;

		here = ehci->pshadow [frame];
		type = Q_NEXT_TYPE (ehci->periodic [frame]);
		while (here.ptr) {
			switch (type) {
			case Q_TYPE_ITD:
				type = Q_NEXT_TYPE (here.itd->hw_next);
				here = here.itd->itd_next;
				continue;
			case Q_TYPE_QH:
				if (same_tt (dev, here.qh->dev)) {
					u32		mask;

					mask = le32_to_cpu (here.qh->hw_info2);
					/* "knows" no gap is needed */
					mask |= mask >> 8;
					if (mask & uf_mask)
						break;
				}
				type = Q_NEXT_TYPE (here.qh->hw_next);
				here = here.qh->qh_next;
				continue;
			case Q_TYPE_SITD:
				if (same_tt (dev, here.sitd->urb->dev)) {
					u16		mask;

					mask = le32_to_cpu (here.sitd
								->hw_uframe);
					/* FIXME assumes no gap for IN! */
					mask |= mask >> 8;
					if (mask & uf_mask)
						break;
				}
				type = Q_NEXT_TYPE (here.sitd->hw_next);
				here = here.sitd->sitd_next;
				continue;
			// case Q_TYPE_FSTN:
			default:
				ehci_dbg (ehci,
					"periodic frame %d bogus type %d\n",
					frame, type);
			}

			/* collision or error */
			return 0;
		}
	}

	/* no collision */
	return 1;
}

/*-------------------------------------------------------------------------*/

static int enable_periodic (struct ehci_hcd *ehci)
{
	u32	cmd;
	int	status;

	/* did clearing PSE take effect yet?
	 * takes effect only at frame boundaries...
	 */
	status = handshake (&ehci->regs->status, STS_PSS, 0, 9 * 125);
	if (status != 0) {
		ehci_to_hcd(ehci)->state = HC_STATE_HALT;
		return status;
	}

	cmd = readl (&ehci->regs->command) | CMD_PSE;
	writel (cmd, &ehci->regs->command);
	/* posted write ... PSS happens later */
	ehci_to_hcd(ehci)->state = HC_STATE_RUNNING;

	/* make sure ehci_work scans these */
	ehci->next_uframe = readl (&ehci->regs->frame_index)
				% (ehci->periodic_size << 3);
	return 0;
}

static int disable_periodic (struct ehci_hcd *ehci)
{
	u32	cmd;
	int	status;

	/* did setting PSE not take effect yet?
	 * takes effect only at frame boundaries...
	 */
	status = handshake (&ehci->regs->status, STS_PSS, STS_PSS, 9 * 125);
	if (status != 0) {
		ehci_to_hcd(ehci)->state = HC_STATE_HALT;
		return status;
	}

	cmd = readl (&ehci->regs->command) & ~CMD_PSE;
	writel (cmd, &ehci->regs->command);
	/* posted write ... */

	ehci->next_uframe = -1;
	return 0;
}
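
/* The two routines above are mirror images: because PSE only latches at
 * frame boundaries, each first waits for STS_PSS to reflect the *previous*
 * CMD_PSE setting before flipping it, and 9 * 125 usecs of polling is just
 * over one 1 msec frame.  A hypothetical combined helper (sketch only,
 * assuming the handshake() used above polls "status & mask" until it
 * equals "done" or the usec timeout expires):
 *
 *	static int periodic_pse (struct ehci_hcd *ehci, int enable)
 *	{
 *		u32	cmd;
 *		int	status;
 *
 *		status = handshake (&ehci->regs->status, STS_PSS,
 *				enable ? 0 : STS_PSS, 9 * 125);
 *		if (status != 0)
 *			return status;
 *		cmd = readl (&ehci->regs->command);
 *		writel (enable ? (cmd | CMD_PSE) : (cmd & ~CMD_PSE),
 *				&ehci->regs->command);
 *		return 0;
 *	}
 */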

/*-------------------------------------------------------------------------*/

/* periodic schedule slots have iso tds (normal or split) first, then a
 * sparse tree for active interrupt transfers.
 *
 * this just links in a qh; caller guarantees uframe masks are set right.
 * no FSTN support (yet; ehci 0.96+)
 */
static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	unsigned	i;
	unsigned	period = qh->period;

	dev_dbg (&qh->dev->dev,
		"link qh%d-%04x/%p start %d [%d/%d us]\n",
		period, le32_to_cpup (&qh->hw_info2) & (QH_CMASK | QH_SMASK),
		qh, qh->start, qh->usecs, qh->c_usecs);

	/* high bandwidth, or otherwise every microframe */
	if (period == 0)
		period = 1;

	for (i = qh->start; i < ehci->periodic_size; i += period) {
		union ehci_shadow	*prev = &ehci->pshadow [i];
		__le32			*hw_p = &ehci->periodic [i];
		union ehci_shadow	here = *prev;
		__le32			type = 0;

		/* skip the iso nodes at list head */
		while (here.ptr) {
			type = Q_NEXT_TYPE (*hw_p);
			if (type == Q_TYPE_QH)
				break;
			prev = periodic_next_shadow (prev, type);
			hw_p = &here.qh->hw_next;
			here = *prev;
		}

		/* sorting each branch by period (slow-->fast)
		 * enables sharing interior tree nodes
		 */
		while (here.ptr && qh != here.qh) {
			if (qh->period > here.qh->period)
				break;
			prev = &here.qh->qh_next;
			hw_p = &here.qh->hw_next;
			here = *prev;
		}
		/* link in this qh, unless some earlier pass did that */
		if (qh != here.qh) {
			qh->qh_next = here;
			if (here.qh)
				qh->hw_next = *hw_p;
			wmb ();
			prev->qh = qh;
			*hw_p = QH_NEXT (qh->qh_dma);
		}
	}
	qh->qh_state = QH_STATE_LINKED;
	qh_get (qh);

	/* update per-qh bandwidth for usbfs */
	ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->period
		? ((qh->usecs + qh->c_usecs) / qh->period)
		: (qh->usecs * 8);

	/* maybe enable periodic schedule processing */
	if (!ehci->periodic_sched++)
		return enable_periodic (ehci);

	return 0;
}
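
/* Worked example for the usbfs accounting above: the figure reported is an
 * *average* usecs-per-frame.  An interrupt qh with usecs = 30, c_usecs = 0
 * and period = 4 frames adds (30 + 0) / 4 = 7 usecs per frame (integer
 * division); a period-0 qh runs in all 8 uframes of every frame, hence the
 * "usecs * 8" branch.
 */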

static void qh_unlink_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	unsigned	i;
	unsigned	period;

	// FIXME:
	// IF this isn't high speed
	//   and this qh is active in the current uframe
	//   (and overlay token SplitXstate is false?)
	// THEN
	//   qh->hw_info1 |= __constant_cpu_to_le32 (1 << 7 /* "ignore" */);

	/* high bandwidth, or otherwise part of every microframe */
	if ((period = qh->period) == 0)
		period = 1;

	for (i = qh->start; i < ehci->periodic_size; i += period)
		periodic_unlink (ehci, i, qh);

	/* update per-qh bandwidth for usbfs */
	ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->period
		? ((qh->usecs + qh->c_usecs) / qh->period)
		: (qh->usecs * 8);

	dev_dbg (&qh->dev->dev,
		"unlink qh%d-%04x/%p start %d [%d/%d us]\n",
		qh->period,
		le32_to_cpup (&qh->hw_info2) & (QH_CMASK | QH_SMASK),
		qh, qh->start, qh->usecs, qh->c_usecs);

	/* qh->qh_next still "live" to HC */
	qh->qh_state = QH_STATE_UNLINK;
	qh->qh_next.ptr = NULL;
	qh_put (qh);

	/* maybe turn off periodic schedule */
	ehci->periodic_sched--;
	if (!ehci->periodic_sched)
		(void) disable_periodic (ehci);
}

static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	unsigned	wait;

	qh_unlink_periodic (ehci, qh);

	/* simple/paranoid:  always delay, expecting the HC needs to read
	 * qh->hw_next or finish a writeback after SPLIT/CSPLIT ... and
	 * expect khubd to clean up after any CSPLITs we won't issue.
	 * active high speed queues may need bigger delays...
	 */
	if (list_empty (&qh->qtd_list)
			|| (__constant_cpu_to_le32 (QH_CMASK)
					& qh->hw_info2) != 0)
		wait = 2;
	else
		wait = 55;	/* worst case: 3 * 1024 */

	udelay (wait);
	qh->qh_state = QH_STATE_IDLE;
	qh->hw_next = EHCI_LIST_END;
	wmb ();
}

/*-------------------------------------------------------------------------*/

static int check_period (
	struct ehci_hcd *ehci,
	unsigned	frame,
	unsigned	uframe,
	unsigned	period,
	unsigned	usecs
) {
	int		claimed;

	/* complete split running into next frame?
	 * given FSTN support, we could sometimes check...
	 */
	if (uframe >= 8)
		return 0;

	/*
	 * 80% periodic == 100 usec/uframe available
	 * convert "usecs we need" to "max already claimed"
	 */
	usecs = 100 - usecs;

	/* we "know" 2 and 4 uframe intervals were rejected; so
	 * for period 0, check _every_ microframe in the schedule.
	 */
	if (unlikely (period == 0)) {
		do {
			for (uframe = 0; uframe < 7; uframe++) {
				claimed = periodic_usecs (ehci, frame, uframe);
				if (claimed > usecs)
					return 0;
			}
		} while ((frame += 1) < ehci->periodic_size);

	/* just check the specified uframe, at that period */
	} else {
		do {
			claimed = periodic_usecs (ehci, frame, uframe);
			if (claimed > usecs)
				return 0;
		} while ((frame += period) < ehci->periodic_size);
	}

	// success!
	return 1;
}
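
/* Worked example of the inversion above: a transfer needing 25 usecs at
 * period 4 starting at frame 2 flips the test to "claimed may not exceed
 * 100 - 25 = 75", then walks frames 2, 6, 10, ... of the schedule; a
 * uframe already reporting, say, 80 usecs fails the probe, since adding
 * 25 more would break the 80% budget.
 */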

static int check_intr_schedule (
	struct ehci_hcd		*ehci,
	unsigned		frame,
	unsigned		uframe,
	const struct ehci_qh	*qh,
	__le32			*c_maskp
)
{
	int		retval = -ENOSPC;
	u8		mask;

	if (qh->c_usecs && uframe >= 6)		/* FSTN territory? */
		goto done;

	if (!check_period (ehci, frame, uframe, qh->period, qh->usecs))
		goto done;
	if (!qh->c_usecs) {
		retval = 0;
		*c_maskp = 0;
		goto done;
	}

	/* Make sure this tt's buffer is also available for CSPLITs.
	 * We pessimize a bit; probably the typical full speed case
	 * doesn't need the second CSPLIT.
	 *
	 * NOTE:  both SPLIT and CSPLIT could be checked in just
	 * one smart pass...
	 */
	mask = 0x03 << (uframe + qh->gap_uf);
	*c_maskp = cpu_to_le32 (mask << 8);

	mask |= 1 << uframe;
	if (tt_no_collision (ehci, qh->period, qh->dev, frame, mask)) {
		if (!check_period (ehci, frame, uframe + qh->gap_uf + 1,
					qh->period, qh->c_usecs))
			goto done;
		if (!check_period (ehci, frame, uframe + qh->gap_uf,
					qh->period, qh->c_usecs))
			goto done;
		retval = 0;
	}
done:
	return retval;
}
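
/* Worked example of the mask math above: with uframe = 1 and
 * qh->gap_uf = 1, mask starts as 0x03 << 2 = 0x0c, so the C-mask
 * (hw_info2 bits 8..15) allows complete-splits in uframes 2 and 3.
 * After "mask |= 1 << uframe" it is 0x0d, describing the whole
 * SSPLIT + CSPLIT footprint handed to tt_no_collision(); the two
 * check_period() calls then vet host bandwidth in uframes 3 and 2.
 */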

/* "first fit" scheduling policy used the first time through,
 * or when the previous schedule slot can't be re-used.
 */
static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	int		status;
	unsigned	uframe;
	__le32		c_mask;
	unsigned	frame;		/* 0..(qh->period - 1), or NO_FRAME */

	qh_refresh(ehci, qh);
	qh->hw_next = EHCI_LIST_END;
	frame = qh->start;

	/* reuse the previous schedule slots, if we can */
	if (frame < qh->period) {
		uframe = ffs (le32_to_cpup (&qh->hw_info2) & QH_SMASK);
		status = check_intr_schedule (ehci, frame, --uframe,
				qh, &c_mask);
	} else {
		uframe = 0;
		c_mask = 0;
		status = -ENOSPC;
	}

	/* else scan the schedule to find a group of slots such that all
	 * uframes have enough periodic bandwidth available.
	 */
	if (status) {
		/* "normal" case, uframing flexible except with splits */
		if (qh->period) {
			frame = qh->period - 1;
			do {
				for (uframe = 0; uframe < 8; uframe++) {
					status = check_intr_schedule (ehci,
							frame, uframe, qh,
							&c_mask);
					if (status == 0)
						break;
				}
			} while (status && frame--);

		/* qh->period == 0 means every uframe */
		} else {
			frame = 0;
			status = check_intr_schedule (ehci, 0, 0, qh, &c_mask);
		}
		if (status)
			goto done;
		qh->start = frame;

		/* reset S-frame and (maybe) C-frame masks */
		qh->hw_info2 &= __constant_cpu_to_le32(~(QH_CMASK | QH_SMASK));
		qh->hw_info2 |= qh->period
			? cpu_to_le32 (1 << uframe)
			: __constant_cpu_to_le32 (QH_SMASK);
		qh->hw_info2 |= c_mask;
	} else
		ehci_dbg (ehci, "reused qh %p schedule\n", qh);

	/* stuff into the periodic schedule */
	status = qh_link_periodic (ehci, qh);
done:
	return status;
}

static int intr_submit (
	struct ehci_hcd		*ehci,
	struct usb_host_endpoint *ep,
	struct urb		*urb,
	struct list_head	*qtd_list,
	unsigned		mem_flags
) {
	unsigned	epnum;
	unsigned long	flags;
	struct ehci_qh	*qh;
	int		status = 0;
	struct list_head	empty;

	/* get endpoint and transfer/schedule data */
	epnum = ep->desc.bEndpointAddress;

	spin_lock_irqsave (&ehci->lock, flags);

	/* get qh and force any scheduling errors */
	INIT_LIST_HEAD (&empty);
	qh = qh_append_tds (ehci, urb, &empty, epnum, &ep->hcpriv);
	if (qh == NULL) {
		status = -ENOMEM;
		goto done;
	}
	if (qh->qh_state == QH_STATE_IDLE) {
		if ((status = qh_schedule (ehci, qh)) != 0)
			goto done;
	}

	/* then queue the urb's tds to the qh */
	qh = qh_append_tds (ehci, urb, qtd_list, epnum, &ep->hcpriv);
	BUG_ON (qh == NULL);

	/* ... update usbfs periodic stats */
	ehci_to_hcd(ehci)->self.bandwidth_int_reqs++;

done:
	spin_unlock_irqrestore (&ehci->lock, flags);
	if (status)
		qtd_list_free (ehci, urb, qtd_list);

	return status;
}

/*-------------------------------------------------------------------------*/

/* ehci_iso_stream ops work with both ITD and SITD */

static struct ehci_iso_stream *
iso_stream_alloc (unsigned mem_flags)
{
	struct ehci_iso_stream *stream;

	stream = kcalloc(1, sizeof *stream, mem_flags);
	if (likely (stream != NULL)) {
		INIT_LIST_HEAD(&stream->td_list);
		INIT_LIST_HEAD(&stream->free_list);
		stream->next_uframe = -1;
		stream->refcount = 1;
	}
	return stream;
}

static void
iso_stream_init (
	struct ehci_hcd		*ehci,
	struct ehci_iso_stream	*stream,
	struct usb_device	*dev,
	int			pipe,
	unsigned		interval
)
{
	static const u8 smask_out [] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f };

	u32			buf1;
	unsigned		epnum, maxp;
	int			is_input;
	long			bandwidth;

	/*
	 * this might be a "high bandwidth" highspeed endpoint,
	 * as encoded in the ep descriptor's wMaxPacket field
	 */
	epnum = usb_pipeendpoint (pipe);
	is_input = usb_pipein (pipe) ? USB_DIR_IN : 0;
	maxp = usb_maxpacket(dev, pipe, !is_input);
	if (is_input) {
		buf1 = (1 << 11);
	} else {
		buf1 = 0;
	}

	/* knows about ITD vs SITD */
	if (dev->speed == USB_SPEED_HIGH) {
		unsigned multi = hb_mult(maxp);

		stream->highspeed = 1;

		maxp = max_packet(maxp);
		buf1 |= maxp;
		maxp *= multi;

		stream->buf0 = cpu_to_le32 ((epnum << 8) | dev->devnum);
		stream->buf1 = cpu_to_le32 (buf1);
		stream->buf2 = cpu_to_le32 (multi);

		/* usbfs wants to report the average usecs per frame tied up
		 * when transfers on this endpoint are scheduled ...
		 */
		stream->usecs = HS_USECS_ISO (maxp);
		bandwidth = stream->usecs * 8;
		bandwidth /= 1 << (interval - 1);

	} else {
		u32		addr;

		addr = dev->ttport << 24;
		if (!ehci_is_TDI(ehci)
				|| (dev->tt->hub !=
					ehci_to_hcd(ehci)->self.root_hub))
			addr |= dev->tt->hub->devnum << 16;
		addr |= epnum << 8;
		addr |= dev->devnum;
		stream->usecs = HS_USECS_ISO (maxp);
		if (is_input) {
			u32	tmp;

			addr |= 1 << 31;
			stream->c_usecs = stream->usecs;
			stream->usecs = HS_USECS_ISO (1);
			stream->raw_mask = 1;

			/* pessimistic c-mask */
			tmp = usb_calc_bus_time (USB_SPEED_FULL, 1, 0, maxp)
					/ (125 * 1000);
			stream->raw_mask |= 3 << (tmp + 9);
		} else
			stream->raw_mask = smask_out [maxp / 188];
		bandwidth = stream->usecs + stream->c_usecs;
		bandwidth /= 1 << (interval + 2);

		/* stream->splits gets created from raw_mask later */
		stream->address = cpu_to_le32 (addr);
	}
	stream->bandwidth = bandwidth;

	stream->udev = dev;

	stream->bEndpointAddress = is_input | epnum;
	stream->interval = interval;
	stream->maxp = maxp;
}
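
/* Worked examples for the math above (illustration only).  High speed: a
 * wMaxPacket of 0x0c00 encodes maxp = 1024 with hb_mult = 2, so the
 * endpoint can move up to 2048 bytes per uframe; with interval = 1
 * (every uframe, 2^(1-1) = 1) usbfs is charged stream->usecs * 8 per
 * frame.  Full speed OUT: maxp = 400 indexes smask_out[400 / 188] =
 * smask_out[2] = 0x07, i.e. three start-splits in consecutive uframes,
 * since OUT data crosses the TT 188 bytes per SSPLIT.
 */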

static void
iso_stream_put(struct ehci_hcd *ehci, struct ehci_iso_stream *stream)
{
	stream->refcount--;

	/* free whenever just a dev->ep reference remains.
	 * not like a QH -- no persistent state (toggle, halt)
	 */
	if (stream->refcount == 1) {
		int		is_in;

		// BUG_ON (!list_empty(&stream->td_list));

		while (!list_empty (&stream->free_list)) {
			struct list_head	*entry;

			entry = stream->free_list.next;
			list_del (entry);

			/* knows about ITD vs SITD */
			if (stream->highspeed) {
				struct ehci_itd		*itd;

				itd = list_entry (entry, struct ehci_itd,
						itd_list);
				dma_pool_free (ehci->itd_pool, itd,
						itd->itd_dma);
			} else {
				struct ehci_sitd	*sitd;

				sitd = list_entry (entry, struct ehci_sitd,
						sitd_list);
				dma_pool_free (ehci->sitd_pool, sitd,
						sitd->sitd_dma);
			}
		}

		is_in = (stream->bEndpointAddress & USB_DIR_IN) ? 0x10 : 0;
		stream->bEndpointAddress &= 0x0f;
		stream->ep->hcpriv = NULL;

		if (stream->rescheduled) {
			ehci_info (ehci, "ep%d%s-iso rescheduled "
				"%lu times in %lu seconds\n",
				stream->bEndpointAddress, is_in ? "in" : "out",
				stream->rescheduled,
				((jiffies - stream->start)/HZ)
				);
		}

		kfree(stream);
	}
}

static inline struct ehci_iso_stream *
iso_stream_get (struct ehci_iso_stream *stream)
{
	if (likely (stream != NULL))
		stream->refcount++;
	return stream;
}

static struct ehci_iso_stream *
iso_stream_find (struct ehci_hcd *ehci, struct urb *urb)
{
	unsigned		epnum;
	struct ehci_iso_stream	*stream;
	struct usb_host_endpoint *ep;
	unsigned long		flags;

	epnum = usb_pipeendpoint (urb->pipe);
	if (usb_pipein(urb->pipe))
		ep = urb->dev->ep_in[epnum];
	else
		ep = urb->dev->ep_out[epnum];

	spin_lock_irqsave (&ehci->lock, flags);
	stream = ep->hcpriv;

	if (unlikely (stream == NULL)) {
		stream = iso_stream_alloc(GFP_ATOMIC);
		if (likely (stream != NULL)) {
			/* dev->ep owns the initial refcount */
			ep->hcpriv = stream;
			stream->ep = ep;
			iso_stream_init(ehci, stream, urb->dev, urb->pipe,
					urb->interval);
		}

	/* if dev->ep [epnum] is a QH, info1.maxpacket is nonzero */
	} else if (unlikely (stream->hw_info1 != 0)) {
		ehci_dbg (ehci, "dev %s ep%d%s, not iso??\n",
			urb->dev->devpath, epnum,
			usb_pipein(urb->pipe) ? "in" : "out");
		stream = NULL;
	}

	/* caller guarantees an eventual matching iso_stream_put */
	stream = iso_stream_get (stream);

	spin_unlock_irqrestore (&ehci->lock, flags);
	return stream;
}

/*-------------------------------------------------------------------------*/

/* ehci_iso_sched ops can be ITD-only or SITD-only */

static struct ehci_iso_sched *
iso_sched_alloc (unsigned packets, unsigned mem_flags)
{
	struct ehci_iso_sched	*iso_sched;
	int			size = sizeof *iso_sched;

	size += packets * sizeof (struct ehci_iso_packet);
	iso_sched = kmalloc (size, mem_flags);
	if (likely (iso_sched != NULL)) {
		memset(iso_sched, 0, size);
		INIT_LIST_HEAD (&iso_sched->td_list);
	}
	return iso_sched;
}

static inline void
itd_sched_init (
	struct ehci_iso_sched	*iso_sched,
	struct ehci_iso_stream	*stream,
	struct urb		*urb
)
{
	unsigned	i;
	dma_addr_t	dma = urb->transfer_dma;

	/* how many uframes are needed for these transfers */
	iso_sched->span = urb->number_of_packets * stream->interval;

	/* figure out per-uframe itd fields that we'll need later
	 * when we fit new itds into the schedule.
	 */
	for (i = 0; i < urb->number_of_packets; i++) {
		struct ehci_iso_packet	*uframe = &iso_sched->packet [i];
		unsigned		length;
		dma_addr_t		buf;
		u32			trans;

		length = urb->iso_frame_desc [i].length;
		buf = dma + urb->iso_frame_desc [i].offset;

		trans = EHCI_ISOC_ACTIVE;
		trans |= buf & 0x0fff;
		if (unlikely (((i + 1) == urb->number_of_packets))
				&& !(urb->transfer_flags & URB_NO_INTERRUPT))
			trans |= EHCI_ITD_IOC;
		trans |= length << 16;
		uframe->transaction = cpu_to_le32 (trans);

		/* might need to cross a buffer page within a uframe */
		uframe->bufp = (buf & ~(u64)0x0fff);
		buf += length;
		if (unlikely ((uframe->bufp != (buf & ~(u64)0x0fff))))
			uframe->cross = 1;
	}
}
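
/* Worked example of the iTD transaction dword built above: a 512 byte
 * packet whose buffer starts at offset 0x100 within its page yields
 * trans = EHCI_ISOC_ACTIVE | 0x100 | (512 << 16); the low 12 bits are
 * the within-page offset and bits 16..27 the length, with EHCI_ITD_IOC
 * possibly added on the urb's final packet to raise an interrupt.  If
 * offset + length crosses the 4 KB page, uframe->cross flags it so
 * itd_patch() can program a second buffer page.
 */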

static void
iso_sched_free (
	struct ehci_iso_stream	*stream,
	struct ehci_iso_sched	*iso_sched
)
{
	if (!iso_sched)
		return;
	// caller must hold ehci->lock!
	list_splice (&iso_sched->td_list, &stream->free_list);
	kfree (iso_sched);
}

static int
itd_urb_transaction (
	struct ehci_iso_stream	*stream,
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	unsigned		mem_flags
)
{
	struct ehci_itd		*itd;
	dma_addr_t		itd_dma;
	int			i;
	unsigned		num_itds;
	struct ehci_iso_sched	*sched;
	unsigned long		flags;

	sched = iso_sched_alloc (urb->number_of_packets, mem_flags);
	if (unlikely (sched == NULL))
		return -ENOMEM;

	itd_sched_init (sched, stream, urb);

	if (urb->interval < 8)
		num_itds = 1 + (sched->span + 7) / 8;
	else
		num_itds = urb->number_of_packets;

	/* allocate/init ITDs */
	spin_lock_irqsave (&ehci->lock, flags);
	for (i = 0; i < num_itds; i++) {

		/* free_list.next might be cache-hot ... but maybe
		 * the HC caches it too.  avoid that issue for now.
		 */

		/* prefer previously-allocated itds */
		if (likely (!list_empty(&stream->free_list))) {
			itd = list_entry (stream->free_list.prev,
					struct ehci_itd, itd_list);
			list_del (&itd->itd_list);
			itd_dma = itd->itd_dma;
		} else
			itd = NULL;

		if (!itd) {
			spin_unlock_irqrestore (&ehci->lock, flags);
			itd = dma_pool_alloc (ehci->itd_pool, mem_flags,
					&itd_dma);
			spin_lock_irqsave (&ehci->lock, flags);
		}

		if (unlikely (NULL == itd)) {
			iso_sched_free (stream, sched);
			spin_unlock_irqrestore (&ehci->lock, flags);
			return -ENOMEM;
		}
		memset (itd, 0, sizeof *itd);
		itd->itd_dma = itd_dma;
		list_add (&itd->itd_list, &sched->td_list);
	}
	spin_unlock_irqrestore (&ehci->lock, flags);

	/* temporarily store schedule info in hcpriv */
	urb->hcpriv = sched;
	urb->error_count = 0;
	return 0;
}

/*-------------------------------------------------------------------------*/

static inline int
itd_slot_ok (
	struct ehci_hcd		*ehci,
	u32			mod,
	u32			uframe,
	u8			usecs,
	u32			period
)
{
	uframe %= period;
	do {
		/* can't commit more than 80% periodic == 100 usec */
		if (periodic_usecs (ehci, uframe >> 3, uframe & 0x7)
				> (100 - usecs))
			return 0;

		/* we know urb->interval is 2^N uframes */
		uframe += period;
	} while (uframe < mod);
	return 1;
}
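
/* Worked example: itd_slot_ok() probes one uframe per period across the
 * whole ring.  With mod = 8192 uframes (a 1024-frame list), period = 8
 * (one packet per frame) and uframe = 13, it checks uframes 5, 13, 21,
 * ... 8189, i.e. uframe 5 of every frame, and succeeds only if each
 * stays within the 100 usec budget after adding this stream's usecs.
 */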

static inline int
sitd_slot_ok (
	struct ehci_hcd		*ehci,
	u32			mod,
	struct ehci_iso_stream	*stream,
	u32			uframe,
	struct ehci_iso_sched	*sched,
	u32			period_uframes
)
{
	u32			mask, tmp;
	u32			frame, uf;

	mask = stream->raw_mask << (uframe & 7);

	/* for IN, don't wrap CSPLIT into the next frame */
	if (mask & ~0xffff)
		return 0;

	/* this multi-pass logic is simple, but performance may
	 * suffer when the schedule data isn't cached.
	 */

	/* check bandwidth */
	uframe %= period_uframes;
	do {
		u32		max_used;

		frame = uframe >> 3;
		uf = uframe & 7;

		/* tt must be idle for start(s), any gap, and csplit.
		 * assume scheduling slop leaves 10+% for control/bulk.
		 */
		if (!tt_no_collision (ehci, period_uframes << 3,
				stream->udev, frame, mask))
			return 0;

		/* check starts (OUT uses more than one) */
		max_used = 100 - stream->usecs;
		for (tmp = stream->raw_mask & 0xff; tmp; tmp >>= 1, uf++) {
			if (periodic_usecs (ehci, frame, uf) > max_used)
				return 0;
		}

		/* for IN, check CSPLIT */
		if (stream->c_usecs) {
			max_used = 100 - stream->c_usecs;
			do {
				tmp = 1 << uf;
				tmp <<= 8;
				if ((stream->raw_mask & tmp) == 0)
					continue;
				if (periodic_usecs (ehci, frame, uf)
						> max_used)
					return 0;
			} while (++uf < 8);
		}

		/* we know urb->interval is 2^N uframes */
		uframe += period_uframes;
	} while (uframe < mod);

	stream->splits = cpu_to_le32(stream->raw_mask << (uframe & 7));
	return 1;
}

/*
 * This scheduler plans almost as far into the future as it has actual
 * periodic schedule slots.  (Affected by TUNE_FLS, which defaults to
 * "as small as possible" to be cache-friendlier.)  That limits the size
 * of transfers you can stream reliably; avoid more than 64 msec per urb.
 * Also avoid queue depths of less than ehci's worst irq latency (affected
 * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter,
 * and other factors); or more than about 230 msec total (for portability,
 * given EHCI_TUNE_FLS and the slop).  Or, write a smarter scheduler!
 */

#define SCHEDULE_SLOP	10	/* frames */
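
/* Worked numbers for the comment above: with the default 1024-frame
 * periodic list, mod = 1024 << 3 = 8192 uframes, so the "too long" test
 * below rejects any urb spanning more than 8192 - 8 * 10 = 8112 uframes
 * (~1014 msec); with EHCI_TUNE_FLS picking a 256-frame list, the same
 * test allows only ~246 msec, which is where the portable "about
 * 230 msec total" guidance comes from.
 */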

static int
iso_stream_schedule (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct ehci_iso_stream	*stream
)
{
	u32			now, start, max, period;
	int			status;
	unsigned		mod = ehci->periodic_size << 3;
	struct ehci_iso_sched	*sched = urb->hcpriv;

	if (sched->span > (mod - 8 * SCHEDULE_SLOP)) {
		ehci_dbg (ehci, "iso request %p too long\n", urb);
		status = -EFBIG;
		goto fail;
	}

	if ((stream->depth + sched->span) > mod) {
		ehci_dbg (ehci, "request %p would overflow (%d+%d>%d)\n",
			urb, stream->depth, sched->span, mod);
		status = -EFBIG;
		goto fail;
	}

	now = readl (&ehci->regs->frame_index) % mod;

	/* when's the last uframe this urb could start? */
	max = now + mod;

	/* typical case: reuse current schedule. stream is still active,
	 * and no gaps from host falling behind (irq delays etc)
	 */
	if (likely (!list_empty (&stream->td_list))) {
		start = stream->next_uframe;
		if (start < now)
			start += mod;
		if (likely ((start + sched->span) < max))
			goto ready;
		/* else fell behind; someday, try to reschedule */
		status = -EL2NSYNC;
		goto fail;
	}

	/* need to schedule; when's the next (u)frame we could start?
	 * this is bigger than ehci->i_thresh allows; scheduling itself
	 * isn't free, the slop should handle reasonably slow cpus.  it
	 * can also help high bandwidth if the dma and irq loads don't
	 * jump until after the queue is primed.
	 */
	start = SCHEDULE_SLOP * 8 + (now & ~0x07);
	start %= mod;
	stream->next_uframe = start;

	/* NOTE:  assumes URB_ISO_ASAP, to limit complexity/bugs */

	period = urb->interval;
	if (!stream->highspeed)
		period <<= 3;

	/* find a uframe slot with enough bandwidth */
	for (; start < (stream->next_uframe + period); start++) {
		int		enough_space;

		/* check schedule: enough space? */
		if (stream->highspeed)
			enough_space = itd_slot_ok (ehci, mod, start,
					stream->usecs, period);
		else {
			if ((start % 8) >= 6)
				continue;
			enough_space = sitd_slot_ok (ehci, mod, stream,
					start, sched, period);
		}

		/* schedule it here if there's enough bandwidth */
		if (enough_space) {
			stream->next_uframe = start % mod;
			goto ready;
		}
	}

	/* no room in the schedule */
	ehci_dbg (ehci, "iso %ssched full %p (now %d max %d)\n",
		list_empty (&stream->td_list) ? "" : "re",
		urb, now, max);
	status = -ENOSPC;

fail:
	iso_sched_free (stream, sched);
	urb->hcpriv = NULL;
	return status;

ready:
	/* report high speed start in uframes; full speed, in frames */
	urb->start_frame = stream->next_uframe;
	if (!stream->highspeed)
		urb->start_frame >>= 3;
	return 0;
}
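
/* Worked example of the ASAP start chosen above: if the HC's frame index
 * reads now = 0x123 (uframe 3 of frame 0x24), then start = 10 * 8 +
 * (0x123 & ~0x07) = 80 + 0x120 = 0x170, i.e. uframe 0 of frame 0x2e,
 * ten frames of slop ahead of the hardware.  The loop then slides start
 * forward by at most one period looking for a slot where itd_slot_ok()
 * or sitd_slot_ok() reports enough bandwidth.
 */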

/*-------------------------------------------------------------------------*/

static inline void
itd_init (struct ehci_iso_stream *stream, struct ehci_itd *itd)
{
	int i;

	/* it's been recently zeroed */
	itd->hw_next = EHCI_LIST_END;
	itd->hw_bufp [0] = stream->buf0;
	itd->hw_bufp [1] = stream->buf1;
	itd->hw_bufp [2] = stream->buf2;

	for (i = 0; i < 8; i++)
		itd->index[i] = -1;

	/* All other fields are filled when scheduling */
}

static inline void
itd_patch (
	struct ehci_itd		*itd,
	struct ehci_iso_sched	*iso_sched,
	unsigned		index,
	u16			uframe
)
{
	struct ehci_iso_packet	*uf = &iso_sched->packet [index];
	unsigned		pg = itd->pg;

	// BUG_ON (pg == 6 && uf->cross);

	uframe &= 0x07;
	itd->index [uframe] = index;

	itd->hw_transaction [uframe] = uf->transaction;
	itd->hw_transaction [uframe] |= cpu_to_le32 (pg << 12);
	itd->hw_bufp [pg] |= cpu_to_le32 (uf->bufp & ~(u32)0);
	itd->hw_bufp_hi [pg] |= cpu_to_le32 ((u32)(uf->bufp >> 32));

	/* iso_frame_desc[].offset must be strictly increasing */
	if (unlikely (uf->cross)) {
		u64	bufp = uf->bufp + 4096;

		itd->pg = ++pg;
		itd->hw_bufp [pg] |= cpu_to_le32 (bufp & ~(u32)0);
		itd->hw_bufp_hi [pg] |= cpu_to_le32 ((u32)(bufp >> 32));
	}
}

static inline void
itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
{
	/* always prepend ITD/SITD ... only QH tree is order-sensitive */
	itd->itd_next = ehci->pshadow [frame];
	itd->hw_next = ehci->periodic [frame];
	ehci->pshadow [frame].itd = itd;
	itd->frame = frame;
	wmb ();
	ehci->periodic [frame] = cpu_to_le32 (itd->itd_dma) | Q_TYPE_ITD;
}
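
/* Note on the hardware encoding above: periodic[frame] holds a bus
 * address whose low bits are tag fields, which is why itds can simply
 * be prepended.  Sketch of the dword layout (per the EHCI spec):
 *
 *	// bit 0: T (terminate); bits 2:1: type (QH/ITD/SITD/FSTN)
 *	// the itd is 32-byte aligned, so the low address bits are free
 *	ehci->periodic [frame] = cpu_to_le32 (itd->itd_dma) | Q_TYPE_ITD;
 *
 * The wmb() beforehand makes sure the itd's own fields are visible to
 * the HC before the frame list points at it.
 */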

/* fit urb's itds into the selected schedule slot; activate as needed */
static int
itd_link_urb (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	unsigned		mod,
	struct ehci_iso_stream	*stream
)
{
	int			packet;
	unsigned		next_uframe, uframe, frame;
	struct ehci_iso_sched	*iso_sched = urb->hcpriv;
	struct ehci_itd		*itd;

	next_uframe = stream->next_uframe % mod;

	if (unlikely (list_empty(&stream->td_list))) {
		ehci_to_hcd(ehci)->self.bandwidth_allocated
				+= stream->bandwidth;
		ehci_vdbg (ehci,
			"schedule devp %s ep%d%s-iso period %d start %d.%d\n",
			urb->dev->devpath, stream->bEndpointAddress & 0x0f,
			(stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
			urb->interval,
			next_uframe >> 3, next_uframe & 0x7);
		stream->start = jiffies;
	}
	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;

	/* fill iTDs uframe by uframe */
	for (packet = 0, itd = NULL; packet < urb->number_of_packets; ) {
		if (itd == NULL) {
			/* ASSERT:  we have all necessary itds */
			// BUG_ON (list_empty (&iso_sched->td_list));

			/* ASSERT:  no itds for this endpoint in this uframe */

			itd = list_entry (iso_sched->td_list.next,
					struct ehci_itd, itd_list);
			list_move_tail (&itd->itd_list, &stream->td_list);
			itd->stream = iso_stream_get (stream);
			itd->urb = usb_get_urb (urb);
			itd_init (stream, itd);
		}

		uframe = next_uframe & 0x07;
		frame = next_uframe >> 3;

		itd->usecs [uframe] = stream->usecs;
		itd_patch (itd, iso_sched, packet, uframe);

		next_uframe += stream->interval;
		stream->depth += stream->interval;
		next_uframe %= mod;
		packet++;

		/* link completed itds into the schedule */
		if (((next_uframe >> 3) != frame)
				|| packet == urb->number_of_packets) {
			itd_link (ehci, frame % ehci->periodic_size, itd);
			itd = NULL;
		}
	}
	stream->next_uframe = next_uframe;

	/* don't need that schedule data any more */
	iso_sched_free (stream, iso_sched);
	urb->hcpriv = NULL;

	timer_action (ehci, TIMER_IO_WATCHDOG);
	if (unlikely (!ehci->periodic_sched++))
		return enable_periodic (ehci);
	return 0;
}

#define	ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR)

static unsigned
itd_complete (
	struct ehci_hcd	*ehci,
	struct ehci_itd	*itd,
	struct pt_regs	*regs
) {
	struct urb				*urb = itd->urb;
	struct usb_iso_packet_descriptor	*desc;
	u32					t;
	unsigned				uframe;
	int					urb_index = -1;
	struct ehci_iso_stream			*stream = itd->stream;
	struct usb_device			*dev;

	/* for each uframe with a packet */
	for (uframe = 0; uframe < 8; uframe++) {
		if (likely (itd->index[uframe] == -1))
			continue;
		urb_index = itd->index[uframe];
		desc = &urb->iso_frame_desc [urb_index];

		t = le32_to_cpup (&itd->hw_transaction [uframe]);
		itd->hw_transaction [uframe] = 0;
		stream->depth -= stream->interval;

		/* report transfer status */
		if (unlikely (t & ISO_ERRS)) {
			urb->error_count++;
			if (t & EHCI_ISOC_BUF_ERR)
				desc->status = usb_pipein (urb->pipe)
					? -ENOSR  /* hc couldn't read */
					: -ECOMM; /* hc couldn't write */
			else if (t & EHCI_ISOC_BABBLE)
				desc->status = -EOVERFLOW;
			else /* (t & EHCI_ISOC_XACTERR) */
				desc->status = -EPROTO;

			/* HC need not update length with this error */
			if (!(t & EHCI_ISOC_BABBLE))
				desc->actual_length = EHCI_ITD_LENGTH (t);
		} else if (likely ((t & EHCI_ISOC_ACTIVE) == 0)) {
			desc->status = 0;
			desc->actual_length = EHCI_ITD_LENGTH (t);
		}
	}

	usb_put_urb (urb);
	itd->urb = NULL;
	itd->stream = NULL;
	list_move (&itd->itd_list, &stream->free_list);
	iso_stream_put (ehci, stream);

	/* handle completion now? */
	if (likely ((urb_index + 1) != urb->number_of_packets))
		return 0;

	/* ASSERT: it's really the last itd for this urb
	list_for_each_entry (itd, &stream->td_list, itd_list)
		BUG_ON (itd->urb == urb);
	 */

	/* give urb back to the driver ... can be out-of-order */
	dev = usb_get_dev (urb->dev);
	ehci_urb_done (ehci, urb, regs);
	urb = NULL;

	/* defer stopping schedule; completion can submit */
	ehci->periodic_sched--;
	if (unlikely (!ehci->periodic_sched))
		(void) disable_periodic (ehci);
	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;

	if (unlikely (list_empty (&stream->td_list))) {
		ehci_to_hcd(ehci)->self.bandwidth_allocated
				-= stream->bandwidth;
		ehci_vdbg (ehci,
			"deschedule devp %s ep%d%s-iso\n",
			dev->devpath, stream->bEndpointAddress & 0x0f,
			(stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
	}
	iso_stream_put (ehci, stream);
	usb_put_dev (dev);

	return 1;
}

/*-------------------------------------------------------------------------*/

static int itd_submit (struct ehci_hcd *ehci, struct urb *urb,
	unsigned mem_flags)
{
	int			status = -EINVAL;
	unsigned long		flags;
	struct ehci_iso_stream	*stream;

	/* Get iso_stream head */
	stream = iso_stream_find (ehci, urb);
	if (unlikely (stream == NULL)) {
		ehci_dbg (ehci, "can't get iso stream\n");
		return -ENOMEM;
	}
	if (unlikely (urb->interval != stream->interval)) {
		ehci_dbg (ehci, "can't change iso interval %d --> %d\n",
			stream->interval, urb->interval);
		goto done;
	}

#ifdef EHCI_URB_TRACE
	ehci_dbg (ehci,
		"%s %s urb %p ep%d%s len %d, %d pkts %d uframes [%p]\n",
		__FUNCTION__, urb->dev->devpath, urb,
		usb_pipeendpoint (urb->pipe),
		usb_pipein (urb->pipe) ? "in" : "out",
		urb->transfer_buffer_length,
		urb->number_of_packets, urb->interval,
		stream);
#endif

	/* allocate ITDs w/o locking anything */
	status = itd_urb_transaction (stream, ehci, urb, mem_flags);
	if (unlikely (status < 0)) {
		ehci_dbg (ehci, "can't init itds\n");
		goto done;
	}

	/* schedule ... need to lock */
	spin_lock_irqsave (&ehci->lock, flags);
	status = iso_stream_schedule (ehci, urb, stream);
	if (likely (status == 0))
		itd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
	spin_unlock_irqrestore (&ehci->lock, flags);

done:
	if (unlikely (status < 0))
		iso_stream_put (ehci, stream);
	return status;
}

#ifdef CONFIG_USB_EHCI_SPLIT_ISO

/*-------------------------------------------------------------------------*/

/*
 * "Split ISO TDs" ... used for USB 1.1 devices going through the
 * TTs in USB 2.0 hubs.  These need microframe scheduling.
 */

static inline void
sitd_sched_init (
	struct ehci_iso_sched	*iso_sched,
	struct ehci_iso_stream	*stream,
	struct urb		*urb
)
{
	unsigned	i;
	dma_addr_t	dma = urb->transfer_dma;

	/* how many frames are needed for these transfers */
	iso_sched->span = urb->number_of_packets * stream->interval;

	/* figure out per-frame sitd fields that we'll need later
	 * when we fit new sitds into the schedule.
	 */
	for (i = 0; i < urb->number_of_packets; i++) {
		struct ehci_iso_packet	*packet = &iso_sched->packet [i];
		unsigned		length;
		dma_addr_t		buf;
		u32			trans;

		length = urb->iso_frame_desc [i].length & 0x03ff;
		buf = dma + urb->iso_frame_desc [i].offset;

		trans = SITD_STS_ACTIVE;
		if (((i + 1) == urb->number_of_packets)
				&& !(urb->transfer_flags & URB_NO_INTERRUPT))
			trans |= SITD_IOC;
		trans |= length << 16;
		packet->transaction = cpu_to_le32 (trans);

		/* might need to cross a buffer page within a td */
		packet->bufp = buf;
		packet->buf1 = (buf + length) & ~0x0fff;
		if (packet->buf1 != (buf & ~(u64)0x0fff))
			packet->cross = 1;

		/* OUT uses multiple start-splits */
		if (stream->bEndpointAddress & USB_DIR_IN)
			continue;
		length = (length + 187) / 188;
		if (length > 1) /* BEGIN vs ALL */
			length |= 1 << 3;
		packet->buf1 |= length;
	}
}
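
/* Worked example of the OUT start-split math above: a 600 byte full
 * speed packet needs (600 + 187) / 188 = 4 start-splits, and since that
 * count is > 1 the TP field is set to BEGIN (the 1 << 3 bit), so buf1's
 * low bits become 4 | 8 = 0x0c.  A 100 byte packet needs just one
 * SSPLIT carrying ALL the data, so buf1 gets 0x01.
 */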
  1299. static int
  1300. sitd_urb_transaction (
  1301. struct ehci_iso_stream *stream,
  1302. struct ehci_hcd *ehci,
  1303. struct urb *urb,
  1304. unsigned mem_flags
  1305. )
  1306. {
  1307. struct ehci_sitd *sitd;
  1308. dma_addr_t sitd_dma;
  1309. int i;
  1310. struct ehci_iso_sched *iso_sched;
  1311. unsigned long flags;
  1312. iso_sched = iso_sched_alloc (urb->number_of_packets, mem_flags);
  1313. if (iso_sched == NULL)
  1314. return -ENOMEM;
  1315. sitd_sched_init (iso_sched, stream, urb);
  1316. /* allocate/init sITDs */
  1317. spin_lock_irqsave (&ehci->lock, flags);
  1318. for (i = 0; i < urb->number_of_packets; i++) {
  1319. /* NOTE: for now, we don't try to handle wraparound cases
  1320. * for IN (using sitd->hw_backpointer, like a FSTN), which
  1321. * means we never need two sitds for full speed packets.
  1322. */
  1323. /* free_list.next might be cache-hot ... but maybe
  1324. * the HC caches it too. avoid that issue for now.
  1325. */
  1326. /* prefer previously-allocated sitds */
  1327. if (!list_empty(&stream->free_list)) {
  1328. sitd = list_entry (stream->free_list.prev,
  1329. struct ehci_sitd, sitd_list);
  1330. list_del (&sitd->sitd_list);
  1331. sitd_dma = sitd->sitd_dma;
  1332. } else
  1333. sitd = NULL;
  1334. if (!sitd) {
  1335. spin_unlock_irqrestore (&ehci->lock, flags);
  1336. sitd = dma_pool_alloc (ehci->sitd_pool, mem_flags,
  1337. &sitd_dma);
  1338. spin_lock_irqsave (&ehci->lock, flags);
  1339. }
  1340. if (!sitd) {
  1341. iso_sched_free (stream, iso_sched);
  1342. spin_unlock_irqrestore (&ehci->lock, flags);
  1343. return -ENOMEM;
  1344. }
  1345. memset (sitd, 0, sizeof *sitd);
  1346. sitd->sitd_dma = sitd_dma;
  1347. list_add (&sitd->sitd_list, &iso_sched->td_list);
  1348. }
  1349. /* temporarily store schedule info in hcpriv */
  1350. urb->hcpriv = iso_sched;
  1351. urb->error_count = 0;
  1352. spin_unlock_irqrestore (&ehci->lock, flags);
  1353. return 0;
  1354. }
  1355. /*-------------------------------------------------------------------------*/
  1356. static inline void
  1357. sitd_patch (
  1358. struct ehci_iso_stream *stream,
  1359. struct ehci_sitd *sitd,
  1360. struct ehci_iso_sched *iso_sched,
  1361. unsigned index
  1362. )
  1363. {
  1364. struct ehci_iso_packet *uf = &iso_sched->packet [index];
  1365. u64 bufp = uf->bufp;
  1366. sitd->hw_next = EHCI_LIST_END;
  1367. sitd->hw_fullspeed_ep = stream->address;
  1368. sitd->hw_uframe = stream->splits;
  1369. sitd->hw_results = uf->transaction;
  1370. sitd->hw_backpointer = EHCI_LIST_END;
  1371. bufp = uf->bufp;
  1372. sitd->hw_buf [0] = cpu_to_le32 (bufp);
  1373. sitd->hw_buf_hi [0] = cpu_to_le32 (bufp >> 32);
  1374. sitd->hw_buf [1] = cpu_to_le32 (uf->buf1);
  1375. if (uf->cross)
  1376. bufp += 4096;
  1377. sitd->hw_buf_hi [1] = cpu_to_le32 (bufp >> 32);
  1378. sitd->index = index;
  1379. }
  1380. static inline void
  1381. sitd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_sitd *sitd)
  1382. {
  1383. /* note: sitd ordering could matter (CSPLIT then SSPLIT) */
  1384. sitd->sitd_next = ehci->pshadow [frame];
  1385. sitd->hw_next = ehci->periodic [frame];
  1386. ehci->pshadow [frame].sitd = sitd;
  1387. sitd->frame = frame;
  1388. wmb ();
  1389. ehci->periodic [frame] = cpu_to_le32 (sitd->sitd_dma) | Q_TYPE_SITD;
  1390. }

/* fit urb's sitds into the selected schedule slot; activate as needed */
static int
sitd_link_urb (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	unsigned		mod,
	struct ehci_iso_stream	*stream
)
{
	int			packet;
	unsigned		next_uframe;
	struct ehci_iso_sched	*sched = urb->hcpriv;
	struct ehci_sitd	*sitd;

	next_uframe = stream->next_uframe;

	if (list_empty(&stream->td_list)) {
		/* usbfs ignores TT bandwidth */
		ehci_to_hcd(ehci)->self.bandwidth_allocated
				+= stream->bandwidth;
		ehci_vdbg (ehci,
			"sched devp %s ep%d%s-iso [%d] %dms/%04x\n",
			urb->dev->devpath, stream->bEndpointAddress & 0x0f,
			(stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
			(next_uframe >> 3) % ehci->periodic_size,
			stream->interval, le32_to_cpu (stream->splits));
		stream->start = jiffies;
	}
	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;

	/* fill sITDs frame by frame */
	for (packet = 0, sitd = NULL;
			packet < urb->number_of_packets;
			packet++) {

		/* ASSERT: we have all necessary sitds */
		BUG_ON (list_empty (&sched->td_list));

		/* ASSERT: no itds for this endpoint in this frame */

		sitd = list_entry (sched->td_list.next,
				struct ehci_sitd, sitd_list);
		list_move_tail (&sitd->sitd_list, &stream->td_list);
		sitd->stream = iso_stream_get (stream);
		sitd->urb = usb_get_urb (urb);
		sitd_patch (stream, sitd, sched, packet);
		sitd_link (ehci, (next_uframe >> 3) % ehci->periodic_size,
				sitd);

		next_uframe += stream->interval << 3;
		stream->depth += stream->interval << 3;
	}
	stream->next_uframe = next_uframe % mod;

	/* don't need that schedule data any more */
	iso_sched_free (stream, sched);
	urb->hcpriv = NULL;

	timer_action (ehci, TIMER_IO_WATCHDOG);
	if (!ehci->periodic_sched++)
		return enable_periodic (ehci);
	return 0;
}
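
/* Each packet advances next_uframe by stream->interval frames; the
 * "<< 3" converts frames to microframes (eight per frame).  For
 * instance, interval 4 with three packets moves next_uframe forward by
 * 3 * 32 = 96 uframes, modulo the schedule length passed in as "mod".
 * Queuing the first sITD (periodic_sched going 0 -> 1) also turns the
 * periodic schedule on via enable_periodic().
 */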

/*-------------------------------------------------------------------------*/

#define	SITD_ERRS (SITD_STS_ERR | SITD_STS_DBE | SITD_STS_BABBLE \
				| SITD_STS_XACT | SITD_STS_MMF)
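
/* These are the fatal hw_results status bits: ERR (error reported by
 * the transaction translator), DBE (data buffer over/underrun), BABBLE,
 * XACT (transaction error), and MMF (missed microframe).
 */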

static unsigned
sitd_complete (
	struct ehci_hcd		*ehci,
	struct ehci_sitd	*sitd,
	struct pt_regs		*regs
) {
	struct urb				*urb = sitd->urb;
	struct usb_iso_packet_descriptor	*desc;
	u32					t;
	int					urb_index = -1;
	struct ehci_iso_stream			*stream = sitd->stream;
	struct usb_device			*dev;

	urb_index = sitd->index;
	desc = &urb->iso_frame_desc [urb_index];
	t = le32_to_cpup (&sitd->hw_results);

	/* report transfer status */
	if (t & SITD_ERRS) {
		urb->error_count++;
		if (t & SITD_STS_DBE)
			desc->status = usb_pipein (urb->pipe)
				? -ENOSR  /* hc couldn't read */
				: -ECOMM; /* hc couldn't write */
		else if (t & SITD_STS_BABBLE)
			desc->status = -EOVERFLOW;
		else /* XACT, MMF, etc */
			desc->status = -EPROTO;
	} else {
		desc->status = 0;
		desc->actual_length = desc->length - SITD_LENGTH (t);
	}
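
	/* SITD_LENGTH() extracts the residual byte count that the HC
	 * writes back as it transfers data; the bytes that actually
	 * moved are therefore the requested length minus that residue.
	 */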

	usb_put_urb (urb);
	sitd->urb = NULL;
	sitd->stream = NULL;
	list_move (&sitd->sitd_list, &stream->free_list);
	stream->depth -= stream->interval << 3;
	iso_stream_put (ehci, stream);

	/* handle completion now? */
	if ((urb_index + 1) != urb->number_of_packets)
		return 0;

	/* ASSERT: it's really the last sitd for this urb
	list_for_each_entry (sitd, &stream->td_list, sitd_list)
		BUG_ON (sitd->urb == urb);
	 */

	/* give urb back to the driver */
	dev = usb_get_dev (urb->dev);
	ehci_urb_done (ehci, urb, regs);
	urb = NULL;

	/* defer stopping schedule; completion can submit */
	ehci->periodic_sched--;
	if (!ehci->periodic_sched)
		(void) disable_periodic (ehci);
	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;

	if (list_empty (&stream->td_list)) {
		ehci_to_hcd(ehci)->self.bandwidth_allocated
				-= stream->bandwidth;
		ehci_vdbg (ehci,
			"deschedule devp %s ep%d%s-iso\n",
			dev->devpath, stream->bEndpointAddress & 0x0f,
			(stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
	}
	iso_stream_put (ehci, stream);
	usb_put_dev (dev);

	return 1;
}
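
/* The nonzero return from the urb's last sITD tells scan_periodic()
 * that ehci_urb_done() ran, so the completion handler may have
 * resubmitted or otherwise modified the periodic schedule; the caller
 * responds by restarting its scan of the current frame.
 */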

static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb,
	unsigned mem_flags)
{
	int			status = -EINVAL;
	unsigned long		flags;
	struct ehci_iso_stream	*stream;

	/* Get iso_stream head */
	stream = iso_stream_find (ehci, urb);
	if (stream == NULL) {
		ehci_dbg (ehci, "can't get iso stream\n");
		return -ENOMEM;
	}
	if (urb->interval != stream->interval) {
		ehci_dbg (ehci, "can't change iso interval %d --> %d\n",
			stream->interval, urb->interval);
		goto done;
	}

#ifdef EHCI_URB_TRACE
	ehci_dbg (ehci,
		"submit %p dev%s ep%d%s-iso len %d\n",
		urb, urb->dev->devpath,
		usb_pipeendpoint (urb->pipe),
		usb_pipein (urb->pipe) ? "in" : "out",
		urb->transfer_buffer_length);
#endif

	/* allocate SITDs */
	status = sitd_urb_transaction (stream, ehci, urb, mem_flags);
	if (status < 0) {
		ehci_dbg (ehci, "can't init sitds\n");
		goto done;
	}

	/* schedule ... need to lock */
	spin_lock_irqsave (&ehci->lock, flags);
	status = iso_stream_schedule (ehci, urb, stream);
	if (status == 0)
		sitd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
	spin_unlock_irqrestore (&ehci->lock, flags);

done:
	if (status < 0)
		iso_stream_put (ehci, stream);
	return status;
}
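
/* Submit path, in order: look up (or create) the endpoint's stream,
 * build this urb's sITDs with sitd_urb_transaction() before the
 * scheduling lock is taken, then hold ehci->lock just long enough for
 * iso_stream_schedule() to pick a slot and sitd_link_urb() to wire the
 * sITDs into the frame list.
 */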

#else

static inline int
sitd_submit (struct ehci_hcd *ehci, struct urb *urb,
	unsigned mem_flags)
{
	ehci_dbg (ehci, "split iso support is disabled\n");
	return -ENOSYS;
}

static inline unsigned
sitd_complete (
	struct ehci_hcd		*ehci,
	struct ehci_sitd	*sitd,
	struct pt_regs		*regs
) {
	ehci_err (ehci, "sitd_complete %p?\n", sitd);
	return 0;
}

#endif /* USB_EHCI_SPLIT_ISO */

/*-------------------------------------------------------------------------*/

static void
scan_periodic (struct ehci_hcd *ehci, struct pt_regs *regs)
{
	unsigned	frame, clock, now_uframe, mod;
	unsigned	modified;

	mod = ehci->periodic_size << 3;

	/*
	 * When running, scan from last scan point up to "now"
	 * else clean up by scanning everything that's left.
	 * Touches as few pages as possible: cache-friendly.
	 */
	now_uframe = ehci->next_uframe;
	if (HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
		clock = readl (&ehci->regs->frame_index);
	else
		clock = now_uframe + mod - 1;
	clock %= mod;
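
	/* With the HC halted, "clock" lands one uframe behind now_uframe
	 * (mod the schedule size), so the loop below makes exactly one
	 * full lap over the schedule and reclaims everything left on it.
	 */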

	for (;;) {
		union ehci_shadow	q, *q_p;
		__le32			type, *hw_p;
		unsigned		uframes;

		/* don't scan past the live uframe */
		frame = now_uframe >> 3;
		if (frame == (clock >> 3))
			uframes = now_uframe & 0x07;
		else {
			/* safe to scan the whole frame at once */
			now_uframe |= 0x07;
			uframes = 8;
		}
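
		/* "uframes" limits how deep into this frame the itd scan
		 * may look: in the frame the HC is currently executing,
		 * only transactions at uframes it has already passed can
		 * safely be treated as done.
		 */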

restart:
		/* scan each element in frame's queue for completions */
		q_p = &ehci->pshadow [frame];
		hw_p = &ehci->periodic [frame];
		q.ptr = q_p->ptr;
		type = Q_NEXT_TYPE (*hw_p);
		modified = 0;

		while (q.ptr != NULL) {
			unsigned		uf;
			union ehci_shadow	temp;
			int			live;

			live = HC_IS_RUNNING (ehci_to_hcd(ehci)->state);
			switch (type) {
			case Q_TYPE_QH:
				/* handle any completions */
				temp.qh = qh_get (q.qh);
				type = Q_NEXT_TYPE (q.qh->hw_next);
				q = q.qh->qh_next;
				modified = qh_completions (ehci, temp.qh, regs);
				if (unlikely (list_empty (&temp.qh->qtd_list)))
					intr_deschedule (ehci, temp.qh);
				qh_put (temp.qh);
				break;
			case Q_TYPE_FSTN:
				/* for "save place" FSTNs, look at QH entries
				 * in the previous frame for completions.
				 */
				if (q.fstn->hw_prev != EHCI_LIST_END) {
					dbg ("ignoring completions from FSTNs");
				}
				type = Q_NEXT_TYPE (q.fstn->hw_next);
				q = q.fstn->fstn_next;
				break;
			case Q_TYPE_ITD:
				/* skip itds for later in the frame */
				rmb ();
				for (uf = live ? uframes : 8; uf < 8; uf++) {
					if (0 == (q.itd->hw_transaction [uf]
							& ITD_ACTIVE))
						continue;
					q_p = &q.itd->itd_next;
					hw_p = &q.itd->hw_next;
					type = Q_NEXT_TYPE (q.itd->hw_next);
					q = *q_p;
					break;
				}
				if (uf != 8)
					break;

				/* this one's ready ... HC won't cache the
				 * pointer for much longer, if at all.
				 */
				*q_p = q.itd->itd_next;
				*hw_p = q.itd->hw_next;
				type = Q_NEXT_TYPE (q.itd->hw_next);
				wmb();
				modified = itd_complete (ehci, q.itd, regs);
				q = *q_p;
				break;
			case Q_TYPE_SITD:
				if ((q.sitd->hw_results & SITD_ACTIVE)
						&& live) {
					q_p = &q.sitd->sitd_next;
					hw_p = &q.sitd->hw_next;
					type = Q_NEXT_TYPE (q.sitd->hw_next);
					q = *q_p;
					break;
				}
				*q_p = q.sitd->sitd_next;
				*hw_p = q.sitd->hw_next;
				type = Q_NEXT_TYPE (q.sitd->hw_next);
				wmb();
				modified = sitd_complete (ehci, q.sitd, regs);
				q = *q_p;
				break;
			default:
				dbg ("corrupt type %d frame %d shadow %p",
					type, frame, q.ptr);
				// BUG ();
				q.ptr = NULL;
			}

			/* assume completion callbacks modify the queue */
			if (unlikely (modified))
				goto restart;
		}

		/* stop when we catch up to the HC */

		// FIXME: this assumes we won't get lapped when
		// latencies climb; that should be rare, but...
		// detect it, and just go all the way around.
		// FLR might help detect this case, so long as latencies
		// don't exceed periodic_size msec (default 1.024 sec).

		// FIXME: likewise assumes HC doesn't halt mid-scan

		if (now_uframe == clock) {
			unsigned	now;

			if (!HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
				break;
			ehci->next_uframe = now_uframe;
			now = readl (&ehci->regs->frame_index) % mod;
			if (now_uframe == now)
				break;

			/* rescan the rest of this frame, then ... */
			clock = now;
		} else {
			now_uframe++;
			now_uframe %= mod;
		}
	}
}