oxu210hp-hcd.c

/*
 * Copyright (c) 2008 Rodolfo Giometti <giometti@linux.it>
 * Copyright (c) 2008 Eurotech S.p.A. <info@eurtech.it>
 *
 * This code is *strongly* based on EHCI-HCD code by David Brownell since
 * the chip is a quasi-EHCI compatible.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>

#include <asm/irq.h>
#include <asm/system.h>
#include <asm/unaligned.h>

#include <linux/irq.h>
#include <linux/platform_device.h>

#include "oxu210hp.h"

#define DRIVER_VERSION "0.0.50"

/*
 * Main defines
 */

#define oxu_dbg(oxu, fmt, args...) \
		dev_dbg(oxu_to_hcd(oxu)->self.controller , fmt , ## args)
#define oxu_err(oxu, fmt, args...) \
		dev_err(oxu_to_hcd(oxu)->self.controller , fmt , ## args)
#define oxu_info(oxu, fmt, args...) \
		dev_info(oxu_to_hcd(oxu)->self.controller , fmt , ## args)

static inline struct usb_hcd *oxu_to_hcd(struct oxu_hcd *oxu)
{
	return container_of((void *) oxu, struct usb_hcd, hcd_priv);
}

static inline struct oxu_hcd *hcd_to_oxu(struct usb_hcd *hcd)
{
	return (struct oxu_hcd *) (hcd->hcd_priv);
}

/*
 * Debug stuff
 */

#undef OXU_URB_TRACE
#undef OXU_VERBOSE_DEBUG

#ifdef OXU_VERBOSE_DEBUG
#define oxu_vdbg			oxu_dbg
#else
#define oxu_vdbg(oxu, fmt, args...)	/* Nop */
#endif

#ifdef DEBUG

static int __attribute__((__unused__))
dbg_status_buf(char *buf, unsigned len, const char *label, u32 status)
{
	return scnprintf(buf, len, "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s",
		label, label[0] ? " " : "", status,
		(status & STS_ASS) ? " Async" : "",
		(status & STS_PSS) ? " Periodic" : "",
		(status & STS_RECL) ? " Recl" : "",
		(status & STS_HALT) ? " Halt" : "",
		(status & STS_IAA) ? " IAA" : "",
		(status & STS_FATAL) ? " FATAL" : "",
		(status & STS_FLR) ? " FLR" : "",
		(status & STS_PCD) ? " PCD" : "",
		(status & STS_ERR) ? " ERR" : "",
		(status & STS_INT) ? " INT" : ""
		);
}

static int __attribute__((__unused__))
dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable)
{
	return scnprintf(buf, len, "%s%sintrenable %02x%s%s%s%s%s%s",
		label, label[0] ? " " : "", enable,
		(enable & STS_IAA) ? " IAA" : "",
		(enable & STS_FATAL) ? " FATAL" : "",
		(enable & STS_FLR) ? " FLR" : "",
		(enable & STS_PCD) ? " PCD" : "",
		(enable & STS_ERR) ? " ERR" : "",
		(enable & STS_INT) ? " INT" : ""
		);
}

static const char *const fls_strings[] =
    { "1024", "512", "256", "??" };

static int dbg_command_buf(char *buf, unsigned len,
			   const char *label, u32 command)
{
	return scnprintf(buf, len,
		"%s%scommand %06x %s=%d ithresh=%d%s%s%s%s period=%s%s %s",
		label, label[0] ? " " : "", command,
		(command & CMD_PARK) ? "park" : "(park)",
		CMD_PARK_CNT(command),
		(command >> 16) & 0x3f,
		(command & CMD_LRESET) ? " LReset" : "",
		(command & CMD_IAAD) ? " IAAD" : "",
		(command & CMD_ASE) ? " Async" : "",
		(command & CMD_PSE) ? " Periodic" : "",
		fls_strings[(command >> 2) & 0x3],
		(command & CMD_RESET) ? " Reset" : "",
		(command & CMD_RUN) ? "RUN" : "HALT"
		);
}

static int dbg_port_buf(char *buf, unsigned len, const char *label,
			int port, u32 status)
{
	char *sig;

	/* signaling state */
	switch (status & (3 << 10)) {
	case 0 << 10:
		sig = "se0";
		break;
	case 1 << 10:
		sig = "k";	/* low speed */
		break;
	case 2 << 10:
		sig = "j";
		break;
	default:
		sig = "?";
		break;
	}

	return scnprintf(buf, len,
		"%s%sport %d status %06x%s%s sig=%s%s%s%s%s%s%s%s%s%s",
		label, label[0] ? " " : "", port, status,
		(status & PORT_POWER) ? " POWER" : "",
		(status & PORT_OWNER) ? " OWNER" : "",
		sig,
		(status & PORT_RESET) ? " RESET" : "",
		(status & PORT_SUSPEND) ? " SUSPEND" : "",
		(status & PORT_RESUME) ? " RESUME" : "",
		(status & PORT_OCC) ? " OCC" : "",
		(status & PORT_OC) ? " OC" : "",
		(status & PORT_PEC) ? " PEC" : "",
		(status & PORT_PE) ? " PE" : "",
		(status & PORT_CSC) ? " CSC" : "",
		(status & PORT_CONNECT) ? " CONNECT" : ""
		);
}

#else

static inline int __attribute__((__unused__))
dbg_status_buf(char *buf, unsigned len, const char *label, u32 status)
{ return 0; }

static inline int __attribute__((__unused__))
dbg_command_buf(char *buf, unsigned len, const char *label, u32 command)
{ return 0; }

static inline int __attribute__((__unused__))
dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable)
{ return 0; }

static inline int __attribute__((__unused__))
dbg_port_buf(char *buf, unsigned len, const char *label, int port, u32 status)
{ return 0; }

#endif /* DEBUG */

/* functions have the "wrong" filename when they're output... */
#define dbg_status(oxu, label, status) { \
	char _buf[80]; \
	dbg_status_buf(_buf, sizeof _buf, label, status); \
	oxu_dbg(oxu, "%s\n", _buf); \
}

#define dbg_cmd(oxu, label, command) { \
	char _buf[80]; \
	dbg_command_buf(_buf, sizeof _buf, label, command); \
	oxu_dbg(oxu, "%s\n", _buf); \
}

#define dbg_port(oxu, label, port, status) { \
	char _buf[80]; \
	dbg_port_buf(_buf, sizeof _buf, label, port, status); \
	oxu_dbg(oxu, "%s\n", _buf); \
}

/*
 * Module parameters
 */

/* Initial IRQ latency: faster than hw default */
static int log2_irq_thresh;		/* 0 to 6 */
module_param(log2_irq_thresh, int, S_IRUGO);
MODULE_PARM_DESC(log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");

/* Initial park setting: slower than hw default */
static unsigned park;
module_param(park, uint, S_IRUGO);
MODULE_PARM_DESC(park, "park setting; 1-3 back-to-back async packets");

/* For flakey hardware, ignore overcurrent indicators */
static int ignore_oc;
module_param(ignore_oc, bool, S_IRUGO);
MODULE_PARM_DESC(ignore_oc, "ignore bogus hardware overcurrent indications");

static void ehci_work(struct oxu_hcd *oxu);
static int oxu_hub_control(struct usb_hcd *hcd,
				u16 typeReq, u16 wValue, u16 wIndex,
				char *buf, u16 wLength);

/*
 * Local functions
 */

/* Low level read/write registers functions */
static inline u32 oxu_readl(void *base, u32 reg)
{
	return readl(base + reg);
}

static inline void oxu_writel(void *base, u32 reg, u32 val)
{
	writel(val, base + reg);
}

static inline void timer_action_done(struct oxu_hcd *oxu,
					enum ehci_timer_action action)
{
	clear_bit(action, &oxu->actions);
}

static inline void timer_action(struct oxu_hcd *oxu,
				enum ehci_timer_action action)
{
	if (!test_and_set_bit(action, &oxu->actions)) {
		unsigned long t;

		switch (action) {
		case TIMER_IAA_WATCHDOG:
			t = EHCI_IAA_JIFFIES;
			break;
		case TIMER_IO_WATCHDOG:
			t = EHCI_IO_JIFFIES;
			break;
		case TIMER_ASYNC_OFF:
			t = EHCI_ASYNC_JIFFIES;
			break;
		case TIMER_ASYNC_SHRINK:
		default:
			t = EHCI_SHRINK_JIFFIES;
			break;
		}
		t += jiffies;
		/* all timings except IAA watchdog can be overridden.
		 * async queue SHRINK often precedes IAA. while it's ready
		 * to go OFF neither can matter, and afterwards the IO
		 * watchdog stops unless there's still periodic traffic.
		 */
		if (action != TIMER_IAA_WATCHDOG
				&& t > oxu->watchdog.expires
				&& timer_pending(&oxu->watchdog))
			return;
		mod_timer(&oxu->watchdog, t);
	}
}
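
/*
 * Illustrative note (not extra driver logic): all four actions share the
 * single oxu->watchdog timer, so arming a second action only reprograms
 * the timer when it must fire *earlier* than the pending expiry;
 * otherwise just the action bit is recorded and the already-pending
 * timeout is reused.  TIMER_IAA_WATCHDOG is the exception: its
 * mod_timer() call is never skipped.
 */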

/*
 * handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done). There are two failure modes: "usec" have passed (major
 * hardware flakeout), or the register reads as all-ones (hardware removed).
 *
 * That last failure should only happen in cases like physical cardbus eject
 * before driver shutdown. But it also seems to be caused by bugs in cardbus
 * bridge shutdown: shutting down the bridge before the devices using it.
 */
static int handshake(struct oxu_hcd *oxu, void __iomem *ptr,
					u32 mask, u32 done, int usec)
{
	u32 result;

	do {
		result = readl(ptr);
		if (result == ~(u32)0)		/* card removed */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);

	return -ETIMEDOUT;
}
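
/*
 * Usage sketch (mirrors the callers below, e.g. ehci_halt()): wait up to
 * 16 microframes (16 * 125 usec) for the controller to report halted:
 *
 *	if (handshake(oxu, &oxu->regs->status,
 *			STS_HALT, STS_HALT, 16 * 125) == -ENODEV)
 *		;	// register read back all-ones: hardware is gone
 */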

/* Force HC to halt state from unknown (EHCI spec section 2.3) */
static int ehci_halt(struct oxu_hcd *oxu)
{
	u32 temp = readl(&oxu->regs->status);

	/* disable any irqs left enabled by previous code */
	writel(0, &oxu->regs->intr_enable);

	if ((temp & STS_HALT) != 0)
		return 0;

	temp = readl(&oxu->regs->command);
	temp &= ~CMD_RUN;
	writel(temp, &oxu->regs->command);
	return handshake(oxu, &oxu->regs->status,
			  STS_HALT, STS_HALT, 16 * 125);
}

/* Put TDI/ARC silicon into EHCI mode */
static void tdi_reset(struct oxu_hcd *oxu)
{
	u32 __iomem *reg_ptr;
	u32 tmp;

	reg_ptr = (u32 __iomem *)(((u8 __iomem *)oxu->regs) + 0x68);
	tmp = readl(reg_ptr);
	tmp |= 0x3;
	writel(tmp, reg_ptr);
}

/* Reset a non-running (STS_HALT == 1) controller */
static int ehci_reset(struct oxu_hcd *oxu)
{
	int retval;
	u32 command = readl(&oxu->regs->command);

	command |= CMD_RESET;
	dbg_cmd(oxu, "reset", command);
	writel(command, &oxu->regs->command);
	oxu_to_hcd(oxu)->state = HC_STATE_HALT;
	oxu->next_statechange = jiffies;
	retval = handshake(oxu, &oxu->regs->command,
			    CMD_RESET, 0, 250 * 1000);
	if (retval)
		return retval;

	tdi_reset(oxu);

	return retval;
}

/* Idle the controller (from running) */
static void ehci_quiesce(struct oxu_hcd *oxu)
{
	u32 temp;

#ifdef DEBUG
	if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
		BUG();
#endif

	/* wait for any schedule enables/disables to take effect */
	temp = readl(&oxu->regs->command) << 10;
	temp &= STS_ASS | STS_PSS;
	if (handshake(oxu, &oxu->regs->status, STS_ASS | STS_PSS,
				temp, 16 * 125) != 0) {
		oxu_to_hcd(oxu)->state = HC_STATE_HALT;
		return;
	}

	/* then disable anything that's still active */
	temp = readl(&oxu->regs->command);
	temp &= ~(CMD_ASE | CMD_IAAD | CMD_PSE);
	writel(temp, &oxu->regs->command);

	/* hardware can take 16 microframes to turn off ... */
	if (handshake(oxu, &oxu->regs->status, STS_ASS | STS_PSS,
				0, 16 * 125) != 0) {
		oxu_to_hcd(oxu)->state = HC_STATE_HALT;
		return;
	}
}

static int check_reset_complete(struct oxu_hcd *oxu, int index,
				u32 __iomem *status_reg, int port_status)
{
	if (!(port_status & PORT_CONNECT)) {
		oxu->reset_done[index] = 0;
		return port_status;
	}

	/* if reset finished and it's still not enabled -- handoff */
	if (!(port_status & PORT_PE)) {
		oxu_dbg(oxu, "Failed to enable port %d on root hub TT\n",
				index+1);
		return port_status;
	} else
		oxu_dbg(oxu, "port %d high speed\n", index + 1);

	return port_status;
}

static void ehci_hub_descriptor(struct oxu_hcd *oxu,
				struct usb_hub_descriptor *desc)
{
	int ports = HCS_N_PORTS(oxu->hcs_params);
	u16 temp;

	desc->bDescriptorType = 0x29;
	desc->bPwrOn2PwrGood = 10;	/* oxu 1.0, 2.3.9 says 20ms max */
	desc->bHubContrCurrent = 0;

	desc->bNbrPorts = ports;
	temp = 1 + (ports / 8);
	desc->bDescLength = 7 + 2 * temp;

	/* ports removable, and usb 1.0 legacy PortPwrCtrlMask */
	memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
	memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);

	temp = 0x0008;			/* per-port overcurrent reporting */
	if (HCS_PPC(oxu->hcs_params))
		temp |= 0x0001;		/* per-port power control */
	else
		temp |= 0x0002;		/* no power switching */
	desc->wHubCharacteristics = (__force __u16)cpu_to_le16(temp);
}

/* Allocate an OXU210HP on-chip memory data buffer
 *
 * An on-chip memory data buffer is required for each OXU210HP USB transfer.
 * Each transfer descriptor has one or more on-chip memory data buffers.
 *
 * Data buffers are allocated from a fixed-size pool of data blocks.
 * To minimise fragmentation and give reasonable memory utilisation,
 * data buffers are allocated with sizes that are power-of-2 multiples of
 * the block size, starting on an address that is a multiple of the
 * allocated size.
 *
 * FIXME: callers of this function require a buffer to be allocated for
 * len=0. This is a waste of on-chip memory and should be fixed. Then this
 * function should be changed to not allocate a buffer for len=0.
 */
static int oxu_buf_alloc(struct oxu_hcd *oxu, struct ehci_qtd *qtd, int len)
{
	int n_blocks;	/* minimum blocks needed to hold len */
	int a_blocks;	/* blocks allocated */
	int i, j;

	/* Don't allocate bigger than supported */
	if (len > BUFFER_SIZE * BUFFER_NUM) {
		oxu_err(oxu, "buffer too big (%d)\n", len);
		return -ENOMEM;
	}

	spin_lock(&oxu->mem_lock);

	/* Number of blocks needed to hold len */
	n_blocks = (len + BUFFER_SIZE - 1) / BUFFER_SIZE;

	/* Round the number of blocks up to the power of 2 */
	for (a_blocks = 1; a_blocks < n_blocks; a_blocks <<= 1)
		;

	/* Find a suitable available data buffer */
	for (i = 0; i < BUFFER_NUM;
			i += max(a_blocks, (int)oxu->db_used[i])) {

		/* Check all the required blocks are available */
		for (j = 0; j < a_blocks; j++)
			if (oxu->db_used[i + j])
				break;

		if (j != a_blocks)
			continue;

		/* Allocate blocks found! */
		qtd->buffer = (void *) &oxu->mem->db_pool[i];
		qtd->buffer_dma = virt_to_phys(qtd->buffer);

		qtd->qtd_buffer_len = BUFFER_SIZE * a_blocks;
		oxu->db_used[i] = a_blocks;

		spin_unlock(&oxu->mem_lock);

		return 0;
	}

	/* Failed */
	spin_unlock(&oxu->mem_lock);

	return -ENOMEM;
}
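
/*
 * Worked example of the rounding above (BUFFER_SIZE comes from
 * oxu210hp.h; a 512-byte block is assumed here purely for illustration):
 * len = 1500 gives n_blocks = 3, rounded up to a_blocks = 4, so the scan
 * looks for four consecutive free blocks and records the allocation size
 * in db_used[i].  The stride "i += max(a_blocks, db_used[i])" then lets
 * later scans hop over whole allocations instead of testing every block.
 */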
static void oxu_buf_free(struct oxu_hcd *oxu, struct ehci_qtd *qtd)
{
	int index;

	spin_lock(&oxu->mem_lock);

	index = (qtd->buffer - (void *) &oxu->mem->db_pool[0])
							 / BUFFER_SIZE;
	oxu->db_used[index] = 0;
	qtd->qtd_buffer_len = 0;
	qtd->buffer_dma = 0;
	qtd->buffer = NULL;

	spin_unlock(&oxu->mem_lock);
}

static inline void ehci_qtd_init(struct ehci_qtd *qtd, dma_addr_t dma)
{
	memset(qtd, 0, sizeof *qtd);
	qtd->qtd_dma = dma;
	qtd->hw_token = cpu_to_le32(QTD_STS_HALT);
	qtd->hw_next = EHCI_LIST_END;
	qtd->hw_alt_next = EHCI_LIST_END;
	INIT_LIST_HEAD(&qtd->qtd_list);
}

static inline void oxu_qtd_free(struct oxu_hcd *oxu, struct ehci_qtd *qtd)
{
	int index;

	if (qtd->buffer)
		oxu_buf_free(oxu, qtd);

	spin_lock(&oxu->mem_lock);

	index = qtd - &oxu->mem->qtd_pool[0];
	oxu->qtd_used[index] = 0;

	spin_unlock(&oxu->mem_lock);
}

static struct ehci_qtd *ehci_qtd_alloc(struct oxu_hcd *oxu)
{
	int i;
	struct ehci_qtd *qtd = NULL;

	spin_lock(&oxu->mem_lock);

	for (i = 0; i < QTD_NUM; i++)
		if (!oxu->qtd_used[i])
			break;

	if (i < QTD_NUM) {
		qtd = (struct ehci_qtd *) &oxu->mem->qtd_pool[i];
		memset(qtd, 0, sizeof *qtd);

		qtd->hw_token = cpu_to_le32(QTD_STS_HALT);
		qtd->hw_next = EHCI_LIST_END;
		qtd->hw_alt_next = EHCI_LIST_END;
		INIT_LIST_HEAD(&qtd->qtd_list);

		qtd->qtd_dma = virt_to_phys(qtd);

		oxu->qtd_used[i] = 1;
	}

	spin_unlock(&oxu->mem_lock);

	return qtd;
}

static void oxu_qh_free(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	int index;

	spin_lock(&oxu->mem_lock);

	index = qh - &oxu->mem->qh_pool[0];
	oxu->qh_used[index] = 0;

	spin_unlock(&oxu->mem_lock);
}

static void qh_destroy(struct kref *kref)
{
	struct ehci_qh *qh = container_of(kref, struct ehci_qh, kref);
	struct oxu_hcd *oxu = qh->oxu;

	/* clean qtds first, and know this is not linked */
	if (!list_empty(&qh->qtd_list) || qh->qh_next.ptr) {
		oxu_dbg(oxu, "unused qh not empty!\n");
		BUG();
	}
	if (qh->dummy)
		oxu_qtd_free(oxu, qh->dummy);
	oxu_qh_free(oxu, qh);
}

static struct ehci_qh *oxu_qh_alloc(struct oxu_hcd *oxu)
{
	int i;
	struct ehci_qh *qh = NULL;

	spin_lock(&oxu->mem_lock);

	for (i = 0; i < QHEAD_NUM; i++)
		if (!oxu->qh_used[i])
			break;

	if (i < QHEAD_NUM) {
		qh = (struct ehci_qh *) &oxu->mem->qh_pool[i];
		memset(qh, 0, sizeof *qh);

		kref_init(&qh->kref);
		qh->oxu = oxu;
		qh->qh_dma = virt_to_phys(qh);
		INIT_LIST_HEAD(&qh->qtd_list);

		/* dummy td enables safe urb queuing */
		qh->dummy = ehci_qtd_alloc(oxu);
		if (qh->dummy == NULL) {
			oxu_dbg(oxu, "no dummy td\n");
			oxu->qh_used[i] = 0;
			qh = NULL;
			goto unlock;
		}

		oxu->qh_used[i] = 1;
	}
unlock:
	spin_unlock(&oxu->mem_lock);

	return qh;
}

/* to share a qh (cpu threads, or hc) */
static inline struct ehci_qh *qh_get(struct ehci_qh *qh)
{
	kref_get(&qh->kref);
	return qh;
}

static inline void qh_put(struct ehci_qh *qh)
{
	kref_put(&qh->kref, qh_destroy);
}

static void oxu_murb_free(struct oxu_hcd *oxu, struct oxu_murb *murb)
{
	int index;

	spin_lock(&oxu->mem_lock);

	index = murb - &oxu->murb_pool[0];
	oxu->murb_used[index] = 0;

	spin_unlock(&oxu->mem_lock);
}

static struct oxu_murb *oxu_murb_alloc(struct oxu_hcd *oxu)
{
	int i;
	struct oxu_murb *murb = NULL;

	spin_lock(&oxu->mem_lock);

	for (i = 0; i < MURB_NUM; i++)
		if (!oxu->murb_used[i])
			break;

	if (i < MURB_NUM) {
		murb = &(oxu->murb_pool)[i];

		oxu->murb_used[i] = 1;
	}

	spin_unlock(&oxu->mem_lock);

	return murb;
}

/* The queue heads and transfer descriptors are managed from pools tied
 * to each of the "per device" structures.
 * This is the initialisation and cleanup code.
 */
static void ehci_mem_cleanup(struct oxu_hcd *oxu)
{
	kfree(oxu->murb_pool);
	oxu->murb_pool = NULL;

	if (oxu->async)
		qh_put(oxu->async);
	oxu->async = NULL;

	del_timer(&oxu->urb_timer);

	oxu->periodic = NULL;

	/* shadow periodic table */
	kfree(oxu->pshadow);
	oxu->pshadow = NULL;
}

/* Remember to add cleanup code (above) if you add anything here.
 */
static int ehci_mem_init(struct oxu_hcd *oxu, gfp_t flags)
{
	int i;

	for (i = 0; i < oxu->periodic_size; i++)
		oxu->mem->frame_list[i] = EHCI_LIST_END;
	for (i = 0; i < QHEAD_NUM; i++)
		oxu->qh_used[i] = 0;
	for (i = 0; i < QTD_NUM; i++)
		oxu->qtd_used[i] = 0;

	oxu->murb_pool = kcalloc(MURB_NUM, sizeof(struct oxu_murb), flags);
	if (!oxu->murb_pool)
		goto fail;

	for (i = 0; i < MURB_NUM; i++)
		oxu->murb_used[i] = 0;

	oxu->async = oxu_qh_alloc(oxu);
	if (!oxu->async)
		goto fail;

	oxu->periodic = (__le32 *) &oxu->mem->frame_list;
	oxu->periodic_dma = virt_to_phys(oxu->periodic);

	for (i = 0; i < oxu->periodic_size; i++)
		oxu->periodic[i] = EHCI_LIST_END;

	/* software shadow of hardware table */
	oxu->pshadow = kcalloc(oxu->periodic_size, sizeof(void *), flags);
	if (oxu->pshadow != NULL)
		return 0;

fail:
	oxu_dbg(oxu, "couldn't init memory\n");
	ehci_mem_cleanup(oxu);
	return -ENOMEM;
}

/* Fill a qtd, returning how much of the buffer we were able to queue up.
 */
static int qtd_fill(struct ehci_qtd *qtd, dma_addr_t buf, size_t len,
				int token, int maxpacket)
{
	int i, count;
	u64 addr = buf;

	/* one buffer entry per 4K ... first might be short or unaligned */
	qtd->hw_buf[0] = cpu_to_le32((u32)addr);
	qtd->hw_buf_hi[0] = cpu_to_le32((u32)(addr >> 32));
	count = 0x1000 - (buf & 0x0fff);	/* rest of that page */
	if (likely(len < count))		/* ... iff needed */
		count = len;
	else {
		buf +=  0x1000;
		buf &= ~0x0fff;

		/* per-qtd limit: from 16K to 20K (best alignment) */
		for (i = 1; count < len && i < 5; i++) {
			addr = buf;
			qtd->hw_buf[i] = cpu_to_le32((u32)addr);
			qtd->hw_buf_hi[i] = cpu_to_le32((u32)(addr >> 32));
			buf += 0x1000;
			if ((count + 0x1000) < len)
				count += 0x1000;
			else
				count = len;
		}

		/* short packets may only terminate transfers */
		if (count != len)
			count -= (count % maxpacket);
	}
	qtd->hw_token = cpu_to_le32((count << 16) | token);
	qtd->length = count;

	return count;
}
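
/*
 * Worked example (illustrative numbers): buf = 0x12345800, len = 20000.
 * hw_buf[0] covers the 0x800 bytes left in the first page and
 * hw_buf[1..4] cover 0x1000 each, so count tops out at
 * 0x800 + 4 * 0x1000 = 18432 bytes.  Since count != len, count is
 * trimmed to a multiple of maxpacket (18432 already is one for
 * maxpacket = 512), and the caller queues the remaining 1568 bytes in
 * the next qtd.
 */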
static inline void qh_update(struct oxu_hcd *oxu,
				struct ehci_qh *qh, struct ehci_qtd *qtd)
{
	/* writes to an active overlay are unsafe */
	BUG_ON(qh->qh_state != QH_STATE_IDLE);

	qh->hw_qtd_next = QTD_NEXT(qtd->qtd_dma);
	qh->hw_alt_next = EHCI_LIST_END;

	/* Except for control endpoints, we make hardware maintain data
	 * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
	 * and set the pseudo-toggle in udev. Only usb_clear_halt() will
	 * ever clear it.
	 */
	if (!(qh->hw_info1 & cpu_to_le32(1 << 14))) {
		unsigned is_out, epnum;

		is_out = !(qtd->hw_token & cpu_to_le32(1 << 8));
		epnum = (le32_to_cpup(&qh->hw_info1) >> 8) & 0x0f;
		if (unlikely(!usb_gettoggle(qh->dev, epnum, is_out))) {
			qh->hw_token &= ~cpu_to_le32(QTD_TOGGLE);
			usb_settoggle(qh->dev, epnum, is_out, 1);
		}
	}

	/* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
	wmb();
	qh->hw_token &= cpu_to_le32(QTD_TOGGLE | QTD_STS_PING);
}

/* If it weren't for a common silicon quirk (writing the dummy into the qh
 * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
 * recovery (including urb dequeue) would need software changes to a QH...
 */
static void qh_refresh(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	struct ehci_qtd *qtd;

	if (list_empty(&qh->qtd_list))
		qtd = qh->dummy;
	else {
		qtd = list_entry(qh->qtd_list.next,
				struct ehci_qtd, qtd_list);
		/* first qtd may already be partially processed */
		if (cpu_to_le32(qtd->qtd_dma) == qh->hw_current)
			qtd = NULL;
	}

	if (qtd)
		qh_update(oxu, qh, qtd);
}

static void qtd_copy_status(struct oxu_hcd *oxu, struct urb *urb,
				size_t length, u32 token)
{
	/* count IN/OUT bytes, not SETUP (even short packets) */
	if (likely(QTD_PID(token) != 2))
		urb->actual_length += length - QTD_LENGTH(token);

	/* don't modify error codes */
	if (unlikely(urb->status != -EINPROGRESS))
		return;

	/* force cleanup after short read; not always an error */
	if (unlikely(IS_SHORT_READ(token)))
		urb->status = -EREMOTEIO;

	/* serious "can't proceed" faults reported by the hardware */
	if (token & QTD_STS_HALT) {
		if (token & QTD_STS_BABBLE) {
			/* FIXME "must" disable babbling device's port too */
			urb->status = -EOVERFLOW;
		} else if (token & QTD_STS_MMF) {
			/* fs/ls interrupt xfer missed the complete-split */
			urb->status = -EPROTO;
		} else if (token & QTD_STS_DBE) {
			urb->status = (QTD_PID(token) == 1) /* IN ? */
				? -ENOSR  /* hc couldn't read data */
				: -ECOMM; /* hc couldn't write data */
		} else if (token & QTD_STS_XACT) {
			/* timeout, bad crc, wrong PID, etc; retried */
			if (QTD_CERR(token))
				urb->status = -EPIPE;
			else {
				oxu_dbg(oxu, "devpath %s ep%d%s 3strikes\n",
					urb->dev->devpath,
					usb_pipeendpoint(urb->pipe),
					usb_pipein(urb->pipe) ? "in" : "out");
				urb->status = -EPROTO;
			}
		/* CERR nonzero + no errors + halt --> stall */
		} else if (QTD_CERR(token))
			urb->status = -EPIPE;
		else	/* unknown */
			urb->status = -EPROTO;

		oxu_vdbg(oxu, "dev%d ep%d%s qtd token %08x --> status %d\n",
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			usb_pipein(urb->pipe) ? "in" : "out",
			token, urb->status);
	}
}

static void ehci_urb_done(struct oxu_hcd *oxu, struct urb *urb)
__releases(oxu->lock)
__acquires(oxu->lock)
{
	if (likely(urb->hcpriv != NULL)) {
		struct ehci_qh *qh = (struct ehci_qh *) urb->hcpriv;

		/* S-mask in a QH means it's an interrupt urb */
		if ((qh->hw_info2 & cpu_to_le32(QH_SMASK)) != 0) {

			/* ... update hc-wide periodic stats (for usbfs) */
			oxu_to_hcd(oxu)->self.bandwidth_int_reqs--;
		}
		qh_put(qh);
	}

	urb->hcpriv = NULL;
	switch (urb->status) {
	case -EINPROGRESS:		/* success */
		urb->status = 0;
	default:			/* fault */
		break;
	case -EREMOTEIO:		/* fault or normal */
		if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
			urb->status = 0;
		break;
	case -ECONNRESET:		/* canceled */
	case -ENOENT:
		break;
	}

#ifdef OXU_URB_TRACE
	oxu_dbg(oxu, "%s %s urb %p ep%d%s status %d len %d/%d\n",
		__func__, urb->dev->devpath, urb,
		usb_pipeendpoint(urb->pipe),
		usb_pipein(urb->pipe) ? "in" : "out",
		urb->status,
		urb->actual_length, urb->transfer_buffer_length);
#endif

	/* complete() can reenter this HCD */
	spin_unlock(&oxu->lock);
	usb_hcd_giveback_urb(oxu_to_hcd(oxu), urb, urb->status);
	spin_lock(&oxu->lock);
}

static void start_unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh);
static void unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh);

static void intr_deschedule(struct oxu_hcd *oxu, struct ehci_qh *qh);
static int qh_schedule(struct oxu_hcd *oxu, struct ehci_qh *qh);

#define HALT_BIT cpu_to_le32(QTD_STS_HALT)

/* Process and free completed qtds for a qh, returning URBs to drivers.
 * Chases up to qh->hw_current. Returns number of completions called,
 * indicating how much "real" work we did.
 */
static unsigned qh_completions(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	struct ehci_qtd *last = NULL, *end = qh->dummy;
	struct list_head *entry, *tmp;
	int stopped;
	unsigned count = 0;
	int do_status = 0;
	u8 state;
	struct oxu_murb *murb = NULL;

	if (unlikely(list_empty(&qh->qtd_list)))
		return count;

	/* completions (or tasks on other cpus) must never clobber HALT
	 * till we've gone through and cleaned everything up, even when
	 * they add urbs to this qh's queue or mark them for unlinking.
	 *
	 * NOTE: unlinking expects to be done in queue order.
	 */
	state = qh->qh_state;
	qh->qh_state = QH_STATE_COMPLETING;
	stopped = (state == QH_STATE_IDLE);

	/* remove de-activated QTDs from front of queue.
	 * after faults (including short reads), cleanup this urb
	 * then let the queue advance.
	 * if queue is stopped, handles unlinks.
	 */
	list_for_each_safe(entry, tmp, &qh->qtd_list) {
		struct ehci_qtd *qtd;
		struct urb *urb;
		u32 token = 0;

		qtd = list_entry(entry, struct ehci_qtd, qtd_list);
		urb = qtd->urb;

		/* Clean up any state from previous QTD ...*/
		if (last) {
			if (likely(last->urb != urb)) {
				if (last->urb->complete == NULL) {
					murb = (struct oxu_murb *) last->urb;
					last->urb = murb->main;
					if (murb->last) {
						ehci_urb_done(oxu, last->urb);
						count++;
					}
					oxu_murb_free(oxu, murb);
				} else {
					ehci_urb_done(oxu, last->urb);
					count++;
				}
			}
			oxu_qtd_free(oxu, last);
			last = NULL;
		}

		/* ignore urbs submitted during completions we reported */
		if (qtd == end)
			break;

		/* hardware copies qtd out of qh overlay */
		rmb();
		token = le32_to_cpu(qtd->hw_token);

		/* always clean up qtds the hc de-activated */
		if ((token & QTD_STS_ACTIVE) == 0) {
			if ((token & QTD_STS_HALT) != 0) {
				stopped = 1;

			/* magic dummy for some short reads; qh won't advance.
			 * that silicon quirk can kick in with this dummy too.
			 */
			} else if (IS_SHORT_READ(token) &&
					!(qtd->hw_alt_next & EHCI_LIST_END)) {
				stopped = 1;
				goto halt;
			}

		/* stop scanning when we reach qtds the hc is using */
		} else if (likely(!stopped &&
				HC_IS_RUNNING(oxu_to_hcd(oxu)->state))) {
			break;

		} else {
			stopped = 1;

			if (unlikely(!HC_IS_RUNNING(oxu_to_hcd(oxu)->state)))
				urb->status = -ESHUTDOWN;

			/* ignore active urbs unless some previous qtd
			 * for the urb faulted (including short read) or
			 * its urb was canceled. we may patch qh or qtds.
			 */
			if (likely(urb->status == -EINPROGRESS))
				continue;

			/* issue status after short control reads */
			if (unlikely(do_status != 0)
					&& QTD_PID(token) == 0 /* OUT */) {
				do_status = 0;
				continue;
			}

			/* token in overlay may be most current */
			if (state == QH_STATE_IDLE
					&& cpu_to_le32(qtd->qtd_dma)
						== qh->hw_current)
				token = le32_to_cpu(qh->hw_token);

			/* force halt for unlinked or blocked qh, so we'll
			 * patch the qh later and so that completions can't
			 * activate it while we "know" it's stopped.
			 */
			if ((HALT_BIT & qh->hw_token) == 0) {
halt:
				qh->hw_token |= HALT_BIT;
				wmb();
			}
		}

		/* Remove it from the queue */
		qtd_copy_status(oxu, urb->complete ?
					urb : ((struct oxu_murb *) urb)->main,
				qtd->length, token);
		if ((usb_pipein(qtd->urb->pipe)) &&
		    (NULL != qtd->transfer_buffer))
			memcpy(qtd->transfer_buffer, qtd->buffer, qtd->length);
		do_status = (urb->status == -EREMOTEIO)
				&& usb_pipecontrol(urb->pipe);

		if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
			last = list_entry(qtd->qtd_list.prev,
					struct ehci_qtd, qtd_list);
			last->hw_next = qtd->hw_next;
		}
		list_del(&qtd->qtd_list);
		last = qtd;
	}

	/* last urb's completion might still need calling */
	if (likely(last != NULL)) {
		if (last->urb->complete == NULL) {
			murb = (struct oxu_murb *) last->urb;
			last->urb = murb->main;
			if (murb->last) {
				ehci_urb_done(oxu, last->urb);
				count++;
			}
			oxu_murb_free(oxu, murb);
		} else {
			ehci_urb_done(oxu, last->urb);
			count++;
		}
		oxu_qtd_free(oxu, last);
	}

	/* restore original state; caller must unlink or relink */
	qh->qh_state = state;

	/* be sure the hardware's done with the qh before refreshing
	 * it after fault cleanup, or recovering from silicon wrongly
	 * overlaying the dummy qtd (which reduces DMA chatter).
	 */
	if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END) {
		switch (state) {
		case QH_STATE_IDLE:
			qh_refresh(oxu, qh);
			break;
		case QH_STATE_LINKED:
			/* should be rare for periodic transfers,
			 * except maybe high bandwidth ...
			 */
			if ((cpu_to_le32(QH_SMASK)
					& qh->hw_info2) != 0) {
				intr_deschedule(oxu, qh);
				(void) qh_schedule(oxu, qh);
			} else
				unlink_async(oxu, qh);
			break;
		/* otherwise, unlink already started */
		}
	}

	return count;
}

/* High bandwidth multiplier, as encoded in highspeed endpoint descriptors */
#define hb_mult(wMaxPacketSize)		(1 + (((wMaxPacketSize) >> 11) & 0x03))

/* ... and packet size, for any kind of endpoint descriptor */
#define max_packet(wMaxPacketSize)	((wMaxPacketSize) & 0x07ff)
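
/* For example, wMaxPacketSize 0x1400 on a high-bandwidth interrupt
 * endpoint decodes as max_packet() = 0x400 bytes per transaction with
 * hb_mult() = 3 transactions per microframe.
 */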

/* Reverse of qh_urb_transaction: free a list of TDs.
 * used for cleanup after errors, before HC sees an URB's TDs.
 */
static void qtd_list_free(struct oxu_hcd *oxu,
				struct urb *urb, struct list_head *qtd_list)
{
	struct list_head *entry, *temp;

	list_for_each_safe(entry, temp, qtd_list) {
		struct ehci_qtd *qtd;

		qtd = list_entry(entry, struct ehci_qtd, qtd_list);
		list_del(&qtd->qtd_list);
		oxu_qtd_free(oxu, qtd);
	}
}

/* Create a list of filled qtds for this URB; won't link into qh.
 */
static struct list_head *qh_urb_transaction(struct oxu_hcd *oxu,
						struct urb *urb,
						struct list_head *head,
						gfp_t flags)
{
	struct ehci_qtd *qtd, *qtd_prev;
	dma_addr_t buf;
	int len, maxpacket;
	int is_input;
	u32 token;
	void *transfer_buf = NULL;
	int ret;

	/*
	 * URBs map to sequences of QTDs: one logical transaction
	 */
	qtd = ehci_qtd_alloc(oxu);
	if (unlikely(!qtd))
		return NULL;
	list_add_tail(&qtd->qtd_list, head);
	qtd->urb = urb;

	token = QTD_STS_ACTIVE;
	token |= (EHCI_TUNE_CERR << 10);
	/* for split transactions, SplitXState initialized to zero */

	len = urb->transfer_buffer_length;
	is_input = usb_pipein(urb->pipe);
	if (!urb->transfer_buffer && urb->transfer_buffer_length && is_input)
		urb->transfer_buffer = phys_to_virt(urb->transfer_dma);

	if (usb_pipecontrol(urb->pipe)) {
		/* SETUP pid */
		ret = oxu_buf_alloc(oxu, qtd, sizeof(struct usb_ctrlrequest));
		if (ret)
			goto cleanup;

		qtd_fill(qtd, qtd->buffer_dma, sizeof(struct usb_ctrlrequest),
				token | (2 /* "setup" */ << 8), 8);
		memcpy(qtd->buffer, qtd->urb->setup_packet,
				sizeof(struct usb_ctrlrequest));

		/* ... and always at least one more pid */
		token ^= QTD_TOGGLE;
		qtd_prev = qtd;
		qtd = ehci_qtd_alloc(oxu);
		if (unlikely(!qtd))
			goto cleanup;
		qtd->urb = urb;
		qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
		list_add_tail(&qtd->qtd_list, head);

		/* for zero length DATA stages, STATUS is always IN */
		if (len == 0)
			token |= (1 /* "in" */ << 8);
	}

	/*
	 * Data transfer stage: buffer setup
	 */

	ret = oxu_buf_alloc(oxu, qtd, len);
	if (ret)
		goto cleanup;

	buf = qtd->buffer_dma;
	transfer_buf = urb->transfer_buffer;

	if (!is_input)
		memcpy(qtd->buffer, qtd->urb->transfer_buffer, len);

	if (is_input)
		token |= (1 /* "in" */ << 8);
	/* else it's already initted to "out" pid (0 << 8) */

	maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));

	/*
	 * buffer gets wrapped in one or more qtds;
	 * last one may be "short" (including zero len)
	 * and may serve as a control status ack
	 */
	for (;;) {
		int this_qtd_len;

		this_qtd_len = qtd_fill(qtd, buf, len, token, maxpacket);
		qtd->transfer_buffer = transfer_buf;
		len -= this_qtd_len;
		buf += this_qtd_len;
		transfer_buf += this_qtd_len;
		if (is_input)
			qtd->hw_alt_next = oxu->async->hw_alt_next;

		/* qh makes control packets use qtd toggle; maybe switch it */
		if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
			token ^= QTD_TOGGLE;

		if (likely(len <= 0))
			break;

		qtd_prev = qtd;
		qtd = ehci_qtd_alloc(oxu);
		if (unlikely(!qtd))
			goto cleanup;
		if (likely(len > 0)) {
			ret = oxu_buf_alloc(oxu, qtd, len);
			if (ret)
				goto cleanup;
		}
		qtd->urb = urb;
		qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
		list_add_tail(&qtd->qtd_list, head);
	}

	/* unless the bulk/interrupt caller wants a chance to clean
	 * up after short reads, hc should advance qh past this urb
	 */
	if (likely((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
				|| usb_pipecontrol(urb->pipe)))
		qtd->hw_alt_next = EHCI_LIST_END;

	/*
	 * control requests may need a terminating data "status" ack;
	 * bulk ones may need a terminating short packet (zero length).
	 */
	if (likely(urb->transfer_buffer_length != 0)) {
		int one_more = 0;

		if (usb_pipecontrol(urb->pipe)) {
			one_more = 1;
			token ^= 0x0100;	/* "in" <--> "out"  */
			token |= QTD_TOGGLE;	/* force DATA1 */
		} else if (usb_pipebulk(urb->pipe)
				&& (urb->transfer_flags & URB_ZERO_PACKET)
				&& !(urb->transfer_buffer_length % maxpacket)) {
			one_more = 1;
		}
		if (one_more) {
			qtd_prev = qtd;
			qtd = ehci_qtd_alloc(oxu);
			if (unlikely(!qtd))
				goto cleanup;
			qtd->urb = urb;
			qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
			list_add_tail(&qtd->qtd_list, head);

			/* never any data in such packets */
			qtd_fill(qtd, 0, 0, token, 0);
		}
	}

	/* by default, enable interrupt on urb completion */
	qtd->hw_token |= cpu_to_le32(QTD_IOC);
	return head;

cleanup:
	qtd_list_free(oxu, urb, head);
	return NULL;
}
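
/*
 * Shape of the resulting list for a control-IN transfer (sketch):
 *
 *	SETUP qtd (8 bytes, DATA0)
 *	  -> one or more IN data qtds, toggling DATA1/DATA0
 *	  -> zero-length OUT status qtd, forced to DATA1
 *
 * with QTD_IOC set on the final qtd so its retirement interrupts us.
 */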
/* Each QH holds a qtd list; a QH is used for everything except iso.
 *
 * For interrupt urbs, the scheduler must set the microframe scheduling
 * mask(s) each time the QH gets scheduled.  For highspeed, that's
 * just one microframe in the s-mask.  For split interrupt transactions
 * there are additional complications: c-mask, maybe FSTNs.
 */
static struct ehci_qh *qh_make(struct oxu_hcd *oxu,
				struct urb *urb, gfp_t flags)
{
	struct ehci_qh *qh = oxu_qh_alloc(oxu);
	u32 info1 = 0, info2 = 0;
	int is_input, type;
	int maxp = 0;

	if (!qh)
		return qh;

	/*
	 * init endpoint/device data for this QH
	 */
	info1 |= usb_pipeendpoint(urb->pipe) << 8;
	info1 |= usb_pipedevice(urb->pipe) << 0;

	is_input = usb_pipein(urb->pipe);
	type = usb_pipetype(urb->pipe);
	maxp = usb_maxpacket(urb->dev, urb->pipe, !is_input);

	/* Compute interrupt scheduling parameters just once, and save.
	 * - allowing for high bandwidth, how many nsec/uframe are used?
	 * - split transactions need a second CSPLIT uframe; same question
	 * - splits also need a schedule gap (for full/low speed I/O)
	 * - qh has a polling interval
	 *
	 * For control/bulk requests, the HC or TT handles these.
	 */
	if (type == PIPE_INTERRUPT) {
		qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
						is_input, 0,
						hb_mult(maxp) * max_packet(maxp)));
		qh->start = NO_FRAME;

		if (urb->dev->speed == USB_SPEED_HIGH) {
			qh->c_usecs = 0;
			qh->gap_uf = 0;

			qh->period = urb->interval >> 3;
			if (qh->period == 0 && urb->interval != 1) {
				/* NOTE interval 2 or 4 uframes could work.
				 * But interval 1 scheduling is simpler, and
				 * includes high bandwidth.
				 */
				dbg("intr period %d uframes, NYET!",
						urb->interval);
				goto done;
			}
		} else {
			struct usb_tt *tt = urb->dev->tt;
			int think_time;

			/* gap is f(FS/LS transfer times) */
			qh->gap_uf = 1 + usb_calc_bus_time(urb->dev->speed,
					is_input, 0, maxp) / (125 * 1000);

			/* FIXME this just approximates SPLIT/CSPLIT times */
			if (is_input) {		/* SPLIT, gap, CSPLIT+DATA */
				qh->c_usecs = qh->usecs + HS_USECS(0);
				qh->usecs = HS_USECS(1);
			} else {		/* SPLIT+DATA, gap, CSPLIT */
				qh->usecs += HS_USECS(1);
				qh->c_usecs = HS_USECS(0);
			}

			think_time = tt ? tt->think_time : 0;
			qh->tt_usecs = NS_TO_US(think_time +
					usb_calc_bus_time(urb->dev->speed,
						is_input, 0, max_packet(maxp)));
			qh->period = urb->interval;
		}
	}

	/* support for tt scheduling, and access to toggles */
	qh->dev = urb->dev;

	/* using TT? */
	switch (urb->dev->speed) {
	case USB_SPEED_LOW:
		info1 |= (1 << 12);	/* EPS "low" */
		/* FALL THROUGH */

	case USB_SPEED_FULL:
		/* EPS 0 means "full" */
		if (type != PIPE_INTERRUPT)
			info1 |= (EHCI_TUNE_RL_TT << 28);
		if (type == PIPE_CONTROL) {
			info1 |= (1 << 27);	/* for TT */
			info1 |= 1 << 14;	/* toggle from qtd */
		}
		info1 |= maxp << 16;

		info2 |= (EHCI_TUNE_MULT_TT << 30);
		info2 |= urb->dev->ttport << 23;

		/* NOTE: if (PIPE_INTERRUPT) { scheduler sets c-mask } */
		break;

	case USB_SPEED_HIGH:	/* no TT involved */
		info1 |= (2 << 12);	/* EPS "high" */
		if (type == PIPE_CONTROL) {
			info1 |= (EHCI_TUNE_RL_HS << 28);
			info1 |= 64 << 16;	/* usb2 fixed maxpacket */
			info1 |= 1 << 14;	/* toggle from qtd */
			info2 |= (EHCI_TUNE_MULT_HS << 30);
		} else if (type == PIPE_BULK) {
			info1 |= (EHCI_TUNE_RL_HS << 28);
			info1 |= 512 << 16;	/* usb2 fixed maxpacket */
			info2 |= (EHCI_TUNE_MULT_HS << 30);
		} else {		/* PIPE_INTERRUPT */
			info1 |= max_packet(maxp) << 16;
			info2 |= hb_mult(maxp) << 30;
		}
		break;

	default:
		dbg("bogus dev %p speed %d", urb->dev, urb->dev->speed);
done:
		qh_put(qh);
		return NULL;
	}

	/* NOTE: if (PIPE_INTERRUPT) { scheduler sets s-mask } */

	/* init as live, toggle clear, advance to dummy */
	qh->qh_state = QH_STATE_IDLE;
	qh->hw_info1 = cpu_to_le32(info1);
	qh->hw_info2 = cpu_to_le32(info2);
	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), !is_input, 1);
	qh_refresh(oxu, qh);
	return qh;
}
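/* Worked example (annotation, not part of the original driver): for a
 * high-speed bulk endpoint 2-IN on device address 5, the switch above
 * assembles info1 as
 *
 *	(5 << 0)		device address
 *	(2 << 8)		endpoint number
 *	(2 << 12)		EPS = high speed
 *	(512 << 16)		maxpacket, fixed at 512 for usb2 bulk
 *	(EHCI_TUNE_RL_HS << 28)	NAK reload count
 *
 * matching the dword-1 layout of an EHCI queue head (EHCI spec 3.6.2).
 */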
/* Move qh (and its qtds) onto async queue; maybe enable queue.
 */
static void qh_link_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	__le32 dma = QH_NEXT(qh->qh_dma);
	struct ehci_qh *head;

	/* (re)start the async schedule? */
	head = oxu->async;
	timer_action_done(oxu, TIMER_ASYNC_OFF);
	if (!head->qh_next.qh) {
		u32 cmd = readl(&oxu->regs->command);

		if (!(cmd & CMD_ASE)) {
			/* in case a clear of CMD_ASE didn't take yet */
			(void)handshake(oxu, &oxu->regs->status,
					STS_ASS, 0, 150);
			cmd |= CMD_ASE | CMD_RUN;
			writel(cmd, &oxu->regs->command);
			oxu_to_hcd(oxu)->state = HC_STATE_RUNNING;
			/* posted write need not be known to HC yet ... */
		}
	}

	/* clear halt and/or toggle; and maybe recover from silicon quirk */
	if (qh->qh_state == QH_STATE_IDLE)
		qh_refresh(oxu, qh);

	/* splice right after start */
	qh->qh_next = head->qh_next;
	qh->hw_next = head->hw_next;
	wmb();

	head->qh_next.qh = qh;
	head->hw_next = dma;

	qh->qh_state = QH_STATE_LINKED;
	/* qtd completions reported later by interrupt */
}

#define	QH_ADDR_MASK	cpu_to_le32(0x7f)

/*
 * For control/bulk/interrupt, return QH with these TDs appended.
 * Allocates and initializes the QH if necessary.
 * Returns null if it can't allocate a QH it needs to.
 * If the QH has TDs (urbs) already, that's great.
 */
static struct ehci_qh *qh_append_tds(struct oxu_hcd *oxu,
				struct urb *urb, struct list_head *qtd_list,
				int epnum, void **ptr)
{
	struct ehci_qh *qh = NULL;

	qh = (struct ehci_qh *) *ptr;
	if (unlikely(qh == NULL)) {
		/* can't sleep here, we have oxu->lock... */
		qh = qh_make(oxu, urb, GFP_ATOMIC);
		*ptr = qh;
	}
	if (likely(qh != NULL)) {
		struct ehci_qtd *qtd;

		if (unlikely(list_empty(qtd_list)))
			qtd = NULL;
		else
			qtd = list_entry(qtd_list->next, struct ehci_qtd,
					qtd_list);

		/* control qh may need patching ... */
		if (unlikely(epnum == 0)) {
			/* usb_reset_device() briefly reverts to address 0 */
			if (usb_pipedevice(urb->pipe) == 0)
				qh->hw_info1 &= ~QH_ADDR_MASK;
		}

		/* just one way to queue requests: swap with the dummy qtd.
		 * only hc or qh_refresh() ever modify the overlay.
		 */
		if (likely(qtd != NULL)) {
			struct ehci_qtd *dummy;
			dma_addr_t dma;
			__le32 token;

			/* to avoid racing the HC, use the dummy td instead of
			 * the first td of our list (becomes new dummy).  both
			 * tds stay deactivated until we're done, when the
			 * HC is allowed to fetch the old dummy (4.10.2).
			 */
			token = qtd->hw_token;
			qtd->hw_token = HALT_BIT;
			wmb();
			dummy = qh->dummy;

			dma = dummy->qtd_dma;
			*dummy = *qtd;
			dummy->qtd_dma = dma;

			list_del(&qtd->qtd_list);
			list_add(&dummy->qtd_list, qtd_list);
			list_splice(qtd_list, qh->qtd_list.prev);

			ehci_qtd_init(qtd, qtd->qtd_dma);
			qh->dummy = qtd;

			/* hc must see the new dummy at list end */
			dma = qtd->qtd_dma;
			qtd = list_entry(qh->qtd_list.prev,
					struct ehci_qtd, qtd_list);
			qtd->hw_next = QTD_NEXT(dma);

			/* let the hc process these next qtds */
			dummy->hw_token = (token & ~(0x80));
			wmb();
			dummy->hw_token = token;

			urb->hcpriv = qh_get(qh);
		}
	}
	return qh;
}
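/* Annotation (not part of the original driver): the swap above is the
 * standard EHCI "dummy qtd" trick.  A qh always ends with one inactive
 * dummy; new work is copied *into* the old dummy, and the first new
 * qtd becomes the next dummy.  Activating the old dummy last -- two
 * writes to hw_token around a wmb() -- means the controller can never
 * observe a half-built list, so no lock against the hardware is needed.
 */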
static int submit_async(struct oxu_hcd *oxu, struct urb *urb,
			struct list_head *qtd_list, gfp_t mem_flags)
{
	struct ehci_qtd *qtd;
	int epnum;
	unsigned long flags;
	struct ehci_qh *qh = NULL;
	int rc = 0;

	qtd = list_entry(qtd_list->next, struct ehci_qtd, qtd_list);
	epnum = urb->ep->desc.bEndpointAddress;

#ifdef OXU_URB_TRACE
	oxu_dbg(oxu, "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
		__func__, urb->dev->devpath, urb,
		epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
		urb->transfer_buffer_length,
		qtd, urb->ep->hcpriv);
#endif

	spin_lock_irqsave(&oxu->lock, flags);
	if (unlikely(!HCD_HW_ACCESSIBLE(oxu_to_hcd(oxu)))) {
		rc = -ESHUTDOWN;
		goto done;
	}

	qh = qh_append_tds(oxu, urb, qtd_list, epnum, &urb->ep->hcpriv);
	if (unlikely(qh == NULL)) {
		rc = -ENOMEM;
		goto done;
	}

	/* Control/bulk operations through TTs don't need scheduling,
	 * the HC and TT handle it when the TT has a buffer ready.
	 */
	if (likely(qh->qh_state == QH_STATE_IDLE))
		qh_link_async(oxu, qh_get(qh));
done:
	spin_unlock_irqrestore(&oxu->lock, flags);
	if (unlikely(qh == NULL))
		qtd_list_free(oxu, urb, qtd_list);
	return rc;
}

/* The async qh for the qtds being reclaimed are now unlinked from the HC */
static void end_unlink_async(struct oxu_hcd *oxu)
{
	struct ehci_qh *qh = oxu->reclaim;
	struct ehci_qh *next;

	timer_action_done(oxu, TIMER_IAA_WATCHDOG);

	qh->qh_state = QH_STATE_IDLE;
	qh->qh_next.qh = NULL;
	qh_put(qh);			/* refcount from reclaim */

	/* other unlink(s) may be pending (in QH_STATE_UNLINK_WAIT) */
	next = qh->reclaim;
	oxu->reclaim = next;
	oxu->reclaim_ready = 0;
	qh->reclaim = NULL;

	qh_completions(oxu, qh);

	if (!list_empty(&qh->qtd_list)
			&& HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
		qh_link_async(oxu, qh);
	else {
		qh_put(qh);		/* refcount from async list */

		/* it's not free to turn the async schedule on/off; leave it
		 * active but idle for a while once it empties.
		 */
		if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state)
				&& oxu->async->qh_next.qh == NULL)
			timer_action(oxu, TIMER_ASYNC_OFF);
	}

	if (next) {
		oxu->reclaim = NULL;
		start_unlink_async(oxu, next);
	}
}

/* makes sure the async qh will become idle */
/* caller must own oxu->lock */
static void start_unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	int cmd = readl(&oxu->regs->command);
	struct ehci_qh *prev;

#ifdef DEBUG
	assert_spin_locked(&oxu->lock);
	if (oxu->reclaim || (qh->qh_state != QH_STATE_LINKED
				&& qh->qh_state != QH_STATE_UNLINK_WAIT))
		BUG();
#endif

	/* stop async schedule right now? */
	if (unlikely(qh == oxu->async)) {
		/* can't get here without STS_ASS set */
		if (oxu_to_hcd(oxu)->state != HC_STATE_HALT
				&& !oxu->reclaim) {
			/* ... and CMD_IAAD clear */
			writel(cmd & ~CMD_ASE, &oxu->regs->command);
			wmb();
			/* handshake later, if we need to */
			timer_action_done(oxu, TIMER_ASYNC_OFF);
		}
		return;
	}

	qh->qh_state = QH_STATE_UNLINK;
	oxu->reclaim = qh = qh_get(qh);

	prev = oxu->async;
	while (prev->qh_next.qh != qh)
		prev = prev->qh_next.qh;

	prev->hw_next = qh->hw_next;
	prev->qh_next = qh->qh_next;
	wmb();

	if (unlikely(oxu_to_hcd(oxu)->state == HC_STATE_HALT)) {
		/* if (unlikely(qh->reclaim != 0))
		 *	this will recurse, probably not much
		 */
		end_unlink_async(oxu);
		return;
	}

	oxu->reclaim_ready = 0;
	cmd |= CMD_IAAD;
	writel(cmd, &oxu->regs->command);
	(void) readl(&oxu->regs->command);
	timer_action(oxu, TIMER_IAA_WATCHDOG);
}
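/* Annotation (not part of the original driver): unlinking is a
 * two-step dance with the silicon.  start_unlink_async() splices the
 * qh out of the software schedule and sets CMD_IAAD; the controller
 * later raises STS_IAA ("interrupt on async advance") once it can no
 * longer be caching the qh, and only then does end_unlink_async()
 * recycle or relink it.  TIMER_IAA_WATCHDOG covers chips that lose
 * the IAA interrupt.
 */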
static void scan_async(struct oxu_hcd *oxu)
{
	struct ehci_qh *qh;
	enum ehci_timer_action action = TIMER_IO_WATCHDOG;

	if (!++(oxu->stamp))
		oxu->stamp++;
	timer_action_done(oxu, TIMER_ASYNC_SHRINK);
rescan:
	qh = oxu->async->qh_next.qh;
	if (likely(qh != NULL)) {
		do {
			/* clean any finished work for this qh */
			if (!list_empty(&qh->qtd_list)
					&& qh->stamp != oxu->stamp) {
				int temp;

				/* unlinks could happen here; completion
				 * reporting drops the lock.  rescan using
				 * the latest schedule, but don't rescan
				 * qhs we already finished (no looping).
				 */
				qh = qh_get(qh);
				qh->stamp = oxu->stamp;
				temp = qh_completions(oxu, qh);
				qh_put(qh);
				if (temp != 0)
					goto rescan;
			}

			/* unlink idle entries, reducing HC PCI usage as well
			 * as HCD schedule-scanning costs.  delay for any qh
			 * we just scanned, there's a not-unusual case that it
			 * doesn't stay idle for long.
			 * (plus, avoids some kind of re-activation race.)
			 */
			if (list_empty(&qh->qtd_list)) {
				if (qh->stamp == oxu->stamp)
					action = TIMER_ASYNC_SHRINK;
				else if (!oxu->reclaim
					    && qh->qh_state == QH_STATE_LINKED)
					start_unlink_async(oxu, qh);
			}

			qh = qh->qh_next.qh;
		} while (qh);
	}
	if (action == TIMER_ASYNC_SHRINK)
		timer_action(oxu, TIMER_ASYNC_SHRINK);
}

/*
 * periodic_next_shadow - return "next" pointer on shadow list
 * @periodic: host pointer to qh/itd/sitd
 * @tag: hardware tag for type of this record
 */
static union ehci_shadow *periodic_next_shadow(union ehci_shadow *periodic,
						__le32 tag)
{
	switch (tag) {
	default:
	case Q_TYPE_QH:
		return &periodic->qh->qh_next;
	}
}

/* caller must hold oxu->lock */
static void periodic_unlink(struct oxu_hcd *oxu, unsigned frame, void *ptr)
{
	union ehci_shadow *prev_p = &oxu->pshadow[frame];
	__le32 *hw_p = &oxu->periodic[frame];
	union ehci_shadow here = *prev_p;

	/* find predecessor of "ptr"; hw and shadow lists are in sync */
	while (here.ptr && here.ptr != ptr) {
		prev_p = periodic_next_shadow(prev_p, Q_NEXT_TYPE(*hw_p));
		hw_p = here.hw_next;
		here = *prev_p;
	}
	/* an interrupt entry (at list end) could have been shared */
	if (!here.ptr)
		return;

	/* update shadow and hardware lists ... the old "next" pointers
	 * from ptr may still be in use, the caller updates them.
	 */
	*prev_p = *periodic_next_shadow(&here, Q_NEXT_TYPE(*hw_p));
	*hw_p = *here.hw_next;
}

/* how many of the uframe's 125 usecs are allocated? */
static unsigned short periodic_usecs(struct oxu_hcd *oxu,
					unsigned frame, unsigned uframe)
{
	__le32 *hw_p = &oxu->periodic[frame];
	union ehci_shadow *q = &oxu->pshadow[frame];
	unsigned usecs = 0;

	while (q->ptr) {
		switch (Q_NEXT_TYPE(*hw_p)) {
		case Q_TYPE_QH:
		default:
			/* is it in the S-mask? */
			if (q->qh->hw_info2 & cpu_to_le32(1 << uframe))
				usecs += q->qh->usecs;
			/* ... or C-mask? */
			if (q->qh->hw_info2 & cpu_to_le32(1 << (8 + uframe)))
				usecs += q->qh->c_usecs;
			hw_p = &q->qh->hw_next;
			q = &q->qh->qh_next;
			break;
		}
	}
#ifdef DEBUG
	if (usecs > 100)
		oxu_err(oxu, "uframe %d sched overrun: %d usecs\n",
			frame * 8 + uframe, usecs);
#endif
	return usecs;
}
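/* Worked numbers (annotation, not part of the original driver): a
 * uframe is 125 us and the EHCI rule reserves at most 80% of it for
 * periodic traffic, hence the 100 us budget that check_period() below
 * tests against this function's result.  E.g. two interrupt qhs
 * costing 40 us each in uframe 3 leave only 20 us there, so a third
 * 40 us qh has to land in a different uframe.
 */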
static int enable_periodic(struct oxu_hcd *oxu)
{
	u32 cmd;
	int status;

	/* did clearing PSE take effect yet?
	 * takes effect only at frame boundaries...
	 */
	status = handshake(oxu, &oxu->regs->status, STS_PSS, 0, 9 * 125);
	if (status != 0) {
		oxu_to_hcd(oxu)->state = HC_STATE_HALT;
		return status;
	}

	cmd = readl(&oxu->regs->command) | CMD_PSE;
	writel(cmd, &oxu->regs->command);
	/* posted write ... PSS happens later */
	oxu_to_hcd(oxu)->state = HC_STATE_RUNNING;

	/* make sure ehci_work scans these */
	oxu->next_uframe = readl(&oxu->regs->frame_index)
		% (oxu->periodic_size << 3);
	return 0;
}
static int disable_periodic(struct oxu_hcd *oxu)
{
	u32 cmd;
	int status;

	/* did setting PSE not take effect yet?
	 * takes effect only at frame boundaries...
	 */
	status = handshake(oxu, &oxu->regs->status, STS_PSS, STS_PSS, 9 * 125);
	if (status != 0) {
		oxu_to_hcd(oxu)->state = HC_STATE_HALT;
		return status;
	}

	cmd = readl(&oxu->regs->command) & ~CMD_PSE;
	writel(cmd, &oxu->regs->command);
	/* posted write ... */

	oxu->next_uframe = -1;
	return 0;
}

/* periodic schedule slots have iso tds (normal or split) first, then a
 * sparse tree for active interrupt transfers.
 *
 * this just links in a qh; caller guarantees uframe masks are set right.
 * no FSTN support (yet; oxu 0.96+)
 */
static int qh_link_periodic(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	unsigned i;
	unsigned period = qh->period;

	dev_dbg(&qh->dev->dev,
		"link qh%d-%04x/%p start %d [%d/%d us]\n",
		period, le32_to_cpup(&qh->hw_info2) & (QH_CMASK | QH_SMASK),
		qh, qh->start, qh->usecs, qh->c_usecs);

	/* high bandwidth, or otherwise every microframe */
	if (period == 0)
		period = 1;

	for (i = qh->start; i < oxu->periodic_size; i += period) {
		union ehci_shadow	*prev = &oxu->pshadow[i];
		__le32			*hw_p = &oxu->periodic[i];
		union ehci_shadow	here = *prev;
		__le32			type = 0;

		/* skip the iso nodes at list head */
		while (here.ptr) {
			type = Q_NEXT_TYPE(*hw_p);
			if (type == Q_TYPE_QH)
				break;
			prev = periodic_next_shadow(prev, type);
			hw_p = &here.qh->hw_next;
			here = *prev;
		}

		/* sorting each branch by period (slow-->fast)
		 * enables sharing interior tree nodes
		 */
		while (here.ptr && qh != here.qh) {
			if (qh->period > here.qh->period)
				break;
			prev = &here.qh->qh_next;
			hw_p = &here.qh->hw_next;
			here = *prev;
		}
		/* link in this qh, unless some earlier pass did that */
		if (qh != here.qh) {
			qh->qh_next = here;
			if (here.qh)
				qh->hw_next = *hw_p;
			wmb();
			prev->qh = qh;
			*hw_p = QH_NEXT(qh->qh_dma);
		}
	}
	qh->qh_state = QH_STATE_LINKED;
	qh_get(qh);

	/* update per-qh bandwidth for usbfs */
	oxu_to_hcd(oxu)->self.bandwidth_allocated += qh->period
		? ((qh->usecs + qh->c_usecs) / qh->period)
		: (qh->usecs * 8);

	/* maybe enable periodic schedule processing */
	if (!oxu->periodic_sched++)
		return enable_periodic(oxu);

	return 0;
}
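/* Example (annotation, not part of the original driver): with
 * qh->period 4 and qh->start 1, the loop above links the qh into
 * periodic slots 1, 5, 9, ...  The slow-to-fast sort means a period-1
 * qh linked later is placed *after* it on every branch, so every
 * frame's list converges on the same shared tail node instead of
 * duplicating it per frame.
 */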
static void qh_unlink_periodic(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	unsigned i;
	unsigned period;

	/* FIXME:
	 *   IF this isn't high speed
	 *   and this qh is active in the current uframe
	 *   (and overlay token SplitXstate is false?)
	 * THEN
	 *   qh->hw_info1 |= cpu_to_le32(1 << 7 "ignore");
	 */

	/* high bandwidth, or otherwise part of every microframe */
	period = qh->period;
	if (period == 0)
		period = 1;

	for (i = qh->start; i < oxu->periodic_size; i += period)
		periodic_unlink(oxu, i, qh);

	/* update per-qh bandwidth for usbfs */
	oxu_to_hcd(oxu)->self.bandwidth_allocated -= qh->period
		? ((qh->usecs + qh->c_usecs) / qh->period)
		: (qh->usecs * 8);

	dev_dbg(&qh->dev->dev,
		"unlink qh%d-%04x/%p start %d [%d/%d us]\n",
		qh->period,
		le32_to_cpup(&qh->hw_info2) & (QH_CMASK | QH_SMASK),
		qh, qh->start, qh->usecs, qh->c_usecs);

	/* qh->qh_next still "live" to HC */
	qh->qh_state = QH_STATE_UNLINK;
	qh->qh_next.ptr = NULL;
	qh_put(qh);

	/* maybe turn off periodic schedule */
	oxu->periodic_sched--;
	if (!oxu->periodic_sched)
		(void) disable_periodic(oxu);
}

static void intr_deschedule(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	unsigned wait;

	qh_unlink_periodic(oxu, qh);

	/* simple/paranoid:  always delay, expecting the HC needs to read
	 * qh->hw_next or finish a writeback after SPLIT/CSPLIT ... and
	 * expect khubd to clean up after any CSPLITs we won't issue.
	 * active high speed queues may need bigger delays...
	 */
	if (list_empty(&qh->qtd_list)
		|| (cpu_to_le32(QH_CMASK) & qh->hw_info2) != 0)
		wait = 2;
	else
		wait = 55;	/* worst case: 3 * 1024 */

	udelay(wait);
	qh->qh_state = QH_STATE_IDLE;
	qh->hw_next = EHCI_LIST_END;
	wmb();
}

static int check_period(struct oxu_hcd *oxu,
			unsigned frame, unsigned uframe,
			unsigned period, unsigned usecs)
{
	int claimed;

	/* complete split running into next frame?
	 * given FSTN support, we could sometimes check...
	 */
	if (uframe >= 8)
		return 0;

	/*
	 * 80% periodic == 100 usec/uframe available
	 * convert "usecs we need" to "max already claimed"
	 */
	usecs = 100 - usecs;

	/* we "know" 2 and 4 uframe intervals were rejected; so
	 * for period 0, check _every_ microframe in the schedule.
	 */
	if (unlikely(period == 0)) {
		do {
			for (uframe = 0; uframe < 7; uframe++) {
				claimed = periodic_usecs(oxu, frame, uframe);
				if (claimed > usecs)
					return 0;
			}
		} while ((frame += 1) < oxu->periodic_size);

	/* just check the specified uframe, at that period */
	} else {
		do {
			claimed = periodic_usecs(oxu, frame, uframe);
			if (claimed > usecs)
				return 0;
		} while ((frame += period) < oxu->periodic_size);
	}

	return 1;
}
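/* Annotation (not part of the original driver): the "usecs = 100 -
 * usecs" step above just inverts the test so the loop can compare
 * against what is already claimed: needing 30 us in a uframe is the
 * same as requiring that no more than 70 us be claimed there already.
 */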
static int check_intr_schedule(struct oxu_hcd *oxu,
				unsigned frame, unsigned uframe,
				const struct ehci_qh *qh, __le32 *c_maskp)
{
	int retval = -ENOSPC;

	if (qh->c_usecs && uframe >= 6)		/* FSTN territory? */
		goto done;

	if (!check_period(oxu, frame, uframe, qh->period, qh->usecs))
		goto done;
	if (!qh->c_usecs) {
		retval = 0;
		*c_maskp = 0;
		goto done;
	}

done:
	return retval;
}

/* "first fit" scheduling policy used the first time through,
 * or when the previous schedule slot can't be re-used.
 */
static int qh_schedule(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	int status;
	unsigned uframe;
	__le32 c_mask;
	unsigned frame;		/* 0..(qh->period - 1), or NO_FRAME */

	qh_refresh(oxu, qh);
	qh->hw_next = EHCI_LIST_END;
	frame = qh->start;

	/* reuse the previous schedule slots, if we can */
	if (frame < qh->period) {
		uframe = ffs(le32_to_cpup(&qh->hw_info2) & QH_SMASK);
		status = check_intr_schedule(oxu, frame, --uframe,
				qh, &c_mask);
	} else {
		uframe = 0;
		c_mask = 0;
		status = -ENOSPC;
	}

	/* else scan the schedule to find a group of slots such that all
	 * uframes have enough periodic bandwidth available.
	 */
	if (status) {
		/* "normal" case, uframing flexible except with splits */
		if (qh->period) {
			frame = qh->period - 1;
			do {
				for (uframe = 0; uframe < 8; uframe++) {
					status = check_intr_schedule(oxu,
							frame, uframe, qh,
							&c_mask);
					if (status == 0)
						break;
				}
			} while (status && frame--);

		/* qh->period == 0 means every uframe */
		} else {
			frame = 0;
			status = check_intr_schedule(oxu, 0, 0, qh, &c_mask);
		}
		if (status)
			goto done;
		qh->start = frame;

		/* reset S-frame and (maybe) C-frame masks */
		qh->hw_info2 &= cpu_to_le32(~(QH_CMASK | QH_SMASK));
		qh->hw_info2 |= qh->period
			? cpu_to_le32(1 << uframe)
			: cpu_to_le32(QH_SMASK);
		qh->hw_info2 |= c_mask;
	} else
		oxu_dbg(oxu, "reused qh %p schedule\n", qh);

	/* stuff into the periodic schedule */
	status = qh_link_periodic(oxu, qh);
done:
	return status;
}
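/* Annotation (not part of the original driver): the search above is
 * plain first-fit -- frames are tried from qh->period - 1 down to 0
 * and, within each frame, uframes 0..7 in order; the first
 * (frame, uframe) pair that check_intr_schedule() accepts wins, and
 * its bit is written into the S-mask of hw_info2.
 */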
static int intr_submit(struct oxu_hcd *oxu, struct urb *urb,
			struct list_head *qtd_list, gfp_t mem_flags)
{
	unsigned epnum;
	unsigned long flags;
	struct ehci_qh *qh;
	int status = 0;
	struct list_head empty;

	/* get endpoint and transfer/schedule data */
	epnum = urb->ep->desc.bEndpointAddress;

	spin_lock_irqsave(&oxu->lock, flags);

	if (unlikely(!HCD_HW_ACCESSIBLE(oxu_to_hcd(oxu)))) {
		status = -ESHUTDOWN;
		goto done;
	}

	/* get qh and force any scheduling errors */
	INIT_LIST_HEAD(&empty);
	qh = qh_append_tds(oxu, urb, &empty, epnum, &urb->ep->hcpriv);
	if (qh == NULL) {
		status = -ENOMEM;
		goto done;
	}
	if (qh->qh_state == QH_STATE_IDLE) {
		status = qh_schedule(oxu, qh);
		if (status != 0)
			goto done;
	}

	/* then queue the urb's tds to the qh */
	qh = qh_append_tds(oxu, urb, qtd_list, epnum, &urb->ep->hcpriv);
	BUG_ON(qh == NULL);

	/* ... update usbfs periodic stats */
	oxu_to_hcd(oxu)->self.bandwidth_int_reqs++;

done:
	spin_unlock_irqrestore(&oxu->lock, flags);
	if (status)
		qtd_list_free(oxu, urb, qtd_list);

	return status;
}

static inline int itd_submit(struct oxu_hcd *oxu, struct urb *urb,
				gfp_t mem_flags)
{
	oxu_dbg(oxu, "iso support is missing!\n");
	return -ENOSYS;
}

static inline int sitd_submit(struct oxu_hcd *oxu, struct urb *urb,
				gfp_t mem_flags)
{
	oxu_dbg(oxu, "split iso support is missing!\n");
	return -ENOSYS;
}

static void scan_periodic(struct oxu_hcd *oxu)
{
	unsigned frame, clock, now_uframe, mod;
	unsigned modified;

	mod = oxu->periodic_size << 3;

	/*
	 * When running, scan from last scan point up to "now"
	 * else clean up by scanning everything that's left.
	 * Touches as few pages as possible:  cache-friendly.
	 */
	now_uframe = oxu->next_uframe;
	if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
		clock = readl(&oxu->regs->frame_index);
	else
		clock = now_uframe + mod - 1;
	clock %= mod;

	for (;;) {
		union ehci_shadow	q, *q_p;
		__le32			type, *hw_p;
		unsigned		uframes;

		/* don't scan past the live uframe */
		frame = now_uframe >> 3;
		if (frame == (clock >> 3))
			uframes = now_uframe & 0x07;
		else {
			/* safe to scan the whole frame at once */
			now_uframe |= 0x07;
			uframes = 8;
		}

restart:
		/* scan each element in frame's queue for completions */
		q_p = &oxu->pshadow[frame];
		hw_p = &oxu->periodic[frame];
		q.ptr = q_p->ptr;
		type = Q_NEXT_TYPE(*hw_p);
		modified = 0;

		while (q.ptr != NULL) {
			union ehci_shadow temp;
			int live;

			live = HC_IS_RUNNING(oxu_to_hcd(oxu)->state);
			switch (type) {
			case Q_TYPE_QH:
				/* handle any completions */
				temp.qh = qh_get(q.qh);
				type = Q_NEXT_TYPE(q.qh->hw_next);
				q = q.qh->qh_next;
				modified = qh_completions(oxu, temp.qh);
				if (unlikely(list_empty(&temp.qh->qtd_list)))
					intr_deschedule(oxu, temp.qh);
				qh_put(temp.qh);
				break;
			default:
				dbg("corrupt type %d frame %d shadow %p",
					type, frame, q.ptr);
				q.ptr = NULL;
			}

			/* assume completion callbacks modify the queue */
			if (unlikely(modified))
				goto restart;
		}

		/* Stop when we catch up to the HC */

		/* FIXME:  this assumes we won't get lapped when
		 * latencies climb; that should be rare, but...
		 * detect it, and just go all the way around.
		 * FLR might help detect this case, so long as latencies
		 * don't exceed periodic_size msec (default 1.024 sec).
		 */

		/* FIXME: likewise assumes HC doesn't halt mid-scan */

		if (now_uframe == clock) {
			unsigned now;

			if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
				break;
			oxu->next_uframe = now_uframe;
			now = readl(&oxu->regs->frame_index) % mod;
			if (now_uframe == now)
				break;

			/* rescan the rest of this frame, then ... */
			clock = now;
		} else {
			now_uframe++;
			now_uframe %= mod;
		}
	}
}
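/* Annotation (not part of the original driver): the scan runs from the
 * last scan point up to the frame counter sampled at entry; when it
 * catches up it re-reads frame_index, because qh_completions() drops
 * oxu->lock for the completion callbacks and the controller keeps
 * moving while they run.
 */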
/* On some systems, leaving remote wakeup enabled prevents system shutdown.
 * The firmware seems to think that powering off is a wakeup event!
 * This routine turns off remote wakeup and everything else, on all ports.
 */
static void ehci_turn_off_all_ports(struct oxu_hcd *oxu)
{
	int port = HCS_N_PORTS(oxu->hcs_params);

	while (port--)
		writel(PORT_RWC_BITS, &oxu->regs->port_status[port]);
}

static void ehci_port_power(struct oxu_hcd *oxu, int is_on)
{
	unsigned port;

	if (!HCS_PPC(oxu->hcs_params))
		return;

	oxu_dbg(oxu, "...power%s ports...\n", is_on ? "up" : "down");
	for (port = HCS_N_PORTS(oxu->hcs_params); port > 0; )
		(void) oxu_hub_control(oxu_to_hcd(oxu),
				is_on ? SetPortFeature : ClearPortFeature,
				USB_PORT_FEAT_POWER,
				port--, NULL, 0);
	msleep(20);
}

/* Called from some interrupts, timers, and so on.
 * It calls driver completion functions, after dropping oxu->lock.
 */
static void ehci_work(struct oxu_hcd *oxu)
{
	timer_action_done(oxu, TIMER_IO_WATCHDOG);
	if (oxu->reclaim_ready)
		end_unlink_async(oxu);

	/* another CPU may drop oxu->lock during a schedule scan while
	 * it reports urb completions.  this flag guards against bogus
	 * attempts at re-entrant schedule scanning.
	 */
	if (oxu->scanning)
		return;
	oxu->scanning = 1;
	scan_async(oxu);
	if (oxu->next_uframe != -1)
		scan_periodic(oxu);
	oxu->scanning = 0;

	/* the IO watchdog guards against hardware or driver bugs that
	 * misplace IRQs, and should let us run completely without IRQs.
	 * such lossage has been observed on both VT6202 and VT8235.
	 */
	if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state) &&
			(oxu->async->qh_next.ptr != NULL ||
			 oxu->periodic_sched != 0))
		timer_action(oxu, TIMER_IO_WATCHDOG);
}
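/* Annotation (not part of the original driver): "scanning" is not a
 * lock -- oxu->lock is already held on entry.  It only keeps the
 * schedule scans from re-entering themselves when a completion
 * callback drops the lock and another path calls back into
 * ehci_work() in the meantime.
 */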
static void unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	/* if we need to use IAA and it's busy, defer */
	if (qh->qh_state == QH_STATE_LINKED
			&& oxu->reclaim
			&& HC_IS_RUNNING(oxu_to_hcd(oxu)->state)) {
		struct ehci_qh *last;

		for (last = oxu->reclaim;
				last->reclaim;
				last = last->reclaim)
			continue;
		qh->qh_state = QH_STATE_UNLINK_WAIT;
		last->reclaim = qh;

	/* bypass IAA if the hc can't care */
	} else if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state) && oxu->reclaim)
		end_unlink_async(oxu);

	/* something else might have unlinked the qh by now */
	if (qh->qh_state == QH_STATE_LINKED)
		start_unlink_async(oxu, qh);
}

/*
 * USB host controller methods
 */

static irqreturn_t oxu210_hcd_irq(struct usb_hcd *hcd)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	u32 status, pcd_status = 0;
	int bh;

	spin_lock(&oxu->lock);

	status = readl(&oxu->regs->status);

	/* e.g. cardbus physical eject */
	if (status == ~(u32) 0) {
		oxu_dbg(oxu, "device removed\n");
		goto dead;
	}

	status &= INTR_MASK;
	if (!status) {			/* irq sharing? */
		spin_unlock(&oxu->lock);
		return IRQ_NONE;
	}

	/* clear (just) interrupts */
	writel(status, &oxu->regs->status);
	readl(&oxu->regs->command);	/* unblock posted write */
	bh = 0;

#ifdef OXU_VERBOSE_DEBUG
	/* unrequested/ignored: Frame List Rollover */
	dbg_status(oxu, "irq", status);
#endif

	/* INT, ERR, and IAA interrupt rates can be throttled */

	/* normal [4.15.1.2] or error [4.15.1.1] completion */
	if (likely((status & (STS_INT|STS_ERR)) != 0))
		bh = 1;

	/* complete the unlinking of some qh [4.15.2.3] */
	if (status & STS_IAA) {
		oxu->reclaim_ready = 1;
		bh = 1;
	}

	/* remote wakeup [4.3.1] */
	if (status & STS_PCD) {
		unsigned i = HCS_N_PORTS(oxu->hcs_params);
		pcd_status = status;

		/* resume root hub? */
		if (!(readl(&oxu->regs->command) & CMD_RUN))
			usb_hcd_resume_root_hub(hcd);

		while (i--) {
			int pstatus = readl(&oxu->regs->port_status[i]);

			if (pstatus & PORT_OWNER)
				continue;
			if (!(pstatus & PORT_RESUME)
					|| oxu->reset_done[i] != 0)
				continue;

			/* start 20 msec resume signaling from this port,
			 * and make khubd collect PORT_STAT_C_SUSPEND to
			 * stop that signaling.
			 */
			oxu->reset_done[i] = jiffies + msecs_to_jiffies(20);
			oxu_dbg(oxu, "port %d remote wakeup\n", i + 1);
			mod_timer(&hcd->rh_timer, oxu->reset_done[i]);
		}
	}

	/* PCI errors [4.15.2.4] */
	if (unlikely((status & STS_FATAL) != 0)) {
		/* bogus "fatal" IRQs appear on some chips... why? */
		status = readl(&oxu->regs->status);
		dbg_cmd(oxu, "fatal", readl(&oxu->regs->command));
		dbg_status(oxu, "fatal", status);
		if (status & STS_HALT) {
			oxu_err(oxu, "fatal error\n");
dead:
			ehci_reset(oxu);
			writel(0, &oxu->regs->configured_flag);
			/* generic layer kills/unlinks all urbs, then
			 * uses oxu_stop to clean up the rest
			 */
			bh = 1;
		}
	}

	if (bh)
		ehci_work(oxu);
	spin_unlock(&oxu->lock);
	if (pcd_status & STS_PCD)
		usb_hcd_poll_rh_status(hcd);
	return IRQ_HANDLED;
}
static irqreturn_t oxu_irq(struct usb_hcd *hcd)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	int ret = IRQ_HANDLED;

	u32 status = oxu_readl(hcd->regs, OXU_CHIPIRQSTATUS);
	u32 enable = oxu_readl(hcd->regs, OXU_CHIPIRQEN_SET);

	/* Disable all chip interrupts */
	oxu_writel(hcd->regs, OXU_CHIPIRQEN_CLR, enable);

	if ((oxu->is_otg && (status & OXU_USBOTGI)) ||
		(!oxu->is_otg && (status & OXU_USBSPHI)))
		oxu210_hcd_irq(hcd);
	else
		ret = IRQ_NONE;

	/* Re-enable all chip interrupts */
	oxu_writel(hcd->regs, OXU_CHIPIRQEN_SET, enable);

	return ret;
}
static void oxu_watchdog(unsigned long param)
{
	struct oxu_hcd *oxu = (struct oxu_hcd *) param;
	unsigned long flags;

	spin_lock_irqsave(&oxu->lock, flags);

	/* lost IAA irqs wedge things badly; seen with a vt8235 */
	if (oxu->reclaim) {
		u32 status = readl(&oxu->regs->status);
		if (status & STS_IAA) {
			oxu_vdbg(oxu, "lost IAA\n");
			writel(STS_IAA, &oxu->regs->status);
			oxu->reclaim_ready = 1;
		}
	}

	/* stop async processing after it's idled a bit */
	if (test_bit(TIMER_ASYNC_OFF, &oxu->actions))
		start_unlink_async(oxu, oxu->async);

	/* oxu could run by timer, without IRQs ... */
	ehci_work(oxu);

	spin_unlock_irqrestore(&oxu->lock, flags);
}

/* One-time init, only for memory state.
 */
static int oxu_hcd_init(struct usb_hcd *hcd)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	u32 temp;
	int retval;
	u32 hcc_params;

	spin_lock_init(&oxu->lock);

	init_timer(&oxu->watchdog);
	oxu->watchdog.function = oxu_watchdog;
	oxu->watchdog.data = (unsigned long) oxu;

	/*
	 * hw default: 1K periodic list heads, one per frame.
	 * periodic_size can shrink by USBCMD update if hcc_params allows.
	 */
	oxu->periodic_size = DEFAULT_I_TDPS;
	retval = ehci_mem_init(oxu, GFP_KERNEL);
	if (retval < 0)
		return retval;

	/* controllers may cache some of the periodic schedule ... */
	hcc_params = readl(&oxu->caps->hcc_params);
	if (HCC_ISOC_CACHE(hcc_params))		/* full frame cache */
		oxu->i_thresh = 8;
	else					/* N microframes cached */
		oxu->i_thresh = 2 + HCC_ISOC_THRES(hcc_params);

	oxu->reclaim = NULL;
	oxu->reclaim_ready = 0;
	oxu->next_uframe = -1;

	/*
	 * dedicate a qh for the async ring head, since we couldn't unlink
	 * a 'real' qh without stopping the async schedule [4.8].  use it
	 * as the 'reclamation list head' too.
	 * its dummy is used in hw_alt_next of many tds, to prevent the qh
	 * from automatically advancing to the next td after short reads.
	 */
	oxu->async->qh_next.qh = NULL;
	oxu->async->hw_next = QH_NEXT(oxu->async->qh_dma);
	oxu->async->hw_info1 = cpu_to_le32(QH_HEAD);
	oxu->async->hw_token = cpu_to_le32(QTD_STS_HALT);
	oxu->async->hw_qtd_next = EHCI_LIST_END;
	oxu->async->qh_state = QH_STATE_LINKED;
	oxu->async->hw_alt_next = QTD_NEXT(oxu->async->dummy->qtd_dma);

	/* clear interrupt enables, set irq latency */
	if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
		log2_irq_thresh = 0;
	temp = 1 << (16 + log2_irq_thresh);
	if (HCC_CANPARK(hcc_params)) {
		/* HW default park == 3, on hardware that supports it (like
		 * NVidia and ALI silicon), maximizes throughput on the async
		 * schedule by avoiding QH fetches between transfers.
		 *
		 * With fast usb storage devices and NForce2, "park" seems to
		 * make problems:  throughput reduction (!), data errors...
		 */
		if (park) {
			park = min(park, (unsigned) 3);
			temp |= CMD_PARK;
			temp |= park << 8;
		}
		oxu_dbg(oxu, "park %d\n", park);
	}
	if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
		/* periodic schedule size can be smaller than default */
		temp &= ~(3 << 2);
		temp |= (EHCI_TUNE_FLS << 2);
	}
	oxu->command = temp;

	return 0;
}
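/* Worked example (annotation, not part of the original driver): the
 * interrupt threshold control field lives in USBCMD bits 23:16, in
 * units of micro-frames, so "1 << (16 + log2_irq_thresh)" with
 * log2_irq_thresh = 3 puts the value 8 in that field -- i.e. the
 * controller batches completion IRQs to at most one per millisecond.
 */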
/* Called during probe() after chip reset completes.
 */
static int oxu_reset(struct usb_hcd *hcd)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	int ret;

	spin_lock_init(&oxu->mem_lock);
	INIT_LIST_HEAD(&oxu->urb_list);
	oxu->urb_len = 0;

	/* FIXME */
	hcd->self.controller->dma_mask = NULL;

	if (oxu->is_otg) {
		oxu->caps = hcd->regs + OXU_OTG_CAP_OFFSET;
		oxu->regs = hcd->regs + OXU_OTG_CAP_OFFSET +
			HC_LENGTH(readl(&oxu->caps->hc_capbase));
		oxu->mem = hcd->regs + OXU_SPH_MEM;
	} else {
		oxu->caps = hcd->regs + OXU_SPH_CAP_OFFSET;
		oxu->regs = hcd->regs + OXU_SPH_CAP_OFFSET +
			HC_LENGTH(readl(&oxu->caps->hc_capbase));
		oxu->mem = hcd->regs + OXU_OTG_MEM;
	}

	oxu->hcs_params = readl(&oxu->caps->hcs_params);
	oxu->sbrn = 0x20;

	ret = oxu_hcd_init(hcd);
	if (ret)
		return ret;

	return 0;
}
static int oxu_run(struct usb_hcd *hcd)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	int retval;
	u32 temp, hcc_params;

	hcd->uses_new_polling = 1;

	/* EHCI spec section 4.1 */
	retval = ehci_reset(oxu);
	if (retval != 0) {
		ehci_mem_cleanup(oxu);
		return retval;
	}
	writel(oxu->periodic_dma, &oxu->regs->frame_list);
	writel((u32) oxu->async->qh_dma, &oxu->regs->async_next);

	/* hcc_params controls whether oxu->regs->segment must (!!!)
	 * be used; it constrains QH/ITD/SITD and QTD locations.
	 * pci_pool consistent memory always uses segment zero.
	 * streaming mappings for I/O buffers, like pci_map_single(),
	 * can return segments above 4GB, if the device allows.
	 *
	 * NOTE:  the dma mask is visible through dma_supported(), so
	 * drivers can pass this info along ... like NETIF_F_HIGHDMA,
	 * Scsi_Host.highmem_io, and so forth.  It's readonly to all
	 * host side drivers though.
	 */
	hcc_params = readl(&oxu->caps->hcc_params);
	if (HCC_64BIT_ADDR(hcc_params))
		writel(0, &oxu->regs->segment);

	oxu->command &= ~(CMD_LRESET | CMD_IAAD | CMD_PSE |
				CMD_ASE | CMD_RESET);
	oxu->command |= CMD_RUN;
	writel(oxu->command, &oxu->regs->command);
	dbg_cmd(oxu, "init", oxu->command);

	/*
	 * Start, enabling full USB 2.0 functionality ... usb 1.1 devices
	 * are explicitly handed to companion controller(s), so no TT is
	 * involved with the root hub.  (Except where one is integrated,
	 * and there's no companion controller unless maybe for USB OTG.)
	 */
	hcd->state = HC_STATE_RUNNING;
	writel(FLAG_CF, &oxu->regs->configured_flag);
	readl(&oxu->regs->command);	/* unblock posted writes */

	temp = HC_VERSION(readl(&oxu->caps->hc_capbase));
	oxu_info(oxu, "USB %x.%x started, quasi-EHCI %x.%02x, driver %s%s\n",
		((oxu->sbrn & 0xf0)>>4), (oxu->sbrn & 0x0f),
		temp >> 8, temp & 0xff, DRIVER_VERSION,
		ignore_oc ? ", overcurrent ignored" : "");

	writel(INTR_MASK, &oxu->regs->intr_enable); /* Turn On Interrupts */

	return 0;
}
static void oxu_stop(struct usb_hcd *hcd)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);

	/* Turn off port power on all root hub ports. */
	ehci_port_power(oxu, 0);

	/* no more interrupts ... */
	del_timer_sync(&oxu->watchdog);

	spin_lock_irq(&oxu->lock);
	if (HC_IS_RUNNING(hcd->state))
		ehci_quiesce(oxu);

	ehci_reset(oxu);
	writel(0, &oxu->regs->intr_enable);
	spin_unlock_irq(&oxu->lock);

	/* let companion controllers work when we aren't */
	writel(0, &oxu->regs->configured_flag);

	/* root hub is shut down separately (first, when possible) */
	spin_lock_irq(&oxu->lock);
	if (oxu->async)
		ehci_work(oxu);
	spin_unlock_irq(&oxu->lock);
	ehci_mem_cleanup(oxu);

	dbg_status(oxu, "oxu_stop completed", readl(&oxu->regs->status));
}

/* Kick in for silicon on any bus (not just pci, etc).
 * This forcibly disables dma and IRQs, helping kexec and other cases
 * where the next system software may expect clean state.
 */
static void oxu_shutdown(struct usb_hcd *hcd)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);

	(void) ehci_halt(oxu);
	ehci_turn_off_all_ports(oxu);

	/* make BIOS/etc use companion controller during reboot */
	writel(0, &oxu->regs->configured_flag);

	/* unblock posted writes */
	readl(&oxu->regs->configured_flag);
}

/* Non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 *
 * urb + dev is in hcd.self.controller.urb_list
 * we're queueing TDs onto software and hardware lists
 *
 * hcd-specific init for hcpriv hasn't been done yet
 *
 * NOTE:  control, bulk, and interrupt share the same code to append TDs
 * to a (possibly active) QH, and the same QH scanning code.
 */
static int __oxu_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
				gfp_t mem_flags)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	struct list_head qtd_list;

	INIT_LIST_HEAD(&qtd_list);

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
	case PIPE_BULK:
	default:
		if (!qh_urb_transaction(oxu, urb, &qtd_list, mem_flags))
			return -ENOMEM;
		return submit_async(oxu, urb, &qtd_list, mem_flags);

	case PIPE_INTERRUPT:
		if (!qh_urb_transaction(oxu, urb, &qtd_list, mem_flags))
			return -ENOMEM;
		return intr_submit(oxu, urb, &qtd_list, mem_flags);

	case PIPE_ISOCHRONOUS:
		if (urb->dev->speed == USB_SPEED_HIGH)
			return itd_submit(oxu, urb, mem_flags);
		else
			return sitd_submit(oxu, urb, mem_flags);
	}
}
/* This function breaks up URBs with large transfer buffers into
 * smaller chunks and processes the resulting micro urbs in sequence.
 */
static int oxu_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
				gfp_t mem_flags)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	int num, rem;
	int transfer_buffer_length;
	void *transfer_buffer;
	struct urb *murb;
	int i, ret;

	/* If not bulk pipe just enqueue the URB */
	if (!usb_pipebulk(urb->pipe))
		return __oxu_urb_enqueue(hcd, urb, mem_flags);

	/* Otherwise we should verify the USB transfer buffer size! */
	transfer_buffer = urb->transfer_buffer;
	transfer_buffer_length = urb->transfer_buffer_length;

	num = urb->transfer_buffer_length / 4096;
	rem = urb->transfer_buffer_length % 4096;
	if (rem != 0)
		num++;

	/* If URB is smaller than 4096 bytes just enqueue it! */
	if (num == 1)
		return __oxu_urb_enqueue(hcd, urb, mem_flags);

	/* Ok, we have more work to do! :) */
	for (i = 0; i < num - 1; i++) {
		/* Get a free micro URB; poll till one is received */
		do {
			murb = (struct urb *) oxu_murb_alloc(oxu);
			if (!murb)
				schedule();
		} while (!murb);

		/* Copy the urb */
		memcpy(murb, urb, sizeof(struct urb));

		murb->transfer_buffer_length = 4096;
		murb->transfer_buffer = transfer_buffer + i * 4096;

		/* A NULL complete pointer encodes that this is a micro urb */
		murb->complete = NULL;

		((struct oxu_murb *) murb)->main = urb;
		((struct oxu_murb *) murb)->last = 0;

		/* This loop guarantees the urb gets processed even when
		 * there aren't enough resources at a particular time,
		 * by retrying.
		 */
		do {
			ret = __oxu_urb_enqueue(hcd, murb, mem_flags);
			if (ret)
				schedule();
		} while (ret);
	}

	/* Last urb requires special handling */

	/* Get a free micro URB; poll till one is received */
	do {
		murb = (struct urb *) oxu_murb_alloc(oxu);
		if (!murb)
			schedule();
	} while (!murb);

	/* Copy the urb */
	memcpy(murb, urb, sizeof(struct urb));

	murb->transfer_buffer_length = rem > 0 ? rem : 4096;
	murb->transfer_buffer = transfer_buffer + (num - 1) * 4096;

	/* A NULL complete pointer encodes that this is a micro urb */
	murb->complete = NULL;

	((struct oxu_murb *) murb)->main = urb;
	((struct oxu_murb *) murb)->last = 1;

	do {
		ret = __oxu_urb_enqueue(hcd, murb, mem_flags);
		if (ret)
			schedule();
	} while (ret);

	return ret;
}
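/* Example (annotation, not part of the original driver): a 10000-byte
 * bulk urb gives num = 2, rem = 1808, bumped to num = 3, so three
 * micro urbs go out: 4096 + 4096 + 1808 bytes.  Only the one marked
 * ->last = 1 triggers the original urb's completion; the
 * murb->complete == NULL convention is how the completion path tells
 * micro urbs apart from ordinary ones.
 */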
/* Remove from hardware lists.
 * Completions normally happen asynchronously
 */
static int oxu_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	struct ehci_qh *qh;
	unsigned long flags;

	spin_lock_irqsave(&oxu->lock, flags);
	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
	case PIPE_BULK:
	default:
		qh = (struct ehci_qh *) urb->hcpriv;
		if (!qh)
			break;
		unlink_async(oxu, qh);
		break;

	case PIPE_INTERRUPT:
		qh = (struct ehci_qh *) urb->hcpriv;
		if (!qh)
			break;
		switch (qh->qh_state) {
		case QH_STATE_LINKED:
			intr_deschedule(oxu, qh);
			/* FALL THROUGH */
		case QH_STATE_IDLE:
			qh_completions(oxu, qh);
			break;
		default:
			oxu_dbg(oxu, "bogus qh %p state %d\n",
					qh, qh->qh_state);
			goto done;
		}

		/* reschedule QH iff another request is queued */
		if (!list_empty(&qh->qtd_list)
				&& HC_IS_RUNNING(hcd->state)) {
			int status;

			status = qh_schedule(oxu, qh);
			spin_unlock_irqrestore(&oxu->lock, flags);

			if (status != 0) {
				/* shouldn't happen often, but ...
				 * FIXME kill those tds' urbs
				 */
				err("can't reschedule qh %p, err %d",
					qh, status);
			}
			return status;
		}
		break;
	}
done:
	spin_unlock_irqrestore(&oxu->lock, flags);
	return 0;
}
/* Bulk qh holds the data toggle */
static void oxu_endpoint_disable(struct usb_hcd *hcd,
					struct usb_host_endpoint *ep)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	unsigned long flags;
	struct ehci_qh *qh, *tmp;

	/* ASSERT:  any requests/urbs are being unlinked */
	/* ASSERT:  nobody can be submitting urbs for this any more */

rescan:
	spin_lock_irqsave(&oxu->lock, flags);
	qh = ep->hcpriv;
	if (!qh)
		goto done;

	/* endpoints can be iso streams.  for now, we don't
	 * accelerate iso completions ... so spin a while.
	 */
	if (qh->hw_info1 == 0) {
		oxu_vdbg(oxu, "iso delay\n");
		goto idle_timeout;
	}

	if (!HC_IS_RUNNING(hcd->state))
		qh->qh_state = QH_STATE_IDLE;
	switch (qh->qh_state) {
	case QH_STATE_LINKED:
		for (tmp = oxu->async->qh_next.qh;
				tmp && tmp != qh;
				tmp = tmp->qh_next.qh)
			continue;
		/* periodic qh self-unlinks on empty */
		if (!tmp)
			goto nogood;
		unlink_async(oxu, qh);
		/* FALL THROUGH */
	case QH_STATE_UNLINK:		/* wait for hw to finish? */
idle_timeout:
		spin_unlock_irqrestore(&oxu->lock, flags);
		schedule_timeout_uninterruptible(1);
		goto rescan;
	case QH_STATE_IDLE:		/* fully unlinked */
		if (list_empty(&qh->qtd_list)) {
			qh_put(qh);
			break;
		}
		/* else FALL THROUGH */
	default:
nogood:
		/* caller was supposed to have unlinked any requests;
		 * that's not our job.  just leak this memory.
		 */
		oxu_err(oxu, "qh %p (#%02x) state %d%s\n",
			qh, ep->desc.bEndpointAddress, qh->qh_state,
			list_empty(&qh->qtd_list) ? "" : "(has tds)");
		break;
	}
	ep->hcpriv = NULL;
done:
	spin_unlock_irqrestore(&oxu->lock, flags);
}
static int oxu_get_frame(struct usb_hcd *hcd)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);

	return (readl(&oxu->regs->frame_index) >> 3) %
		oxu->periodic_size;
}

/* Build "status change" packet (one or two bytes) from HC registers */
static int oxu_hub_status_data(struct usb_hcd *hcd, char *buf)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	u32 temp, mask, status = 0;
	int ports, i, retval = 1;
	unsigned long flags;

	/* if !USB_SUSPEND, root hub timers won't get shut down ... */
	if (!HC_IS_RUNNING(hcd->state))
		return 0;

	/* init status to no-changes */
	buf[0] = 0;
	ports = HCS_N_PORTS(oxu->hcs_params);
	if (ports > 7) {
		buf[1] = 0;
		retval++;
	}

	/* Some boards (mostly VIA?) report bogus overcurrent indications,
	 * causing massive log spam unless we completely ignore them.  It
	 * may be relevant that VIA VT8235 controllers, where PORT_POWER is
	 * always set, seem to clear PORT_OCC and PORT_CSC when writing to
	 * PORT_POWER; that's surprising, but maybe within-spec.
	 */
	if (!ignore_oc)
		mask = PORT_CSC | PORT_PEC | PORT_OCC;
	else
		mask = PORT_CSC | PORT_PEC;

	/* no hub change reports (bit 0) for now (power, ...) */

	/* port N changes (bit N)? */
	spin_lock_irqsave(&oxu->lock, flags);
	for (i = 0; i < ports; i++) {
		temp = readl(&oxu->regs->port_status[i]);

		/*
		 * Return status information even for ports with OWNER set.
		 * Otherwise khubd wouldn't see the disconnect event when a
		 * high-speed device is switched over to the companion
		 * controller by the user.
		 */
		if (!(temp & PORT_CONNECT))
			oxu->reset_done[i] = 0;
		if ((temp & mask) != 0 || ((temp & PORT_RESUME) != 0 &&
				time_after_eq(jiffies, oxu->reset_done[i]))) {
			if (i < 7)
				buf[0] |= 1 << (i + 1);
			else
				buf[1] |= 1 << (i - 7);
			status = STS_PCD;
		}
	}
	/* FIXME autosuspend idle root hubs */
	spin_unlock_irqrestore(&oxu->lock, flags);
	return status ? retval : 0;
}

/* Returns the speed of a device attached to a port on the root hub. */
static inline unsigned int oxu_port_speed(struct oxu_hcd *oxu,
						unsigned int portsc)
{
	switch ((portsc >> 26) & 3) {
	case 0:
		return 0;
	case 1:
		return USB_PORT_STAT_LOW_SPEED;
	case 2:
	default:
		return USB_PORT_STAT_HIGH_SPEED;
	}
}
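/* Annotation (not part of the original driver): bits 27:26 of this
 * chip's portsc register encode the attached device's speed --
 * 0 = full, 1 = low, 2 = high -- which is why full speed maps to the
 * "no flag" return value 0 in USB_PORT_STAT terms.  (Standard EHCI
 * has no such field; root-hub speed there is inferred differently.)
 */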
  2650. #define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E)
  2651. static int oxu_hub_control(struct usb_hcd *hcd, u16 typeReq,
  2652. u16 wValue, u16 wIndex, char *buf, u16 wLength)
  2653. {
  2654. struct oxu_hcd *oxu = hcd_to_oxu(hcd);
  2655. int ports = HCS_N_PORTS(oxu->hcs_params);
  2656. u32 __iomem *status_reg = &oxu->regs->port_status[wIndex - 1];
  2657. u32 temp, status;
  2658. unsigned long flags;
  2659. int retval = 0;
  2660. unsigned selector;
  2661. /*
  2662. * FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR.
  2663. * HCS_INDICATOR may say we can change LEDs to off/amber/green.
  2664. * (track current state ourselves) ... blink for diagnostics,
  2665. * power, "this is the one", etc. EHCI spec supports this.
  2666. */
  2667. spin_lock_irqsave(&oxu->lock, flags);
  2668. switch (typeReq) {
  2669. case ClearHubFeature:
  2670. switch (wValue) {
  2671. case C_HUB_LOCAL_POWER:
  2672. case C_HUB_OVER_CURRENT:
  2673. /* no hub-wide feature/status flags */
  2674. break;
  2675. default:
  2676. goto error;
  2677. }
  2678. break;
  2679. case ClearPortFeature:
  2680. if (!wIndex || wIndex > ports)
  2681. goto error;
  2682. wIndex--;
  2683. temp = readl(status_reg);
  2684. /*
  2685. * Even if OWNER is set, so the port is owned by the
  2686. * companion controller, khubd needs to be able to clear
  2687. * the port-change status bits (especially
  2688. * USB_PORT_STAT_C_CONNECTION).
  2689. */
  2690. switch (wValue) {
  2691. case USB_PORT_FEAT_ENABLE:
  2692. writel(temp & ~PORT_PE, status_reg);
  2693. break;
  2694. case USB_PORT_FEAT_C_ENABLE:
  2695. writel((temp & ~PORT_RWC_BITS) | PORT_PEC, status_reg);
  2696. break;
  2697. case USB_PORT_FEAT_SUSPEND:
  2698. if (temp & PORT_RESET)
  2699. goto error;
  2700. if (temp & PORT_SUSPEND) {
  2701. if ((temp & PORT_PE) == 0)
  2702. goto error;
  2703. /* resume signaling for 20 msec */
  2704. temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
  2705. writel(temp | PORT_RESUME, status_reg);
  2706. oxu->reset_done[wIndex] = jiffies
  2707. + msecs_to_jiffies(20);
  2708. }
  2709. break;
  2710. case USB_PORT_FEAT_C_SUSPEND:
  2711. /* we auto-clear this feature */
  2712. break;
  2713. case USB_PORT_FEAT_POWER:
  2714. if (HCS_PPC(oxu->hcs_params))
  2715. writel(temp & ~(PORT_RWC_BITS | PORT_POWER),
  2716. status_reg);
  2717. break;
  2718. case USB_PORT_FEAT_C_CONNECTION:
  2719. writel((temp & ~PORT_RWC_BITS) | PORT_CSC, status_reg);
  2720. break;
  2721. case USB_PORT_FEAT_C_OVER_CURRENT:
  2722. writel((temp & ~PORT_RWC_BITS) | PORT_OCC, status_reg);
  2723. break;
  2724. case USB_PORT_FEAT_C_RESET:
  2725. /* GetPortStatus clears reset */
  2726. break;
  2727. default:
  2728. goto error;
  2729. }
  2730. readl(&oxu->regs->command); /* unblock posted write */
  2731. break;
	case GetHubDescriptor:
		ehci_hub_descriptor(oxu, (struct usb_hub_descriptor *)
			buf);
		break;
	case GetHubStatus:
		/* no hub-wide feature/status flags */
		memset(buf, 0, 4);
		break;
	case GetPortStatus:
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;
		status = 0;
		temp = readl(status_reg);

		/* wPortChange bits */
		if (temp & PORT_CSC)
			status |= USB_PORT_STAT_C_CONNECTION << 16;
		if (temp & PORT_PEC)
			status |= USB_PORT_STAT_C_ENABLE << 16;
		if ((temp & PORT_OCC) && !ignore_oc)
			status |= USB_PORT_STAT_C_OVERCURRENT << 16;

		/* whoever resumes must GetPortStatus to complete it!! */
		if (temp & PORT_RESUME) {
			/* Remote Wakeup received? */
			if (!oxu->reset_done[wIndex]) {
				/* resume signaling for 20 msec */
				oxu->reset_done[wIndex] = jiffies
						+ msecs_to_jiffies(20);
				/* check the port again */
				mod_timer(&oxu_to_hcd(oxu)->rh_timer,
						oxu->reset_done[wIndex]);
			}

			/* resume completed? */
			else if (time_after_eq(jiffies,
					oxu->reset_done[wIndex])) {
				status |= USB_PORT_STAT_C_SUSPEND << 16;
				oxu->reset_done[wIndex] = 0;

				/* stop resume signaling */
				temp = readl(status_reg);
				writel(temp & ~(PORT_RWC_BITS | PORT_RESUME),
					status_reg);
				retval = handshake(oxu, status_reg,
					PORT_RESUME, 0, 2000 /* 2msec */);
				if (retval != 0) {
					oxu_err(oxu,
						"port %d resume error %d\n",
						wIndex + 1, retval);
					goto error;
				}
				temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10));
			}
		}

		/* whoever resets must GetPortStatus to complete it!! */
		if ((temp & PORT_RESET)
				&& time_after_eq(jiffies,
					oxu->reset_done[wIndex])) {
			status |= USB_PORT_STAT_C_RESET << 16;
			oxu->reset_done[wIndex] = 0;

			/* force reset to complete */
			writel(temp & ~(PORT_RWC_BITS | PORT_RESET),
					status_reg);
			/* REVISIT: some hardware needs 550+ usec to clear
			 * this bit; seems too long to spin routinely...
			 */
			retval = handshake(oxu, status_reg,
					PORT_RESET, 0, 750);
			if (retval != 0) {
				oxu_err(oxu, "port %d reset error %d\n",
					wIndex + 1, retval);
				goto error;
			}

			/* see what we found out */
			temp = check_reset_complete(oxu, wIndex, status_reg,
					readl(status_reg));
		}

		/* transfer dedicated ports to the companion hc */
		if ((temp & PORT_CONNECT) &&
				test_bit(wIndex, &oxu->companion_ports)) {
			temp &= ~PORT_RWC_BITS;
			temp |= PORT_OWNER;
			writel(temp, status_reg);
			oxu_dbg(oxu, "port %d --> companion\n", wIndex + 1);
			temp = readl(status_reg);
		}

		/*
		 * Even if OWNER is set, there's no harm letting khubd
		 * see the wPortStatus values (they should all be 0 except
		 * for PORT_POWER anyway).
		 */
		if (temp & PORT_CONNECT) {
			status |= USB_PORT_STAT_CONNECTION;
			/* status may be from integrated TT */
			status |= oxu_port_speed(oxu, temp);
		}
		if (temp & PORT_PE)
			status |= USB_PORT_STAT_ENABLE;
		if (temp & (PORT_SUSPEND|PORT_RESUME))
			status |= USB_PORT_STAT_SUSPEND;
		if (temp & PORT_OC)
			status |= USB_PORT_STAT_OVERCURRENT;
		if (temp & PORT_RESET)
			status |= USB_PORT_STAT_RESET;
		if (temp & PORT_POWER)
			status |= USB_PORT_STAT_POWER;

#ifndef	OXU_VERBOSE_DEBUG
	if (status & ~0xffff)	/* only if wPortChange is interesting */
#endif
		dbg_port(oxu, "GetStatus", wIndex + 1, temp);
		put_unaligned(cpu_to_le32(status), (__le32 *) buf);
		break;
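	/*
	 * Layout of the value stored above: the low 16 bits carry
	 * wPortStatus and the high 16 bits wPortChange, matching the
	 * wire format of GetPortStatus (USB 2.0, 11.24.2.7) -- hence
	 * the "<< 16" on every change flag before the little-endian
	 * store into buf.  E.g. a freshly connected high-speed device
	 * on a powered port reads back as 0x00010501:
	 * (USB_PORT_STAT_C_CONNECTION << 16) | USB_PORT_STAT_POWER |
	 * USB_PORT_STAT_HIGH_SPEED | USB_PORT_STAT_CONNECTION.
	 */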
	case SetHubFeature:
		switch (wValue) {
		case C_HUB_LOCAL_POWER:
		case C_HUB_OVER_CURRENT:
			/* no hub-wide feature/status flags */
			break;
		default:
			goto error;
		}
		break;
	case SetPortFeature:
		selector = wIndex >> 8;
		wIndex &= 0xff;
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;
		temp = readl(status_reg);
		if (temp & PORT_OWNER)
			break;

		temp &= ~PORT_RWC_BITS;
		switch (wValue) {
		case USB_PORT_FEAT_SUSPEND:
			if ((temp & PORT_PE) == 0
					|| (temp & PORT_RESET) != 0)
				goto error;
			if (device_may_wakeup(&hcd->self.root_hub->dev))
				temp |= PORT_WAKE_BITS;
			writel(temp | PORT_SUSPEND, status_reg);
			break;
		case USB_PORT_FEAT_POWER:
			if (HCS_PPC(oxu->hcs_params))
				writel(temp | PORT_POWER, status_reg);
			break;
		case USB_PORT_FEAT_RESET:
			if (temp & PORT_RESUME)
				goto error;
			/* line status bits may report this as low speed,
			 * which can be fine if this root hub has a
			 * transaction translator built in.
			 */
			oxu_vdbg(oxu, "port %d reset\n", wIndex + 1);
			temp |= PORT_RESET;
			temp &= ~PORT_PE;

			/*
			 * caller must wait, then call GetPortStatus
			 * usb 2.0 spec says 50 ms resets on root
			 */
			oxu->reset_done[wIndex] = jiffies
					+ msecs_to_jiffies(50);
			writel(temp, status_reg);
			break;

		/* For downstream facing ports (these): one hub port is put
		 * into test mode according to USB2 11.24.2.13, then the hub
		 * must be reset (which for root hub now means rmmod+modprobe,
		 * or else system reboot).  See EHCI 2.3.9 and 4.14 for info
		 * about the EHCI-specific stuff.
		 */
		case USB_PORT_FEAT_TEST:
			if (!selector || selector > 5)
				goto error;
			ehci_quiesce(oxu);
			ehci_halt(oxu);
			temp |= selector << 16;
			writel(temp, status_reg);
			break;
		default:
			goto error;
		}
		readl(&oxu->regs->command);	/* unblock posted writes */
		break;
	default:
error:
		/* "stall" on error */
		retval = -EPIPE;
	}
	spin_unlock_irqrestore(&oxu->lock, flags);
	return retval;
}
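/*
 * The -EPIPE "stall" above is how an HCD reports an unsupported root
 * hub request to usbcore: the hub driver sees the same protocol stall
 * it would get from an external hub's control endpoint, and handles
 * both cases identically.
 */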
#ifdef CONFIG_PM

static int oxu_bus_suspend(struct usb_hcd *hcd)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	int port;
	int mask;

	oxu_dbg(oxu, "suspend root hub\n");

	if (time_before(jiffies, oxu->next_statechange))
		msleep(5);

	port = HCS_N_PORTS(oxu->hcs_params);
	spin_lock_irq(&oxu->lock);

	/* stop schedules, clean any completed work */
	if (HC_IS_RUNNING(hcd->state)) {
		ehci_quiesce(oxu);
		hcd->state = HC_STATE_QUIESCING;
	}
	oxu->command = readl(&oxu->regs->command);
	if (oxu->reclaim)
		oxu->reclaim_ready = 1;
	ehci_work(oxu);

	/* Unlike other USB host controller types, EHCI doesn't have
	 * any notion of "global" or bus-wide suspend.  The driver has
	 * to manually suspend all the active unsuspended ports, and
	 * then manually resume them in the bus_resume() routine.
	 */
	oxu->bus_suspended = 0;
	while (port--) {
		u32 __iomem *reg = &oxu->regs->port_status[port];
		u32 t1 = readl(reg) & ~PORT_RWC_BITS;
		u32 t2 = t1;

		/* keep track of which ports we suspend */
		if ((t1 & PORT_PE) && !(t1 & PORT_OWNER) &&
				!(t1 & PORT_SUSPEND)) {
			t2 |= PORT_SUSPEND;
			set_bit(port, &oxu->bus_suspended);
		}

		/* enable remote wakeup on all ports */
		if (device_may_wakeup(&hcd->self.root_hub->dev))
			t2 |= PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E;
		else
			t2 &= ~(PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E);

		if (t1 != t2) {
			oxu_vdbg(oxu, "port %d, %08x -> %08x\n",
					port + 1, t1, t2);
			writel(t2, reg);
		}
	}

	/* turn off now-idle HC */
	del_timer_sync(&oxu->watchdog);
	ehci_halt(oxu);
	hcd->state = HC_STATE_SUSPENDED;

	/* allow remote wakeup */
	mask = INTR_MASK;
	if (!device_may_wakeup(&hcd->self.root_hub->dev))
		mask &= ~STS_PCD;
	writel(mask, &oxu->regs->intr_enable);
	readl(&oxu->regs->intr_enable);

	oxu->next_statechange = jiffies + msecs_to_jiffies(10);
	spin_unlock_irq(&oxu->lock);
	return 0;
}
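/*
 * The bus_suspended bitmap filled in above is the contract with
 * oxu_bus_resume() below: only ports this routine actually put into
 * suspend get resume signaling on the way back up, so ports that were
 * already suspended (or owned by a companion) are left alone.
 */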
/* Caller has locked the root hub, and should reset/reinit on error */
static int oxu_bus_resume(struct usb_hcd *hcd)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	u32 temp;
	int i;

	if (time_before(jiffies, oxu->next_statechange))
		msleep(5);
	spin_lock_irq(&oxu->lock);

	/* Ideally we've got a real resume here, and no port's power
	 * was lost.  (For PCI, that means Vaux was maintained.)  But we
	 * could instead be restoring a swsusp snapshot -- so that BIOS was
	 * the last user of the controller, not reset/pm hardware keeping
	 * state we gave to it.
	 */
	temp = readl(&oxu->regs->intr_enable);
	oxu_dbg(oxu, "resume root hub%s\n", temp ? "" : " after power loss");

	/* at least some APM implementations will try to deliver
	 * IRQs right away, so delay them until we're ready.
	 */
	writel(0, &oxu->regs->intr_enable);

	/* re-init operational registers */
	writel(0, &oxu->regs->segment);
	writel(oxu->periodic_dma, &oxu->regs->frame_list);
	writel((u32) oxu->async->qh_dma, &oxu->regs->async_next);

	/* restore CMD_RUN, framelist size, and irq threshold */
	writel(oxu->command, &oxu->regs->command);

	/* Some controller/firmware combinations need a delay during which
	 * they set up the port statuses.  See Bugzilla #8190. */
	mdelay(8);

	/* manually resume the ports we suspended during bus_suspend() */
	i = HCS_N_PORTS(oxu->hcs_params);
	while (i--) {
		temp = readl(&oxu->regs->port_status[i]);
		temp &= ~(PORT_RWC_BITS
			| PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E);
		if (test_bit(i, &oxu->bus_suspended) && (temp & PORT_SUSPEND)) {
			oxu->reset_done[i] = jiffies + msecs_to_jiffies(20);
			temp |= PORT_RESUME;
		}
		writel(temp, &oxu->regs->port_status[i]);
	}
	i = HCS_N_PORTS(oxu->hcs_params);
	mdelay(20);
	while (i--) {
		temp = readl(&oxu->regs->port_status[i]);
		if (test_bit(i, &oxu->bus_suspended) && (temp & PORT_SUSPEND)) {
			temp &= ~(PORT_RWC_BITS | PORT_RESUME);
			writel(temp, &oxu->regs->port_status[i]);
			oxu_vdbg(oxu, "resumed port %d\n", i + 1);
		}
	}
	(void) readl(&oxu->regs->command);

	/* maybe re-activate the schedule(s) */
	temp = 0;
	if (oxu->async->qh_next.qh)
		temp |= CMD_ASE;
	if (oxu->periodic_sched)
		temp |= CMD_PSE;
	if (temp) {
		oxu->command |= temp;
		writel(oxu->command, &oxu->regs->command);
	}

	oxu->next_statechange = jiffies + msecs_to_jiffies(5);
	hcd->state = HC_STATE_RUNNING;

	/* Now we can safely re-enable irqs */
	writel(INTR_MASK, &oxu->regs->intr_enable);

	spin_unlock_irq(&oxu->lock);
	return 0;
}
#else

static int oxu_bus_suspend(struct usb_hcd *hcd)
{
	return 0;
}

static int oxu_bus_resume(struct usb_hcd *hcd)
{
	return 0;
}

#endif	/* CONFIG_PM */
static const struct hc_driver oxu_hc_driver = {
	.description =		"oxu210hp_hcd",
	.product_desc =		"oxu210hp HCD",
	.hcd_priv_size =	sizeof(struct oxu_hcd),

	/*
	 * Generic hardware linkage
	 */
	.irq =			oxu_irq,
	.flags =		HCD_MEMORY | HCD_USB2,

	/*
	 * Basic lifecycle operations
	 */
	.reset =		oxu_reset,
	.start =		oxu_run,
	.stop =			oxu_stop,
	.shutdown =		oxu_shutdown,

	/*
	 * Managing i/o requests and associated device resources
	 */
	.urb_enqueue =		oxu_urb_enqueue,
	.urb_dequeue =		oxu_urb_dequeue,
	.endpoint_disable =	oxu_endpoint_disable,

	/*
	 * Scheduling support
	 */
	.get_frame_number =	oxu_get_frame,

	/*
	 * Root hub support
	 */
	.hub_status_data =	oxu_hub_status_data,
	.hub_control =		oxu_hub_control,
	.bus_suspend =		oxu_bus_suspend,
	.bus_resume =		oxu_bus_resume,
};
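/*
 * usbcore drives the root hub entirely through the two hub_* hooks
 * above: hub_status_data is polled (via the hcd's rh_timer) to learn
 * which ports changed, and hub_control then carries out the matching
 * hub-class control requests for those ports.
 */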
/*
 * Module stuff
 */

static void oxu_configuration(struct platform_device *pdev, void *base)
{
	u32 tmp;

	/* Initialize top level registers.
	 * First write ever
	 */
	oxu_writel(base, OXU_HOSTIFCONFIG, 0x0000037D);
	oxu_writel(base, OXU_SOFTRESET, OXU_SRESET);
	oxu_writel(base, OXU_HOSTIFCONFIG, 0x0000037D);

	tmp = oxu_readl(base, OXU_PIOBURSTREADCTRL);
	oxu_writel(base, OXU_PIOBURSTREADCTRL, tmp | 0x0040);

	oxu_writel(base, OXU_ASO, OXU_SPHPOEN | OXU_OVRCCURPUPDEN |
					OXU_COMPARATOR | OXU_ASO_OP);

	tmp = oxu_readl(base, OXU_CLKCTRL_SET);
	oxu_writel(base, OXU_CLKCTRL_SET, tmp | OXU_SYSCLKEN | OXU_USBOTGCLKEN);

	/* Clear all top interrupt enable */
	oxu_writel(base, OXU_CHIPIRQEN_CLR, 0xff);

	/* Clear all top interrupt status */
	oxu_writel(base, OXU_CHIPIRQSTATUS, 0xff);

	/* Enable all needed top interrupt except OTG SPH core */
	oxu_writel(base, OXU_CHIPIRQEN_SET, OXU_USBSPHLPWUI | OXU_USBOTGLPWUI);
}
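/*
 * The sequence above is ordering-sensitive: HOSTIFCONFIG is written
 * both before and after OXU_SRESET so the host bus interface is in a
 * known state on each side of the soft reset, before clocks and the
 * top-level interrupt routing are brought up.  The 0x0000037D and
 * 0x0040 magic values are bus-interface settings whose bit meanings
 * are documented only in the OXU210HP datasheet.
 */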
static int oxu_verify_id(struct platform_device *pdev, void *base)
{
	u32 id;
	static const char * const bo[] = {
		"reserved",
		"128-pin LQFP",
		"84-pin TFBGA",
		"reserved",
	};

	/* Read controller signature register to find a match */
	id = oxu_readl(base, OXU_DEVICEID);
	dev_info(&pdev->dev, "device ID %x\n", id);
	if ((id & OXU_REV_MASK) != (OXU_REV_2100 << OXU_REV_SHIFT))
		return -1;

	dev_info(&pdev->dev, "found device %x %s (%04x:%04x)\n",
		id >> OXU_REV_SHIFT,
		bo[(id & OXU_BO_MASK) >> OXU_BO_SHIFT],
		(id & OXU_MAJ_REV_MASK) >> OXU_MAJ_REV_SHIFT,
		(id & OXU_MIN_REV_MASK) >> OXU_MIN_REV_SHIFT);

	return 0;
}
static struct usb_hcd *oxu_create(struct platform_device *pdev,
				unsigned long memstart, unsigned long memlen,
				void *base, int irq, int otg)
{
	struct device *dev = &pdev->dev;
	struct usb_hcd *hcd;
	struct oxu_hcd *oxu;
	int ret;

	/* Set endian mode and host mode */
	oxu_writel(base + (otg ? OXU_OTG_CORE_OFFSET : OXU_SPH_CORE_OFFSET),
		   OXU_USBMODE,
		   OXU_CM_HOST_ONLY | OXU_ES_LITTLE | OXU_VBPS);

	hcd = usb_create_hcd(&oxu_hc_driver, dev,
				otg ? "oxu210hp_otg" : "oxu210hp_sph");
	if (!hcd)
		return ERR_PTR(-ENOMEM);

	hcd->rsrc_start = memstart;
	hcd->rsrc_len = memlen;
	hcd->regs = base;
	hcd->irq = irq;
	hcd->state = HC_STATE_HALT;

	oxu = hcd_to_oxu(hcd);
	oxu->is_otg = otg;

	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
	if (ret < 0) {
		/* drop the reference taken by usb_create_hcd() */
		usb_put_hcd(hcd);
		return ERR_PTR(ret);
	}

	return hcd;
}
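/*
 * Both invocations of oxu_create() (from oxu_init() below) hand in
 * the same register window and the same interrupt line: the OTG and
 * SPH cores are two hosts behind one chip select, differing only in
 * the per-core USBMODE offset.  That is why each HCD registers with
 * IRQF_SHARED and must tolerate interrupts raised by its sibling.
 */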
static int oxu_init(struct platform_device *pdev,
		    unsigned long memstart, unsigned long memlen,
		    void *base, int irq)
{
	struct oxu_info *info = platform_get_drvdata(pdev);
	struct usb_hcd *hcd;
	int ret;

	/* First time configuration at start up */
	oxu_configuration(pdev, base);

	ret = oxu_verify_id(pdev, base);
	if (ret) {
		dev_err(&pdev->dev, "no devices found!\n");
		return -ENODEV;
	}

	/* Create the OTG controller */
	hcd = oxu_create(pdev, memstart, memlen, base, irq, 1);
	if (IS_ERR(hcd)) {
		dev_err(&pdev->dev, "cannot create OTG controller!\n");
		ret = PTR_ERR(hcd);
		goto error_create_otg;
	}
	info->hcd[0] = hcd;

	/* Create the SPH host controller */
	hcd = oxu_create(pdev, memstart, memlen, base, irq, 0);
	if (IS_ERR(hcd)) {
		dev_err(&pdev->dev, "cannot create SPH controller!\n");
		ret = PTR_ERR(hcd);
		goto error_create_sph;
	}
	info->hcd[1] = hcd;

	oxu_writel(base, OXU_CHIPIRQEN_SET,
		oxu_readl(base, OXU_CHIPIRQEN_SET) | 3);

	return 0;

error_create_sph:
	usb_remove_hcd(info->hcd[0]);
	usb_put_hcd(info->hcd[0]);

error_create_otg:
	return ret;
}
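/*
 * A note on the final CHIPIRQEN_SET write above: only once both HCDs
 * are registered are the low two bits of the top-level interrupt
 * enable set, presumably the OTG and SPH core interrupt enables,
 * matching the "except OTG SPH core" remark in oxu_configuration().
 * Until then the chip forwards only the wakeup interrupt sources.
 */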
static int oxu_drv_probe(struct platform_device *pdev)
{
	struct resource *res;
	void *base;
	unsigned long memstart, memlen;
	int irq, ret;
	struct oxu_info *info;

	if (usb_disabled())
		return -ENODEV;

	/*
	 * Get the platform resources
	 */
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		dev_err(&pdev->dev,
			"no IRQ! Check %s setup!\n", dev_name(&pdev->dev));
		return -ENODEV;
	}
	irq = res->start;
	dev_dbg(&pdev->dev, "IRQ resource %d\n", irq);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "no registers address! Check %s setup!\n",
			dev_name(&pdev->dev));
		return -ENODEV;
	}
	memstart = res->start;
	memlen = res->end - res->start + 1;
	dev_dbg(&pdev->dev, "MEM resource %lx, len %lx\n", memstart, memlen);
	if (!request_mem_region(memstart, memlen,
				oxu_hc_driver.description)) {
		dev_dbg(&pdev->dev, "memory area already in use\n");
		return -EBUSY;
	}

	ret = irq_set_irq_type(irq, IRQF_TRIGGER_FALLING);
	if (ret) {
		dev_err(&pdev->dev, "error setting irq type\n");
		ret = -EFAULT;
		goto error_set_irq_type;
	}

	base = ioremap(memstart, memlen);
	if (!base) {
		dev_dbg(&pdev->dev, "error mapping memory\n");
		ret = -EFAULT;
		goto error_ioremap;
	}

	/* Allocate a driver data struct to hold useful info for both
	 * SPH & OTG devices
	 */
	info = kzalloc(sizeof(struct oxu_info), GFP_KERNEL);
	if (!info) {
		dev_dbg(&pdev->dev, "error allocating memory\n");
		ret = -ENOMEM;
		goto error_alloc;
	}
	platform_set_drvdata(pdev, info);

	ret = oxu_init(pdev, memstart, memlen, base, irq);
	if (ret < 0) {
		dev_dbg(&pdev->dev, "cannot init USB devices\n");
		goto error_init;
	}

	dev_info(&pdev->dev, "devices enabled and running\n");

	return 0;

error_init:
	kfree(info);
	platform_set_drvdata(pdev, NULL);

error_alloc:
	iounmap(base);

error_set_irq_type:
error_ioremap:
	release_mem_region(memstart, memlen);

	dev_err(&pdev->dev, "init %s fail, %d\n", dev_name(&pdev->dev), ret);
	return ret;
}
static void oxu_remove(struct platform_device *pdev, struct usb_hcd *hcd)
{
	usb_remove_hcd(hcd);
	usb_put_hcd(hcd);
}

static int oxu_drv_remove(struct platform_device *pdev)
{
	struct oxu_info *info = platform_get_drvdata(pdev);
	unsigned long memstart = info->hcd[0]->rsrc_start,
			memlen = info->hcd[0]->rsrc_len;
	void *base = info->hcd[0]->regs;

	oxu_remove(pdev, info->hcd[0]);
	oxu_remove(pdev, info->hcd[1]);

	iounmap(base);
	release_mem_region(memstart, memlen);

	kfree(info);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

static void oxu_drv_shutdown(struct platform_device *pdev)
{
	oxu_drv_remove(pdev);
}
#if 0
/* FIXME: TODO */
static int oxu_drv_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct usb_hcd *hcd = dev_get_drvdata(dev);

	return 0;
}

static int oxu_drv_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct usb_hcd *hcd = dev_get_drvdata(dev);

	return 0;
}
#else
#define oxu_drv_suspend	NULL
#define oxu_drv_resume	NULL
#endif
static struct platform_driver oxu_driver = {
	.probe		= oxu_drv_probe,
	.remove		= oxu_drv_remove,
	.shutdown	= oxu_drv_shutdown,
	.suspend	= oxu_drv_suspend,
	.resume		= oxu_drv_resume,
	.driver = {
		.name = "oxu210hp-hcd",
		.bus = &platform_bus_type
	}
};
static int __init oxu_module_init(void)
{
	return platform_driver_register(&oxu_driver);
}

static void __exit oxu_module_cleanup(void)
{
	platform_driver_unregister(&oxu_driver);
}

module_init(oxu_module_init);
module_exit(oxu_module_cleanup);

MODULE_DESCRIPTION("Oxford OXU210HP HCD driver - ver. " DRIVER_VERSION);
MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
MODULE_LICENSE("GPL");