oxu210hp-hcd.c

/*
 * Copyright (c) 2008 Rodolfo Giometti <giometti@linux.it>
 * Copyright (c) 2008 Eurotech S.p.A. <info@eurtech.it>
 *
 * This code is *strongly* based on EHCI-HCD code by David Brownell since
 * the chip is quasi-EHCI compatible.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <asm/irq.h>
#include <asm/unaligned.h>
#include <linux/irq.h>
#include <linux/platform_device.h>

#include "oxu210hp.h"

#define DRIVER_VERSION "0.0.50"

/*
 * Main defines
 */
#define oxu_dbg(oxu, fmt, args...) \
		dev_dbg(oxu_to_hcd(oxu)->self.controller , fmt , ## args)
#define oxu_err(oxu, fmt, args...) \
		dev_err(oxu_to_hcd(oxu)->self.controller , fmt , ## args)
#define oxu_info(oxu, fmt, args...) \
		dev_info(oxu_to_hcd(oxu)->self.controller , fmt , ## args)

static inline struct usb_hcd *oxu_to_hcd(struct oxu_hcd *oxu)
{
	return container_of((void *) oxu, struct usb_hcd, hcd_priv);
}

static inline struct oxu_hcd *hcd_to_oxu(struct usb_hcd *hcd)
{
	return (struct oxu_hcd *) (hcd->hcd_priv);
}
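
/* A sketch of the relationship the two helpers above rely on: struct
 * oxu_hcd is embedded in the hcd_priv[] tail of struct usb_hcd, so both
 * conversions are plain pointer arithmetic and cannot fail. Illustrative
 * only (it assumes the hc_driver instance defined elsewhere in this file
 * is named oxu_hc_driver):
 *
 *	struct usb_hcd *hcd = usb_create_hcd(&oxu_hc_driver, dev, "oxu");
 *	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
 *
 *	BUG_ON(oxu_to_hcd(oxu) != hcd);		round-trips by construction
 */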
/*
 * Debug stuff
 */
#undef OXU_URB_TRACE
#undef OXU_VERBOSE_DEBUG

#ifdef OXU_VERBOSE_DEBUG
#define oxu_vdbg oxu_dbg
#else
#define oxu_vdbg(oxu, fmt, args...)	/* Nop */
#endif

#ifdef DEBUG
static int __attribute__((__unused__))
dbg_status_buf(char *buf, unsigned len, const char *label, u32 status)
{
	return scnprintf(buf, len, "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s",
		label, label[0] ? " " : "", status,
		(status & STS_ASS) ? " Async" : "",
		(status & STS_PSS) ? " Periodic" : "",
		(status & STS_RECL) ? " Recl" : "",
		(status & STS_HALT) ? " Halt" : "",
		(status & STS_IAA) ? " IAA" : "",
		(status & STS_FATAL) ? " FATAL" : "",
		(status & STS_FLR) ? " FLR" : "",
		(status & STS_PCD) ? " PCD" : "",
		(status & STS_ERR) ? " ERR" : "",
		(status & STS_INT) ? " INT" : ""
	);
}

static int __attribute__((__unused__))
dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable)
{
	return scnprintf(buf, len, "%s%sintrenable %02x%s%s%s%s%s%s",
		label, label[0] ? " " : "", enable,
		(enable & STS_IAA) ? " IAA" : "",
		(enable & STS_FATAL) ? " FATAL" : "",
		(enable & STS_FLR) ? " FLR" : "",
		(enable & STS_PCD) ? " PCD" : "",
		(enable & STS_ERR) ? " ERR" : "",
		(enable & STS_INT) ? " INT" : ""
	);
}

static const char *const fls_strings[] =
	{ "1024", "512", "256", "??" };

static int dbg_command_buf(char *buf, unsigned len,
			   const char *label, u32 command)
{
	return scnprintf(buf, len,
		"%s%scommand %06x %s=%d ithresh=%d%s%s%s%s period=%s%s %s",
		label, label[0] ? " " : "", command,
		(command & CMD_PARK) ? "park" : "(park)",
		CMD_PARK_CNT(command),
		(command >> 16) & 0x3f,
		(command & CMD_LRESET) ? " LReset" : "",
		(command & CMD_IAAD) ? " IAAD" : "",
		(command & CMD_ASE) ? " Async" : "",
		(command & CMD_PSE) ? " Periodic" : "",
		fls_strings[(command >> 2) & 0x3],
		(command & CMD_RESET) ? " Reset" : "",
		(command & CMD_RUN) ? "RUN" : "HALT"
	);
}

static int dbg_port_buf(char *buf, unsigned len, const char *label,
			int port, u32 status)
{
	char *sig;

	/* signaling state */
	switch (status & (3 << 10)) {
	case 0 << 10:
		sig = "se0";
		break;
	case 1 << 10:
		sig = "k";	/* low speed */
		break;
	case 2 << 10:
		sig = "j";
		break;
	default:
		sig = "?";
		break;
	}

	return scnprintf(buf, len,
		"%s%sport %d status %06x%s%s sig=%s%s%s%s%s%s%s%s%s%s",
		label, label[0] ? " " : "", port, status,
		(status & PORT_POWER) ? " POWER" : "",
		(status & PORT_OWNER) ? " OWNER" : "",
		sig,
		(status & PORT_RESET) ? " RESET" : "",
		(status & PORT_SUSPEND) ? " SUSPEND" : "",
		(status & PORT_RESUME) ? " RESUME" : "",
		(status & PORT_OCC) ? " OCC" : "",
		(status & PORT_OC) ? " OC" : "",
		(status & PORT_PEC) ? " PEC" : "",
		(status & PORT_PE) ? " PE" : "",
		(status & PORT_CSC) ? " CSC" : "",
		(status & PORT_CONNECT) ? " CONNECT" : ""
	);
}

#else

static inline int __attribute__((__unused__))
dbg_status_buf(char *buf, unsigned len, const char *label, u32 status)
{ return 0; }

static inline int __attribute__((__unused__))
dbg_command_buf(char *buf, unsigned len, const char *label, u32 command)
{ return 0; }

static inline int __attribute__((__unused__))
dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable)
{ return 0; }

static inline int __attribute__((__unused__))
dbg_port_buf(char *buf, unsigned len, const char *label, int port, u32 status)
{ return 0; }

#endif /* DEBUG */

/* functions have the "wrong" filename when they're output... */
#define dbg_status(oxu, label, status) { \
	char _buf[80]; \
	dbg_status_buf(_buf, sizeof _buf, label, status); \
	oxu_dbg(oxu, "%s\n", _buf); \
}

#define dbg_cmd(oxu, label, command) { \
	char _buf[80]; \
	dbg_command_buf(_buf, sizeof _buf, label, command); \
	oxu_dbg(oxu, "%s\n", _buf); \
}

#define dbg_port(oxu, label, port, status) { \
	char _buf[80]; \
	dbg_port_buf(_buf, sizeof _buf, label, port, status); \
	oxu_dbg(oxu, "%s\n", _buf); \
}
/*
 * Module parameters
 */

/* Initial IRQ latency: faster than hw default */
static int log2_irq_thresh;	/* 0 to 6 */
module_param(log2_irq_thresh, int, S_IRUGO);
MODULE_PARM_DESC(log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");

/* Initial park setting: slower than hw default */
static unsigned park;
module_param(park, uint, S_IRUGO);
MODULE_PARM_DESC(park, "park setting; 1-3 back-to-back async packets");

/* For flakey hardware, ignore overcurrent indicators */
static bool ignore_oc;
module_param(ignore_oc, bool, S_IRUGO);
MODULE_PARM_DESC(ignore_oc, "ignore bogus hardware overcurrent indications");

static void ehci_work(struct oxu_hcd *oxu);
static int oxu_hub_control(struct usb_hcd *hcd,
			   u16 typeReq, u16 wValue, u16 wIndex,
			   char *buf, u16 wLength);

/*
 * Local functions
 */

/* Low level read/write registers functions */
static inline u32 oxu_readl(void *base, u32 reg)
{
	return readl(base + reg);
}

static inline void oxu_writel(void *base, u32 reg, u32 val)
{
	writel(val, base + reg);
}

static inline void timer_action_done(struct oxu_hcd *oxu,
				     enum ehci_timer_action action)
{
	clear_bit(action, &oxu->actions);
}

static inline void timer_action(struct oxu_hcd *oxu,
				enum ehci_timer_action action)
{
	if (!test_and_set_bit(action, &oxu->actions)) {
		unsigned long t;

		switch (action) {
		case TIMER_IAA_WATCHDOG:
			t = EHCI_IAA_JIFFIES;
			break;
		case TIMER_IO_WATCHDOG:
			t = EHCI_IO_JIFFIES;
			break;
		case TIMER_ASYNC_OFF:
			t = EHCI_ASYNC_JIFFIES;
			break;
		case TIMER_ASYNC_SHRINK:
		default:
			t = EHCI_SHRINK_JIFFIES;
			break;
		}
		t += jiffies;
		/* all timings except IAA watchdog can be overridden.
		 * async queue SHRINK often precedes IAA. while it's ready
		 * to go OFF neither can matter, and afterwards the IO
		 * watchdog stops unless there's still periodic traffic.
		 */
		if (action != TIMER_IAA_WATCHDOG
				&& t > oxu->watchdog.expires
				&& timer_pending(&oxu->watchdog))
			return;
		mod_timer(&oxu->watchdog, t);
	}
}
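
/* Worked example of the coalescing above (jiffy values illustrative):
 * suppose the watchdog is already pending at jiffies + 2 for a queued
 * TIMER_ASYNC_SHRINK, and timer_action(oxu, TIMER_IO_WATCHDOG) now
 * computes jiffies + 100. The action is not TIMER_IAA_WATCHDOG and the
 * new expiry is later than the pending one, so the function returns
 * without touching the timer: the earlier expiry wins, and both queued
 * actions are serviced from the same watchdog run. Only the IAA
 * watchdog unconditionally reprograms the timer, since (per the comment
 * above) its timing must not be overridden.
 */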
/*
 * handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done). There are two failure modes: "usec" microseconds have
 * passed (major hardware flakeout), or the register reads as all-ones
 * (hardware removed).
 *
 * That last failure should only happen in cases like physical cardbus eject
 * before driver shutdown. But it also seems to be caused by bugs in cardbus
 * bridge shutdown: shutting down the bridge before the devices using it.
 */
static int handshake(struct oxu_hcd *oxu, void __iomem *ptr,
		     u32 mask, u32 done, int usec)
{
	u32 result;

	do {
		result = readl(ptr);
		if (result == ~(u32)0)		/* card removed */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}
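
/* Typical use of handshake(), mirroring ehci_halt() below: wait up to
 * 2 ms (16 microframes of 125 us each) for the controller to report
 * the halted state:
 *
 *	if (handshake(oxu, &oxu->regs->status,
 *			STS_HALT, STS_HALT, 16 * 125) != 0)
 *		... controller never halted, or was removed ...
 *
 * The all-ones check makes a physically-removed chip return -ENODEV
 * rather than burning the whole timeout and returning -ETIMEDOUT.
 */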
/* Force HC to halt state from unknown (EHCI spec section 2.3) */
static int ehci_halt(struct oxu_hcd *oxu)
{
	u32 temp = readl(&oxu->regs->status);

	/* disable any irqs left enabled by previous code */
	writel(0, &oxu->regs->intr_enable);

	if ((temp & STS_HALT) != 0)
		return 0;

	temp = readl(&oxu->regs->command);
	temp &= ~CMD_RUN;
	writel(temp, &oxu->regs->command);
	return handshake(oxu, &oxu->regs->status,
			 STS_HALT, STS_HALT, 16 * 125);
}

/* Put TDI/ARC silicon into EHCI mode */
static void tdi_reset(struct oxu_hcd *oxu)
{
	u32 __iomem *reg_ptr;
	u32 tmp;

	reg_ptr = (u32 __iomem *)(((u8 __iomem *)oxu->regs) + 0x68);
	tmp = readl(reg_ptr);
	tmp |= 0x3;
	writel(tmp, reg_ptr);
}

/* Reset a non-running (STS_HALT == 1) controller */
static int ehci_reset(struct oxu_hcd *oxu)
{
	int retval;
	u32 command = readl(&oxu->regs->command);

	command |= CMD_RESET;
	dbg_cmd(oxu, "reset", command);
	writel(command, &oxu->regs->command);
	oxu_to_hcd(oxu)->state = HC_STATE_HALT;
	oxu->next_statechange = jiffies;
	retval = handshake(oxu, &oxu->regs->command,
			   CMD_RESET, 0, 250 * 1000);

	if (retval)
		return retval;

	tdi_reset(oxu);

	return retval;
}

/* Idle the controller (from running) */
static void ehci_quiesce(struct oxu_hcd *oxu)
{
	u32 temp;

#ifdef DEBUG
	if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
		BUG();
#endif

	/* wait for any schedule enables/disables to take effect */
	temp = readl(&oxu->regs->command) << 10;
	temp &= STS_ASS | STS_PSS;
	if (handshake(oxu, &oxu->regs->status, STS_ASS | STS_PSS,
		      temp, 16 * 125) != 0) {
		oxu_to_hcd(oxu)->state = HC_STATE_HALT;
		return;
	}

	/* then disable anything that's still active */
	temp = readl(&oxu->regs->command);
	temp &= ~(CMD_ASE | CMD_IAAD | CMD_PSE);
	writel(temp, &oxu->regs->command);

	/* hardware can take 16 microframes to turn off ... */
	if (handshake(oxu, &oxu->regs->status, STS_ASS | STS_PSS,
		      0, 16 * 125) != 0) {
		oxu_to_hcd(oxu)->state = HC_STATE_HALT;
		return;
	}
}

static int check_reset_complete(struct oxu_hcd *oxu, int index,
				u32 __iomem *status_reg, int port_status)
{
	if (!(port_status & PORT_CONNECT)) {
		oxu->reset_done[index] = 0;
		return port_status;
	}

	/* if reset finished and it's still not enabled -- handoff */
	if (!(port_status & PORT_PE)) {
		oxu_dbg(oxu, "Failed to enable port %d on root hub TT\n",
			index + 1);
		return port_status;
	} else
		oxu_dbg(oxu, "port %d high speed\n", index + 1);

	return port_status;
}

static void ehci_hub_descriptor(struct oxu_hcd *oxu,
				struct usb_hub_descriptor *desc)
{
	int ports = HCS_N_PORTS(oxu->hcs_params);
	u16 temp;

	desc->bDescriptorType = 0x29;
	desc->bPwrOn2PwrGood = 10;	/* oxu 1.0, 2.3.9 says 20ms max */
	desc->bHubContrCurrent = 0;

	desc->bNbrPorts = ports;
	temp = 1 + (ports / 8);
	desc->bDescLength = 7 + 2 * temp;

	/* ports removable, and usb 1.0 legacy PortPwrCtrlMask */
	memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
	memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);

	temp = 0x0008;		/* per-port overcurrent reporting */
	if (HCS_PPC(oxu->hcs_params))
		temp |= 0x0001;	/* per-port power control */
	else
		temp |= 0x0002;	/* no power switching */
	desc->wHubCharacteristics = (__force __u16)cpu_to_le16(temp);
}
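
/* Worked example of the descriptor math above: with ports = 2,
 * temp = 1 + (2 / 8) = 1, so bDescLength = 7 + 2 * 1 = 9 bytes:
 * one all-zero DeviceRemovable byte (every port removable) followed
 * by one all-ones PortPwrCtrlMask byte (usb 1.0 compatibility).
 */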
/* Allocate an OXU210HP on-chip memory data buffer
 *
 * An on-chip memory data buffer is required for each OXU210HP USB transfer.
 * Each transfer descriptor has one or more on-chip memory data buffers.
 *
 * Data buffers are allocated from a fixed-size pool of data blocks.
 * To minimise fragmentation and give reasonable memory utilisation,
 * data buffers are allocated with sizes that are power-of-2 multiples
 * of the block size, starting at an address that is a multiple of the
 * allocated size.
 *
 * FIXME: callers of this function require a buffer to be allocated for
 * len=0. This is a waste of on-chip memory and should be fixed. Then this
 * function should be changed to not allocate a buffer for len=0.
 */
static int oxu_buf_alloc(struct oxu_hcd *oxu, struct ehci_qtd *qtd, int len)
{
	int n_blocks;	/* minimum blocks needed to hold len */
	int a_blocks;	/* blocks allocated */
	int i, j;

	/* Don't allocate bigger than supported */
	if (len > BUFFER_SIZE * BUFFER_NUM) {
		oxu_err(oxu, "buffer too big (%d)\n", len);
		return -ENOMEM;
	}

	spin_lock(&oxu->mem_lock);

	/* Number of blocks needed to hold len */
	n_blocks = (len + BUFFER_SIZE - 1) / BUFFER_SIZE;

	/* Round the number of blocks up to the power of 2 */
	for (a_blocks = 1; a_blocks < n_blocks; a_blocks <<= 1)
		;

	/* Find a suitable available data buffer */
	for (i = 0; i < BUFFER_NUM;
	     i += max(a_blocks, (int)oxu->db_used[i])) {

		/* Check all the required blocks are available */
		for (j = 0; j < a_blocks; j++)
			if (oxu->db_used[i + j])
				break;

		if (j != a_blocks)
			continue;

		/* Allocate blocks found! */
		qtd->buffer = (void *) &oxu->mem->db_pool[i];
		qtd->buffer_dma = virt_to_phys(qtd->buffer);

		qtd->qtd_buffer_len = BUFFER_SIZE * a_blocks;
		oxu->db_used[i] = a_blocks;

		spin_unlock(&oxu->mem_lock);

		return 0;
	}

	/* Failed */
	spin_unlock(&oxu->mem_lock);

	return -ENOMEM;
}
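
/* Worked example of the allocator above (BUFFER_SIZE and BUFFER_NUM
 * come from oxu210hp.h; the block size here is illustrative only).
 * If BUFFER_SIZE were 1024 and len = 3000:
 *	n_blocks = (3000 + 1023) / 1024 = 3
 *	a_blocks = 4	(3 rounded up to the next power of 2)
 * The scan then looks for 4 consecutive free blocks. Because the outer
 * loop advances by max(a_blocks, db_used[i]), candidate start indexes
 * skip over existing allocations in whole power-of-2 chunks, which is
 * what keeps buffers aligned to a multiple of their own size and holds
 * fragmentation down.
 */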
static void oxu_buf_free(struct oxu_hcd *oxu, struct ehci_qtd *qtd)
{
	int index;

	spin_lock(&oxu->mem_lock);

	index = (qtd->buffer - (void *) &oxu->mem->db_pool[0])
		/ BUFFER_SIZE;
	oxu->db_used[index] = 0;
	qtd->qtd_buffer_len = 0;
	qtd->buffer_dma = 0;
	qtd->buffer = NULL;

	spin_unlock(&oxu->mem_lock);
}

static inline void ehci_qtd_init(struct ehci_qtd *qtd, dma_addr_t dma)
{
	memset(qtd, 0, sizeof *qtd);
	qtd->qtd_dma = dma;
	qtd->hw_token = cpu_to_le32(QTD_STS_HALT);
	qtd->hw_next = EHCI_LIST_END;
	qtd->hw_alt_next = EHCI_LIST_END;
	INIT_LIST_HEAD(&qtd->qtd_list);
}

static inline void oxu_qtd_free(struct oxu_hcd *oxu, struct ehci_qtd *qtd)
{
	int index;

	if (qtd->buffer)
		oxu_buf_free(oxu, qtd);

	spin_lock(&oxu->mem_lock);

	index = qtd - &oxu->mem->qtd_pool[0];
	oxu->qtd_used[index] = 0;

	spin_unlock(&oxu->mem_lock);
}

static struct ehci_qtd *ehci_qtd_alloc(struct oxu_hcd *oxu)
{
	int i;
	struct ehci_qtd *qtd = NULL;

	spin_lock(&oxu->mem_lock);

	for (i = 0; i < QTD_NUM; i++)
		if (!oxu->qtd_used[i])
			break;

	if (i < QTD_NUM) {
		qtd = (struct ehci_qtd *) &oxu->mem->qtd_pool[i];
		memset(qtd, 0, sizeof *qtd);

		qtd->hw_token = cpu_to_le32(QTD_STS_HALT);
		qtd->hw_next = EHCI_LIST_END;
		qtd->hw_alt_next = EHCI_LIST_END;
		INIT_LIST_HEAD(&qtd->qtd_list);

		qtd->qtd_dma = virt_to_phys(qtd);

		oxu->qtd_used[i] = 1;
	}

	spin_unlock(&oxu->mem_lock);

	return qtd;
}

static void oxu_qh_free(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	int index;

	spin_lock(&oxu->mem_lock);

	index = qh - &oxu->mem->qh_pool[0];
	oxu->qh_used[index] = 0;

	spin_unlock(&oxu->mem_lock);
}

static void qh_destroy(struct kref *kref)
{
	struct ehci_qh *qh = container_of(kref, struct ehci_qh, kref);
	struct oxu_hcd *oxu = qh->oxu;

	/* clean qtds first, and know this is not linked */
	if (!list_empty(&qh->qtd_list) || qh->qh_next.ptr) {
		oxu_dbg(oxu, "unused qh not empty!\n");
		BUG();
	}
	if (qh->dummy)
		oxu_qtd_free(oxu, qh->dummy);
	oxu_qh_free(oxu, qh);
}

static struct ehci_qh *oxu_qh_alloc(struct oxu_hcd *oxu)
{
	int i;
	struct ehci_qh *qh = NULL;

	spin_lock(&oxu->mem_lock);

	for (i = 0; i < QHEAD_NUM; i++)
		if (!oxu->qh_used[i])
			break;

	if (i < QHEAD_NUM) {
		qh = (struct ehci_qh *) &oxu->mem->qh_pool[i];
		memset(qh, 0, sizeof *qh);

		kref_init(&qh->kref);
		qh->oxu = oxu;
		qh->qh_dma = virt_to_phys(qh);
		INIT_LIST_HEAD(&qh->qtd_list);

		/* dummy td enables safe urb queuing */
		qh->dummy = ehci_qtd_alloc(oxu);
		if (qh->dummy == NULL) {
			oxu_dbg(oxu, "no dummy td\n");
			oxu->qh_used[i] = 0;
			qh = NULL;
			goto unlock;
		}

		oxu->qh_used[i] = 1;
	}
unlock:
	spin_unlock(&oxu->mem_lock);

	return qh;
}

/* to share a qh (cpu threads, or hc) */
static inline struct ehci_qh *qh_get(struct ehci_qh *qh)
{
	kref_get(&qh->kref);
	return qh;
}

static inline void qh_put(struct ehci_qh *qh)
{
	kref_put(&qh->kref, qh_destroy);
}

static void oxu_murb_free(struct oxu_hcd *oxu, struct oxu_murb *murb)
{
	int index;

	spin_lock(&oxu->mem_lock);

	index = murb - &oxu->murb_pool[0];
	oxu->murb_used[index] = 0;

	spin_unlock(&oxu->mem_lock);
}

static struct oxu_murb *oxu_murb_alloc(struct oxu_hcd *oxu)
{
	int i;
	struct oxu_murb *murb = NULL;

	spin_lock(&oxu->mem_lock);

	for (i = 0; i < MURB_NUM; i++)
		if (!oxu->murb_used[i])
			break;

	if (i < MURB_NUM) {
		murb = &(oxu->murb_pool)[i];
		oxu->murb_used[i] = 1;
	}

	spin_unlock(&oxu->mem_lock);

	return murb;
}

/* The queue heads and transfer descriptors are managed from pools tied
 * to each of the "per device" structures.
 * This is the initialisation and cleanup code.
 */
static void ehci_mem_cleanup(struct oxu_hcd *oxu)
{
	kfree(oxu->murb_pool);
	oxu->murb_pool = NULL;

	if (oxu->async)
		qh_put(oxu->async);
	oxu->async = NULL;

	del_timer(&oxu->urb_timer);

	oxu->periodic = NULL;

	/* shadow periodic table */
	kfree(oxu->pshadow);
	oxu->pshadow = NULL;
}

/* Remember to add cleanup code (above) if you add anything here.
 */
static int ehci_mem_init(struct oxu_hcd *oxu, gfp_t flags)
{
	int i;

	for (i = 0; i < oxu->periodic_size; i++)
		oxu->mem->frame_list[i] = EHCI_LIST_END;
	for (i = 0; i < QHEAD_NUM; i++)
		oxu->qh_used[i] = 0;
	for (i = 0; i < QTD_NUM; i++)
		oxu->qtd_used[i] = 0;

	oxu->murb_pool = kcalloc(MURB_NUM, sizeof(struct oxu_murb), flags);
	if (!oxu->murb_pool)
		goto fail;

	for (i = 0; i < MURB_NUM; i++)
		oxu->murb_used[i] = 0;

	oxu->async = oxu_qh_alloc(oxu);
	if (!oxu->async)
		goto fail;

	oxu->periodic = (__le32 *) &oxu->mem->frame_list;
	oxu->periodic_dma = virt_to_phys(oxu->periodic);

	for (i = 0; i < oxu->periodic_size; i++)
		oxu->periodic[i] = EHCI_LIST_END;

	/* software shadow of hardware table */
	oxu->pshadow = kcalloc(oxu->periodic_size, sizeof(void *), flags);
	if (oxu->pshadow != NULL)
		return 0;

fail:
	oxu_dbg(oxu, "couldn't init memory\n");
	ehci_mem_cleanup(oxu);
	return -ENOMEM;
}

/* Fill a qtd, returning how much of the buffer we were able to queue up.
 */
static int qtd_fill(struct ehci_qtd *qtd, dma_addr_t buf, size_t len,
		    int token, int maxpacket)
{
	int i, count;
	u64 addr = buf;

	/* one buffer entry per 4K ... first might be short or unaligned */
	qtd->hw_buf[0] = cpu_to_le32((u32)addr);
	qtd->hw_buf_hi[0] = cpu_to_le32((u32)(addr >> 32));
	count = 0x1000 - (buf & 0x0fff);	/* rest of that page */
	if (likely(len < count))		/* ... iff needed */
		count = len;
	else {
		buf += 0x1000;
		buf &= ~0x0fff;

		/* per-qtd limit: from 16K to 20K (best alignment) */
		for (i = 1; count < len && i < 5; i++) {
			addr = buf;
			qtd->hw_buf[i] = cpu_to_le32((u32)addr);
			qtd->hw_buf_hi[i] = cpu_to_le32((u32)(addr >> 32));
			buf += 0x1000;
			if ((count + 0x1000) < len)
				count += 0x1000;
			else
				count = len;
		}

		/* short packets may only terminate transfers */
		if (count != len)
			count -= (count % maxpacket);
	}
	qtd->hw_token = cpu_to_le32((count << 16) | token);
	qtd->length = count;

	return count;
}
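
/* Worked example for qtd_fill(): buf = 0x12345800, len = 0x1800,
 * maxpacket = 512. The first page entry can only cover the rest of its
 * 4K page, so count = 0x1000 - 0x800 = 0x800; the loop then adds a
 * second entry at 0x12346000 covering the remaining 0x1000 bytes, and
 * the whole 6K transfer fits in one qtd with count == len == 0x1800.
 * Had len exceeded what the five entries can map, count would be
 * trimmed back to a maxpacket multiple, since only the final (short)
 * packet may terminate a transfer.
 */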
static inline void qh_update(struct oxu_hcd *oxu,
			     struct ehci_qh *qh, struct ehci_qtd *qtd)
{
	/* writes to an active overlay are unsafe */
	BUG_ON(qh->qh_state != QH_STATE_IDLE);

	qh->hw_qtd_next = QTD_NEXT(qtd->qtd_dma);
	qh->hw_alt_next = EHCI_LIST_END;

	/* Except for control endpoints, we make hardware maintain data
	 * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
	 * and set the pseudo-toggle in udev. Only usb_clear_halt() will
	 * ever clear it.
	 */
	if (!(qh->hw_info1 & cpu_to_le32(1 << 14))) {
		unsigned is_out, epnum;

		is_out = !(qtd->hw_token & cpu_to_le32(1 << 8));
		epnum = (le32_to_cpup(&qh->hw_info1) >> 8) & 0x0f;
		if (unlikely(!usb_gettoggle(qh->dev, epnum, is_out))) {
			qh->hw_token &= ~cpu_to_le32(QTD_TOGGLE);
			usb_settoggle(qh->dev, epnum, is_out, 1);
		}
	}

	/* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
	wmb();
	qh->hw_token &= cpu_to_le32(QTD_TOGGLE | QTD_STS_PING);
}

/* If it weren't for a common silicon quirk (writing the dummy into the qh
 * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
 * recovery (including urb dequeue) would need software changes to a QH...
 */
static void qh_refresh(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	struct ehci_qtd *qtd;

	if (list_empty(&qh->qtd_list))
		qtd = qh->dummy;
	else {
		qtd = list_entry(qh->qtd_list.next,
				 struct ehci_qtd, qtd_list);
		/* first qtd may already be partially processed */
		if (cpu_to_le32(qtd->qtd_dma) == qh->hw_current)
			qtd = NULL;
	}

	if (qtd)
		qh_update(oxu, qh, qtd);
}

static void qtd_copy_status(struct oxu_hcd *oxu, struct urb *urb,
			    size_t length, u32 token)
{
	/* count IN/OUT bytes, not SETUP (even short packets) */
	if (likely(QTD_PID(token) != 2))
		urb->actual_length += length - QTD_LENGTH(token);

	/* don't modify error codes */
	if (unlikely(urb->status != -EINPROGRESS))
		return;

	/* force cleanup after short read; not always an error */
	if (unlikely(IS_SHORT_READ(token)))
		urb->status = -EREMOTEIO;

	/* serious "can't proceed" faults reported by the hardware */
	if (token & QTD_STS_HALT) {
		if (token & QTD_STS_BABBLE) {
			/* FIXME "must" disable babbling device's port too */
			urb->status = -EOVERFLOW;
		} else if (token & QTD_STS_MMF) {
			/* fs/ls interrupt xfer missed the complete-split */
			urb->status = -EPROTO;
		} else if (token & QTD_STS_DBE) {
			urb->status = (QTD_PID(token) == 1) /* IN ? */
				? -ENOSR	/* hc couldn't read data */
				: -ECOMM;	/* hc couldn't write data */
		} else if (token & QTD_STS_XACT) {
			/* timeout, bad crc, wrong PID, etc; retried */
			if (QTD_CERR(token))
				urb->status = -EPIPE;
			else {
				oxu_dbg(oxu, "devpath %s ep%d%s 3strikes\n",
					urb->dev->devpath,
					usb_pipeendpoint(urb->pipe),
					usb_pipein(urb->pipe) ? "in" : "out");
				urb->status = -EPROTO;
			}
		/* CERR nonzero + no errors + halt --> stall */
		} else if (QTD_CERR(token))
			urb->status = -EPIPE;
		else	/* unknown */
			urb->status = -EPROTO;

		oxu_vdbg(oxu, "dev%d ep%d%s qtd token %08x --> status %d\n",
			 usb_pipedevice(urb->pipe),
			 usb_pipeendpoint(urb->pipe),
			 usb_pipein(urb->pipe) ? "in" : "out",
			 token, urb->status);
	}
}

static void ehci_urb_done(struct oxu_hcd *oxu, struct urb *urb)
__releases(oxu->lock)
__acquires(oxu->lock)
{
	if (likely(urb->hcpriv != NULL)) {
		struct ehci_qh *qh = (struct ehci_qh *) urb->hcpriv;

		/* S-mask in a QH means it's an interrupt urb */
		if ((qh->hw_info2 & cpu_to_le32(QH_SMASK)) != 0) {
			/* ... update hc-wide periodic stats (for usbfs) */
			oxu_to_hcd(oxu)->self.bandwidth_int_reqs--;
		}
		qh_put(qh);
	}

	urb->hcpriv = NULL;
	switch (urb->status) {
	case -EINPROGRESS:	/* success */
		urb->status = 0;
	default:		/* fault */
		break;
	case -EREMOTEIO:	/* fault or normal */
		if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
			urb->status = 0;
		break;
	case -ECONNRESET:	/* canceled */
	case -ENOENT:
		break;
	}

#ifdef OXU_URB_TRACE
	oxu_dbg(oxu, "%s %s urb %p ep%d%s status %d len %d/%d\n",
		__func__, urb->dev->devpath, urb,
		usb_pipeendpoint(urb->pipe),
		usb_pipein(urb->pipe) ? "in" : "out",
		urb->status,
		urb->actual_length, urb->transfer_buffer_length);
#endif

	/* complete() can reenter this HCD */
	spin_unlock(&oxu->lock);
	usb_hcd_giveback_urb(oxu_to_hcd(oxu), urb, urb->status);
	spin_lock(&oxu->lock);
}

static void start_unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh);
static void unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh);

static void intr_deschedule(struct oxu_hcd *oxu, struct ehci_qh *qh);
static int qh_schedule(struct oxu_hcd *oxu, struct ehci_qh *qh);

#define HALT_BIT cpu_to_le32(QTD_STS_HALT)

/* Process and free completed qtds for a qh, returning URBs to drivers.
 * Chases up to qh->hw_current. Returns number of completions called,
 * indicating how much "real" work we did.
 */
static unsigned qh_completions(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	struct ehci_qtd *last = NULL, *end = qh->dummy;
	struct list_head *entry, *tmp;
	int stopped;
	unsigned count = 0;
	int do_status = 0;
	u8 state;
	struct oxu_murb *murb = NULL;

	if (unlikely(list_empty(&qh->qtd_list)))
		return count;

	/* completions (or tasks on other cpus) must never clobber HALT
	 * till we've gone through and cleaned everything up, even when
	 * they add urbs to this qh's queue or mark them for unlinking.
	 *
	 * NOTE: unlinking expects to be done in queue order.
	 */
	state = qh->qh_state;
	qh->qh_state = QH_STATE_COMPLETING;
	stopped = (state == QH_STATE_IDLE);

	/* remove de-activated QTDs from front of queue.
	 * after faults (including short reads), cleanup this urb
	 * then let the queue advance.
	 * if queue is stopped, handles unlinks.
	 */
	list_for_each_safe(entry, tmp, &qh->qtd_list) {
		struct ehci_qtd *qtd;
		struct urb *urb;
		u32 token = 0;

		qtd = list_entry(entry, struct ehci_qtd, qtd_list);
		urb = qtd->urb;

		/* Clean up any state from previous QTD ...*/
		if (last) {
			if (likely(last->urb != urb)) {
				if (last->urb->complete == NULL) {
					murb = (struct oxu_murb *) last->urb;
					last->urb = murb->main;
					if (murb->last) {
						ehci_urb_done(oxu, last->urb);
						count++;
					}
					oxu_murb_free(oxu, murb);
				} else {
					ehci_urb_done(oxu, last->urb);
					count++;
				}
			}
			oxu_qtd_free(oxu, last);
			last = NULL;
		}

		/* ignore urbs submitted during completions we reported */
		if (qtd == end)
			break;

		/* hardware copies qtd out of qh overlay */
		rmb();
		token = le32_to_cpu(qtd->hw_token);

		/* always clean up qtds the hc de-activated */
		if ((token & QTD_STS_ACTIVE) == 0) {
			if ((token & QTD_STS_HALT) != 0) {
				stopped = 1;

			/* magic dummy for some short reads; qh won't advance.
			 * that silicon quirk can kick in with this dummy too.
			 */
			} else if (IS_SHORT_READ(token) &&
				   !(qtd->hw_alt_next & EHCI_LIST_END)) {
				stopped = 1;
				goto halt;
			}

		/* stop scanning when we reach qtds the hc is using */
		} else if (likely(!stopped &&
				  HC_IS_RUNNING(oxu_to_hcd(oxu)->state))) {
			break;
		} else {
			stopped = 1;

			if (unlikely(!HC_IS_RUNNING(oxu_to_hcd(oxu)->state)))
				urb->status = -ESHUTDOWN;

			/* ignore active urbs unless some previous qtd
			 * for the urb faulted (including short read) or
			 * its urb was canceled. we may patch qh or qtds.
			 */
			if (likely(urb->status == -EINPROGRESS))
				continue;

			/* issue status after short control reads */
			if (unlikely(do_status != 0)
					&& QTD_PID(token) == 0 /* OUT */) {
				do_status = 0;
				continue;
			}

			/* token in overlay may be most current */
			if (state == QH_STATE_IDLE
					&& cpu_to_le32(qtd->qtd_dma)
						== qh->hw_current)
				token = le32_to_cpu(qh->hw_token);

			/* force halt for unlinked or blocked qh, so we'll
			 * patch the qh later and so that completions can't
			 * activate it while we "know" it's stopped.
			 */
			if ((HALT_BIT & qh->hw_token) == 0) {
halt:
				qh->hw_token |= HALT_BIT;
				wmb();
			}
		}

		/* Remove it from the queue */
		qtd_copy_status(oxu, urb->complete ?
				urb : ((struct oxu_murb *) urb)->main,
				qtd->length, token);
		if ((usb_pipein(qtd->urb->pipe)) &&
		    (NULL != qtd->transfer_buffer))
			memcpy(qtd->transfer_buffer, qtd->buffer, qtd->length);
		do_status = (urb->status == -EREMOTEIO)
				&& usb_pipecontrol(urb->pipe);

		if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
			last = list_entry(qtd->qtd_list.prev,
					  struct ehci_qtd, qtd_list);
			last->hw_next = qtd->hw_next;
		}
		list_del(&qtd->qtd_list);
		last = qtd;
	}

	/* last urb's completion might still need calling */
	if (likely(last != NULL)) {
		if (last->urb->complete == NULL) {
			murb = (struct oxu_murb *) last->urb;
			last->urb = murb->main;
			if (murb->last) {
				ehci_urb_done(oxu, last->urb);
				count++;
			}
			oxu_murb_free(oxu, murb);
		} else {
			ehci_urb_done(oxu, last->urb);
			count++;
		}
		oxu_qtd_free(oxu, last);
	}

	/* restore original state; caller must unlink or relink */
	qh->qh_state = state;

	/* be sure the hardware's done with the qh before refreshing
	 * it after fault cleanup, or recovering from silicon wrongly
	 * overlaying the dummy qtd (which reduces DMA chatter).
	 */
	if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END) {
		switch (state) {
		case QH_STATE_IDLE:
			qh_refresh(oxu, qh);
			break;
		case QH_STATE_LINKED:
			/* should be rare for periodic transfers,
			 * except maybe high bandwidth ...
			 */
			if ((cpu_to_le32(QH_SMASK)
					& qh->hw_info2) != 0) {
				intr_deschedule(oxu, qh);
				(void) qh_schedule(oxu, qh);
			} else
				unlink_async(oxu, qh);
			break;
		/* otherwise, unlink already started */
		}
	}

	return count;
}

/* High bandwidth multiplier, as encoded in highspeed endpoint descriptors */
#define hb_mult(wMaxPacketSize)		(1 + (((wMaxPacketSize) >> 11) & 0x03))
/* ... and packet size, for any kind of endpoint descriptor */
#define max_packet(wMaxPacketSize)	((wMaxPacketSize) & 0x07ff)
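
/* Worked example: a high speed, high bandwidth interrupt endpoint with
 * wMaxPacketSize = 0x0A40 has bits 12..11 = 01 and bits 10..0 = 0x240,
 * so hb_mult(0x0A40) = 2 transactions per microframe and
 * max_packet(0x0A40) = 576 bytes per transaction.
 */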
/* Reverse of qh_urb_transaction: free a list of TDs.
 * used for cleanup after errors, before HC sees an URB's TDs.
 */
static void qtd_list_free(struct oxu_hcd *oxu,
			  struct urb *urb, struct list_head *qtd_list)
{
	struct list_head *entry, *temp;

	list_for_each_safe(entry, temp, qtd_list) {
		struct ehci_qtd *qtd;

		qtd = list_entry(entry, struct ehci_qtd, qtd_list);
		list_del(&qtd->qtd_list);
		oxu_qtd_free(oxu, qtd);
	}
}

/* Create a list of filled qtds for this URB; won't link into qh.
 */
static struct list_head *qh_urb_transaction(struct oxu_hcd *oxu,
					    struct urb *urb,
					    struct list_head *head,
					    gfp_t flags)
{
	struct ehci_qtd *qtd, *qtd_prev;
	dma_addr_t buf;
	int len, maxpacket;
	int is_input;
	u32 token;
	void *transfer_buf = NULL;
	int ret;

	/*
	 * URBs map to sequences of QTDs: one logical transaction
	 */
	qtd = ehci_qtd_alloc(oxu);
	if (unlikely(!qtd))
		return NULL;
	list_add_tail(&qtd->qtd_list, head);
	qtd->urb = urb;

	token = QTD_STS_ACTIVE;
	token |= (EHCI_TUNE_CERR << 10);
	/* for split transactions, SplitXState initialized to zero */

	len = urb->transfer_buffer_length;
	is_input = usb_pipein(urb->pipe);
	if (!urb->transfer_buffer && urb->transfer_buffer_length && is_input)
		urb->transfer_buffer = phys_to_virt(urb->transfer_dma);

	if (usb_pipecontrol(urb->pipe)) {
		/* SETUP pid */
		ret = oxu_buf_alloc(oxu, qtd, sizeof(struct usb_ctrlrequest));
		if (ret)
			goto cleanup;

		qtd_fill(qtd, qtd->buffer_dma, sizeof(struct usb_ctrlrequest),
			 token | (2 /* "setup" */ << 8), 8);
		memcpy(qtd->buffer, qtd->urb->setup_packet,
		       sizeof(struct usb_ctrlrequest));

		/* ... and always at least one more pid */
		token ^= QTD_TOGGLE;
		qtd_prev = qtd;
		qtd = ehci_qtd_alloc(oxu);
		if (unlikely(!qtd))
			goto cleanup;
		qtd->urb = urb;
		qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
		list_add_tail(&qtd->qtd_list, head);

		/* for zero length DATA stages, STATUS is always IN */
		if (len == 0)
			token |= (1 /* "in" */ << 8);
	}

	/*
	 * Data transfer stage: buffer setup
	 */
	ret = oxu_buf_alloc(oxu, qtd, len);
	if (ret)
		goto cleanup;

	buf = qtd->buffer_dma;
	transfer_buf = urb->transfer_buffer;

	if (!is_input)
		memcpy(qtd->buffer, qtd->urb->transfer_buffer, len);

	if (is_input)
		token |= (1 /* "in" */ << 8);
	/* else it's already initted to "out" pid (0 << 8) */

	maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));

	/*
	 * buffer gets wrapped in one or more qtds;
	 * last one may be "short" (including zero len)
	 * and may serve as a control status ack
	 */
	for (;;) {
		int this_qtd_len;

		this_qtd_len = qtd_fill(qtd, buf, len, token, maxpacket);
		qtd->transfer_buffer = transfer_buf;
		len -= this_qtd_len;
		buf += this_qtd_len;
		transfer_buf += this_qtd_len;
		if (is_input)
			qtd->hw_alt_next = oxu->async->hw_alt_next;

		/* qh makes control packets use qtd toggle; maybe switch it */
		if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
			token ^= QTD_TOGGLE;

		if (likely(len <= 0))
			break;

		qtd_prev = qtd;
		qtd = ehci_qtd_alloc(oxu);
		if (unlikely(!qtd))
			goto cleanup;
		if (likely(len > 0)) {
			ret = oxu_buf_alloc(oxu, qtd, len);
			if (ret)
				goto cleanup;
		}
		qtd->urb = urb;
		qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
		list_add_tail(&qtd->qtd_list, head);
	}

	/* unless the bulk/interrupt caller wants a chance to clean
	 * up after short reads, hc should advance qh past this urb
	 */
	if (likely((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
		   || usb_pipecontrol(urb->pipe)))
		qtd->hw_alt_next = EHCI_LIST_END;

	/*
	 * control requests may need a terminating data "status" ack;
	 * bulk ones may need a terminating short packet (zero length).
	 */
	if (likely(urb->transfer_buffer_length != 0)) {
		int one_more = 0;

		if (usb_pipecontrol(urb->pipe)) {
			one_more = 1;
			token ^= 0x0100;	/* "in" <--> "out" */
			token |= QTD_TOGGLE;	/* force DATA1 */
		} else if (usb_pipebulk(urb->pipe)
				&& (urb->transfer_flags & URB_ZERO_PACKET)
				&& !(urb->transfer_buffer_length % maxpacket)) {
			one_more = 1;
		}
		if (one_more) {
			qtd_prev = qtd;
			qtd = ehci_qtd_alloc(oxu);
			if (unlikely(!qtd))
				goto cleanup;
			qtd->urb = urb;
			qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
			list_add_tail(&qtd->qtd_list, head);

			/* never any data in such packets */
			qtd_fill(qtd, 0, 0, token, 0);
		}
	}

	/* by default, enable interrupt on urb completion */
	qtd->hw_token |= cpu_to_le32(QTD_IOC);
	return head;

cleanup:
	qtd_list_free(oxu, urb, head);
	return NULL;
}
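
/* Sketch of what qh_urb_transaction() builds for a control read such as
 * GET_DESCRIPTOR (lengths illustrative): three qtds chained via hw_next,
 *	qtd 1: SETUP pid, 8 bytes	(the usb_ctrlrequest)
 *	qtd 2: IN pid, data stage	(toggle starts at DATA1)
 *	qtd 3: OUT pid, 0 bytes		(status ack, forced DATA1, IOC set)
 * For bulk, only the data-stage qtds (plus an optional zero-length
 * terminator when URB_ZERO_PACKET applies) are generated.
 */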
/* Each QH holds a qtd list; a QH is used for everything except iso.
 *
 * For interrupt urbs, the scheduler must set the microframe scheduling
 * mask(s) each time the QH gets scheduled. For highspeed, that's
 * just one microframe in the s-mask. For split interrupt transactions
 * there are additional complications: c-mask, maybe FSTNs.
 */
static struct ehci_qh *qh_make(struct oxu_hcd *oxu,
			       struct urb *urb, gfp_t flags)
{
	struct ehci_qh *qh = oxu_qh_alloc(oxu);
	u32 info1 = 0, info2 = 0;
	int is_input, type;
	int maxp = 0;

	if (!qh)
		return qh;

	/*
	 * init endpoint/device data for this QH
	 */
	info1 |= usb_pipeendpoint(urb->pipe) << 8;
	info1 |= usb_pipedevice(urb->pipe) << 0;

	is_input = usb_pipein(urb->pipe);
	type = usb_pipetype(urb->pipe);
	maxp = usb_maxpacket(urb->dev, urb->pipe, !is_input);

	/* Compute interrupt scheduling parameters just once, and save.
	 * - allowing for high bandwidth, how many nsec/uframe are used?
	 * - split transactions need a second CSPLIT uframe; same question
	 * - splits also need a schedule gap (for full/low speed I/O)
	 * - qh has a polling interval
	 *
	 * For control/bulk requests, the HC or TT handles these.
	 */
	if (type == PIPE_INTERRUPT) {
		qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
				is_input, 0,
				hb_mult(maxp) * max_packet(maxp)));
		qh->start = NO_FRAME;

		if (urb->dev->speed == USB_SPEED_HIGH) {
			qh->c_usecs = 0;
			qh->gap_uf = 0;

			qh->period = urb->interval >> 3;
			if (qh->period == 0 && urb->interval != 1) {
				/* NOTE interval 2 or 4 uframes could work.
				 * But interval 1 scheduling is simpler, and
				 * includes high bandwidth.
				 */
				dbg("intr period %d uframes, NYET!",
				    urb->interval);
				goto done;
			}
		} else {
			struct usb_tt *tt = urb->dev->tt;
			int think_time;

			/* gap is f(FS/LS transfer times) */
			qh->gap_uf = 1 + usb_calc_bus_time(urb->dev->speed,
					is_input, 0, maxp) / (125 * 1000);

			/* FIXME this just approximates SPLIT/CSPLIT times */
			if (is_input) {		/* SPLIT, gap, CSPLIT+DATA */
				qh->c_usecs = qh->usecs + HS_USECS(0);
				qh->usecs = HS_USECS(1);
			} else {		/* SPLIT+DATA, gap, CSPLIT */
				qh->usecs += HS_USECS(1);
				qh->c_usecs = HS_USECS(0);
			}

			think_time = tt ? tt->think_time : 0;
			qh->tt_usecs = NS_TO_US(think_time +
					usb_calc_bus_time(urb->dev->speed,
						is_input, 0, max_packet(maxp)));
			qh->period = urb->interval;
		}
	}

	/* support for tt scheduling, and access to toggles */
	qh->dev = urb->dev;

	/* using TT? */
	switch (urb->dev->speed) {
	case USB_SPEED_LOW:
		info1 |= (1 << 12);	/* EPS "low" */
		/* FALL THROUGH */

	case USB_SPEED_FULL:
		/* EPS 0 means "full" */
		if (type != PIPE_INTERRUPT)
			info1 |= (EHCI_TUNE_RL_TT << 28);
		if (type == PIPE_CONTROL) {
			info1 |= (1 << 27);	/* for TT */
			info1 |= 1 << 14;	/* toggle from qtd */
		}
		info1 |= maxp << 16;

		info2 |= (EHCI_TUNE_MULT_TT << 30);
		info2 |= urb->dev->ttport << 23;

		/* NOTE: if (PIPE_INTERRUPT) { scheduler sets c-mask } */

		break;

	case USB_SPEED_HIGH:	/* no TT involved */
		info1 |= (2 << 12);	/* EPS "high" */
		if (type == PIPE_CONTROL) {
			info1 |= (EHCI_TUNE_RL_HS << 28);
			info1 |= 64 << 16;	/* usb2 fixed maxpacket */
			info1 |= 1 << 14;	/* toggle from qtd */
			info2 |= (EHCI_TUNE_MULT_HS << 30);
		} else if (type == PIPE_BULK) {
			info1 |= (EHCI_TUNE_RL_HS << 28);
			info1 |= 512 << 16;	/* usb2 fixed maxpacket */
			info2 |= (EHCI_TUNE_MULT_HS << 30);
		} else {		/* PIPE_INTERRUPT */
			info1 |= max_packet(maxp) << 16;
			info2 |= hb_mult(maxp) << 30;
		}
		break;

	default:
		dbg("bogus dev %p speed %d", urb->dev, urb->dev->speed);
done:
		qh_put(qh);
		return NULL;
	}

	/* NOTE: if (PIPE_INTERRUPT) { scheduler sets s-mask } */

	/* init as live, toggle clear, advance to dummy */
	qh->qh_state = QH_STATE_IDLE;
	qh->hw_info1 = cpu_to_le32(info1);
	qh->hw_info2 = cpu_to_le32(info2);
	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), !is_input, 1);
	qh_refresh(oxu, qh);
	return qh;
}
/* Move qh (and its qtds) onto async queue; maybe enable queue.
 */
static void qh_link_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	__le32 dma = QH_NEXT(qh->qh_dma);
	struct ehci_qh *head;

	/* (re)start the async schedule? */
	head = oxu->async;
	timer_action_done(oxu, TIMER_ASYNC_OFF);
	if (!head->qh_next.qh) {
		u32 cmd = readl(&oxu->regs->command);

		if (!(cmd & CMD_ASE)) {
			/* in case a clear of CMD_ASE didn't take yet */
			(void) handshake(oxu, &oxu->regs->status,
					STS_ASS, 0, 150);
			cmd |= CMD_ASE | CMD_RUN;
			writel(cmd, &oxu->regs->command);
			oxu_to_hcd(oxu)->state = HC_STATE_RUNNING;
			/* posted write need not be known to HC yet ... */
		}
	}

	/* clear halt and/or toggle; and maybe recover from silicon quirk */
	if (qh->qh_state == QH_STATE_IDLE)
		qh_refresh(oxu, qh);

	/* splice right after start */
	qh->qh_next = head->qh_next;
	qh->hw_next = head->hw_next;
	wmb();

	head->qh_next.qh = qh;
	head->hw_next = dma;

	qh->qh_state = QH_STATE_LINKED;
	/* qtd completions reported later by interrupt */
}

#define QH_ADDR_MASK	cpu_to_le32(0x7f)

/*
 * For control/bulk/interrupt, return QH with these TDs appended.
 * Allocates and initializes the QH if necessary.
 * Returns null if it can't allocate a QH it needs to.
 * If the QH has TDs (urbs) already, that's great.
 */
static struct ehci_qh *qh_append_tds(struct oxu_hcd *oxu,
				struct urb *urb, struct list_head *qtd_list,
				int epnum, void **ptr)
{
	struct ehci_qh *qh = NULL;

	qh = (struct ehci_qh *) *ptr;
	if (unlikely(qh == NULL)) {
		/* can't sleep here, we have oxu->lock... */
		qh = qh_make(oxu, urb, GFP_ATOMIC);
		*ptr = qh;
	}
	if (likely(qh != NULL)) {
		struct ehci_qtd *qtd;

		if (unlikely(list_empty(qtd_list)))
			qtd = NULL;
		else
			qtd = list_entry(qtd_list->next, struct ehci_qtd,
					qtd_list);

		/* control qh may need patching ... */
		if (unlikely(epnum == 0)) {
			/* usb_reset_device() briefly reverts to address 0 */
			if (usb_pipedevice(urb->pipe) == 0)
				qh->hw_info1 &= ~QH_ADDR_MASK;
		}

		/* just one way to queue requests: swap with the dummy qtd.
		 * only hc or qh_refresh() ever modify the overlay.
		 */
		if (likely(qtd != NULL)) {
			struct ehci_qtd *dummy;
			dma_addr_t dma;
			__le32 token;

			/* to avoid racing the HC, use the dummy td instead of
			 * the first td of our list (becomes new dummy).  both
			 * tds stay deactivated until we're done, when the
			 * HC is allowed to fetch the old dummy (4.10.2).
			 */
			token = qtd->hw_token;
			qtd->hw_token = HALT_BIT;
			wmb();
			dummy = qh->dummy;

			dma = dummy->qtd_dma;
			*dummy = *qtd;
			dummy->qtd_dma = dma;

			list_del(&qtd->qtd_list);
			list_add(&dummy->qtd_list, qtd_list);
			list_splice(qtd_list, qh->qtd_list.prev);

			ehci_qtd_init(qtd, qtd->qtd_dma);
			qh->dummy = qtd;

			/* hc must see the new dummy at list end */
			dma = qtd->qtd_dma;
			qtd = list_entry(qh->qtd_list.prev,
					struct ehci_qtd, qtd_list);
			qtd->hw_next = QTD_NEXT(dma);

			/* let the hc process these next qtds */
			dummy->hw_token = (token & ~(0x80));
			wmb();
			dummy->hw_token = token;

			urb->hcpriv = qh_get(qh);
		}
	}
	return qh;
}

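/* Queue qtds for a control or bulk urb onto the async schedule.
 * The qtd list was built by qh_urb_transaction(); this takes oxu->lock,
 * appends the qtds to the endpoint's QH (creating one on first use),
 * and links that QH into the async ring if it was idle.
 */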
static int submit_async(struct oxu_hcd *oxu, struct urb *urb,
			struct list_head *qtd_list, gfp_t mem_flags)
{
	struct ehci_qtd *qtd;
	int epnum;
	unsigned long flags;
	struct ehci_qh *qh = NULL;
	int rc = 0;

	qtd = list_entry(qtd_list->next, struct ehci_qtd, qtd_list);
	epnum = urb->ep->desc.bEndpointAddress;

#ifdef OXU_URB_TRACE
	oxu_dbg(oxu, "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
		__func__, urb->dev->devpath, urb,
		epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
		urb->transfer_buffer_length,
		qtd, urb->ep->hcpriv);
#endif

	spin_lock_irqsave(&oxu->lock, flags);
	if (unlikely(!HCD_HW_ACCESSIBLE(oxu_to_hcd(oxu)))) {
		rc = -ESHUTDOWN;
		goto done;
	}

	qh = qh_append_tds(oxu, urb, qtd_list, epnum, &urb->ep->hcpriv);
	if (unlikely(qh == NULL)) {
		rc = -ENOMEM;
		goto done;
	}

	/* Control/bulk operations through TTs don't need scheduling,
	 * the HC and TT handle it when the TT has a buffer ready.
	 */
	if (likely(qh->qh_state == QH_STATE_IDLE))
		qh_link_async(oxu, qh_get(qh));
done:
	spin_unlock_irqrestore(&oxu->lock, flags);
	if (unlikely(qh == NULL))
		qtd_list_free(oxu, urb, qtd_list);
	return rc;
}

/* The async qh for the qtds being reclaimed is now unlinked from the HC */
static void end_unlink_async(struct oxu_hcd *oxu)
{
	struct ehci_qh *qh = oxu->reclaim;
	struct ehci_qh *next;

	timer_action_done(oxu, TIMER_IAA_WATCHDOG);

	qh->qh_state = QH_STATE_IDLE;
	qh->qh_next.qh = NULL;
	qh_put(qh);			/* refcount from reclaim */

	/* other unlink(s) may be pending (in QH_STATE_UNLINK_WAIT) */
	next = qh->reclaim;
	oxu->reclaim = next;
	oxu->reclaim_ready = 0;
	qh->reclaim = NULL;

	qh_completions(oxu, qh);

	if (!list_empty(&qh->qtd_list)
			&& HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
		qh_link_async(oxu, qh);
	else {
		qh_put(qh);		/* refcount from async list */

		/* it's not free to turn the async schedule on/off; leave it
		 * active but idle for a while once it empties.
		 */
		if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state)
				&& oxu->async->qh_next.qh == NULL)
			timer_action(oxu, TIMER_ASYNC_OFF);
	}

	if (next) {
		oxu->reclaim = NULL;
		start_unlink_async(oxu, next);
	}
}

/* makes sure the async qh will become idle */
/* caller must own oxu->lock */
static void start_unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	int cmd = readl(&oxu->regs->command);
	struct ehci_qh *prev;

#ifdef DEBUG
	assert_spin_locked(&oxu->lock);
	if (oxu->reclaim || (qh->qh_state != QH_STATE_LINKED
				&& qh->qh_state != QH_STATE_UNLINK_WAIT))
		BUG();
#endif

	/* stop async schedule right now? */
	if (unlikely(qh == oxu->async)) {
		/* can't get here without STS_ASS set */
		if (oxu_to_hcd(oxu)->state != HC_STATE_HALT
				&& !oxu->reclaim) {
			/* ... and CMD_IAAD clear */
			writel(cmd & ~CMD_ASE, &oxu->regs->command);
			wmb();
			/* handshake later, if we need to */
			timer_action_done(oxu, TIMER_ASYNC_OFF);
		}
		return;
	}

	qh->qh_state = QH_STATE_UNLINK;
	oxu->reclaim = qh = qh_get(qh);

	prev = oxu->async;
	while (prev->qh_next.qh != qh)
		prev = prev->qh_next.qh;

	prev->hw_next = qh->hw_next;
	prev->qh_next = qh->qh_next;
	wmb();

	if (unlikely(oxu_to_hcd(oxu)->state == HC_STATE_HALT)) {
		/* if (unlikely(qh->reclaim != 0))
		 *	this will recurse, probably not much
		 */
		end_unlink_async(oxu);
		return;
	}

	oxu->reclaim_ready = 0;
	cmd |= CMD_IAAD;
	writel(cmd, &oxu->regs->command);
	(void) readl(&oxu->regs->command);
	timer_action(oxu, TIMER_IAA_WATCHDOG);
}

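/* Scan the async schedule and report any completed qtds.  Empty QHs are
 * unlinked lazily: one we scanned on this pass is only a candidate, and
 * gets deferred via TIMER_ASYNC_SHRINK to dodge re-activation races.
 */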
static void scan_async(struct oxu_hcd *oxu)
{
	struct ehci_qh *qh;
	enum ehci_timer_action action = TIMER_IO_WATCHDOG;

	if (!++(oxu->stamp))
		oxu->stamp++;
	timer_action_done(oxu, TIMER_ASYNC_SHRINK);
rescan:
	qh = oxu->async->qh_next.qh;
	if (likely(qh != NULL)) {
		do {
			/* clean any finished work for this qh */
			if (!list_empty(&qh->qtd_list)
					&& qh->stamp != oxu->stamp) {
				int temp;

				/* unlinks could happen here; completion
				 * reporting drops the lock.  rescan using
				 * the latest schedule, but don't rescan
				 * qhs we already finished (no looping).
				 */
				qh = qh_get(qh);
				qh->stamp = oxu->stamp;
				temp = qh_completions(oxu, qh);
				qh_put(qh);
				if (temp != 0)
					goto rescan;
			}

			/* unlink idle entries, reducing HC PCI usage as well
			 * as HCD schedule-scanning costs.  delay for any qh
			 * we just scanned, there's a not-unusual case that it
			 * doesn't stay idle for long.
			 * (plus, avoids some kind of re-activation race.)
			 */
			if (list_empty(&qh->qtd_list)) {
				if (qh->stamp == oxu->stamp)
					action = TIMER_ASYNC_SHRINK;
				else if (!oxu->reclaim
					    && qh->qh_state == QH_STATE_LINKED)
					start_unlink_async(oxu, qh);
			}

			qh = qh->qh_next.qh;
		} while (qh);
	}
	if (action == TIMER_ASYNC_SHRINK)
		timer_action(oxu, TIMER_ASYNC_SHRINK);
}

/*
 * periodic_next_shadow - return "next" pointer on shadow list
 * @periodic: host pointer to qh/itd/sitd
 * @tag: hardware tag for type of this record
 */
static union ehci_shadow *periodic_next_shadow(union ehci_shadow *periodic,
						__le32 tag)
{
	switch (tag) {
	default:
	case Q_TYPE_QH:
		return &periodic->qh->qh_next;
	}
}

/* caller must hold oxu->lock */
static void periodic_unlink(struct oxu_hcd *oxu, unsigned frame, void *ptr)
{
	union ehci_shadow *prev_p = &oxu->pshadow[frame];
	__le32 *hw_p = &oxu->periodic[frame];
	union ehci_shadow here = *prev_p;

	/* find predecessor of "ptr"; hw and shadow lists are in sync */
	while (here.ptr && here.ptr != ptr) {
		prev_p = periodic_next_shadow(prev_p, Q_NEXT_TYPE(*hw_p));
		hw_p = here.hw_next;
		here = *prev_p;
	}
	/* an interrupt entry (at list end) could have been shared */
	if (!here.ptr)
		return;

	/* update shadow and hardware lists ... the old "next" pointers
	 * from ptr may still be in use, the caller updates them.
	 */
	*prev_p = *periodic_next_shadow(&here, Q_NEXT_TYPE(*hw_p));
	*hw_p = *here.hw_next;
}

/* how many of the uframe's 125 usecs are allocated? */
static unsigned short periodic_usecs(struct oxu_hcd *oxu,
					unsigned frame, unsigned uframe)
{
	__le32 *hw_p = &oxu->periodic[frame];
	union ehci_shadow *q = &oxu->pshadow[frame];
	unsigned usecs = 0;

	while (q->ptr) {
		switch (Q_NEXT_TYPE(*hw_p)) {
		case Q_TYPE_QH:
		default:
			/* is it in the S-mask? */
			if (q->qh->hw_info2 & cpu_to_le32(1 << uframe))
				usecs += q->qh->usecs;
			/* ... or C-mask? */
			if (q->qh->hw_info2 & cpu_to_le32(1 << (8 + uframe)))
				usecs += q->qh->c_usecs;
			hw_p = &q->qh->hw_next;
			q = &q->qh->qh_next;
			break;
		}
	}
#ifdef DEBUG
	if (usecs > 100)
		oxu_err(oxu, "uframe %d sched overrun: %d usecs\n",
			frame * 8 + uframe, usecs);
#endif
	return usecs;
}

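/* Turn on the periodic schedule (CMD_PSE), first making sure any prior
 * disable has taken effect; PSE/PSS change only at frame boundaries.
 */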
static int enable_periodic(struct oxu_hcd *oxu)
{
	u32 cmd;
	int status;

	/* did clearing PSE take effect yet?
	 * takes effect only at frame boundaries...
	 */
	status = handshake(oxu, &oxu->regs->status, STS_PSS, 0, 9 * 125);
	if (status != 0) {
		oxu_to_hcd(oxu)->state = HC_STATE_HALT;
		usb_hc_died(oxu_to_hcd(oxu));
		return status;
	}

	cmd = readl(&oxu->regs->command) | CMD_PSE;
	writel(cmd, &oxu->regs->command);
	/* posted write ... PSS happens later */
	oxu_to_hcd(oxu)->state = HC_STATE_RUNNING;

	/* make sure ehci_work scans these */
	oxu->next_uframe = readl(&oxu->regs->frame_index)
		% (oxu->periodic_size << 3);
	return 0;
}

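/* Turn off the periodic schedule (CMD_PSE); again, the status bit only
 * catches up with the command bit at a frame boundary.
 */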
static int disable_periodic(struct oxu_hcd *oxu)
{
	u32 cmd;
	int status;

	/* did setting PSE not take effect yet?
	 * takes effect only at frame boundaries...
	 */
	status = handshake(oxu, &oxu->regs->status, STS_PSS, STS_PSS, 9 * 125);
	if (status != 0) {
		oxu_to_hcd(oxu)->state = HC_STATE_HALT;
		usb_hc_died(oxu_to_hcd(oxu));
		return status;
	}

	cmd = readl(&oxu->regs->command) & ~CMD_PSE;
	writel(cmd, &oxu->regs->command);
	/* posted write ... */

	oxu->next_uframe = -1;
	return 0;
}

/* periodic schedule slots have iso tds (normal or split) first, then a
 * sparse tree for active interrupt transfers.
 *
 * this just links in a qh; caller guarantees uframe masks are set right.
 * no FSTN support (yet; oxu 0.96+)
 */
static int qh_link_periodic(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	unsigned i;
	unsigned period = qh->period;

	dev_dbg(&qh->dev->dev,
		"link qh%d-%04x/%p start %d [%d/%d us]\n",
		period, le32_to_cpup(&qh->hw_info2) & (QH_CMASK | QH_SMASK),
		qh, qh->start, qh->usecs, qh->c_usecs);

	/* high bandwidth, or otherwise every microframe */
	if (period == 0)
		period = 1;

	for (i = qh->start; i < oxu->periodic_size; i += period) {
		union ehci_shadow *prev = &oxu->pshadow[i];
		__le32 *hw_p = &oxu->periodic[i];
		union ehci_shadow here = *prev;
		__le32 type = 0;

		/* skip the iso nodes at list head */
		while (here.ptr) {
			type = Q_NEXT_TYPE(*hw_p);
			if (type == Q_TYPE_QH)
				break;
			prev = periodic_next_shadow(prev, type);
			hw_p = &here.qh->hw_next;
			here = *prev;
		}

		/* sorting each branch by period (slow-->fast)
		 * enables sharing interior tree nodes
		 */
		while (here.ptr && qh != here.qh) {
			if (qh->period > here.qh->period)
				break;
			prev = &here.qh->qh_next;
			hw_p = &here.qh->hw_next;
			here = *prev;
		}
		/* link in this qh, unless some earlier pass did that */
		if (qh != here.qh) {
			qh->qh_next = here;
			if (here.qh)
				qh->hw_next = *hw_p;
			wmb();
			prev->qh = qh;
			*hw_p = QH_NEXT(qh->qh_dma);
		}
	}
	qh->qh_state = QH_STATE_LINKED;
	qh_get(qh);

	/* update per-qh bandwidth for usbfs */
	oxu_to_hcd(oxu)->self.bandwidth_allocated += qh->period
		? ((qh->usecs + qh->c_usecs) / qh->period)
		: (qh->usecs * 8);

	/* maybe enable periodic schedule processing */
	if (!oxu->periodic_sched++)
		return enable_periodic(oxu);

	return 0;
}

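/* Undo qh_link_periodic(): remove the qh from every frame slot in its
 * period and release the bandwidth it had claimed.
 */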
static void qh_unlink_periodic(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	unsigned i;
	unsigned period;

	/* FIXME:
	 *   IF this isn't high speed
	 *   and this qh is active in the current uframe
	 *   (and overlay token SplitXstate is false?)
	 * THEN
	 *   qh->hw_info1 |= cpu_to_le32(1 << 7 "ignore");
	 */

	/* high bandwidth, or otherwise part of every microframe */
	period = qh->period;
	if (period == 0)
		period = 1;

	for (i = qh->start; i < oxu->periodic_size; i += period)
		periodic_unlink(oxu, i, qh);

	/* update per-qh bandwidth for usbfs */
	oxu_to_hcd(oxu)->self.bandwidth_allocated -= qh->period
		? ((qh->usecs + qh->c_usecs) / qh->period)
		: (qh->usecs * 8);

	dev_dbg(&qh->dev->dev,
		"unlink qh%d-%04x/%p start %d [%d/%d us]\n",
		qh->period,
		le32_to_cpup(&qh->hw_info2) & (QH_CMASK | QH_SMASK),
		qh, qh->start, qh->usecs, qh->c_usecs);

	/* qh->qh_next still "live" to HC */
	qh->qh_state = QH_STATE_UNLINK;
	qh->qh_next.ptr = NULL;
	qh_put(qh);

	/* maybe turn off periodic schedule */
	oxu->periodic_sched--;
	if (!oxu->periodic_sched)
		(void) disable_periodic(oxu);
}

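/* Unlink an interrupt qh from the periodic schedule and delay until the
 * HC can no longer be caching it, leaving the qh idle.
 */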
static void intr_deschedule(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	unsigned wait;

	qh_unlink_periodic(oxu, qh);

	/* simple/paranoid: always delay, expecting the HC needs to read
	 * qh->hw_next or finish a writeback after SPLIT/CSPLIT ... and
	 * expect khubd to clean up after any CSPLITs we won't issue.
	 * active high speed queues may need bigger delays...
	 */
	if (list_empty(&qh->qtd_list)
			|| (cpu_to_le32(QH_CMASK) & qh->hw_info2) != 0)
		wait = 2;
	else
		wait = 55;	/* worst case: 3 * 1024 */

	udelay(wait);
	qh->qh_state = QH_STATE_IDLE;
	qh->hw_next = EHCI_LIST_END;
	wmb();
}

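/* Bandwidth check: with "usecs" more claimed in this uframe, repeating
 * every "period" frames, does the slot stay within the 80% periodic
 * budget (100 usecs per uframe)?  Returns nonzero if it fits.
 */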
static int check_period(struct oxu_hcd *oxu,
			unsigned frame, unsigned uframe,
			unsigned period, unsigned usecs)
{
	int claimed;

	/* complete split running into next frame?
	 * given FSTN support, we could sometimes check...
	 */
	if (uframe >= 8)
		return 0;

	/*
	 * 80% periodic == 100 usec/uframe available
	 * convert "usecs we need" to "max already claimed"
	 */
	usecs = 100 - usecs;

	/* we "know" 2 and 4 uframe intervals were rejected; so
	 * for period 0, check _every_ microframe in the schedule.
	 */
	if (unlikely(period == 0)) {
		do {
			for (uframe = 0; uframe < 7; uframe++) {
				claimed = periodic_usecs(oxu, frame, uframe);
				if (claimed > usecs)
					return 0;
			}
		} while ((frame += 1) < oxu->periodic_size);

	/* just check the specified uframe, at that period */
	} else {
		do {
			claimed = periodic_usecs(oxu, frame, uframe);
			if (claimed > usecs)
				return 0;
		} while ((frame += period) < oxu->periodic_size);
	}

	return 1;
}

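/* Check whether an interrupt qh fits at this frame/uframe; returns 0
 * and sets *c_maskp on success.  A nonzero c_usecs (split transaction)
 * always fails here, since this driver schedules no CSPLITs.
 */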
static int check_intr_schedule(struct oxu_hcd *oxu,
				unsigned frame, unsigned uframe,
				const struct ehci_qh *qh, __le32 *c_maskp)
{
	int retval = -ENOSPC;

	if (qh->c_usecs && uframe >= 6)		/* FSTN territory? */
		goto done;

	if (!check_period(oxu, frame, uframe, qh->period, qh->usecs))
		goto done;
	if (!qh->c_usecs) {
		retval = 0;
		*c_maskp = 0;
		goto done;
	}

done:
	return retval;
}

/* "first fit" scheduling policy used the first time through,
 * or when the previous schedule slot can't be re-used.
 */
static int qh_schedule(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	int status;
	unsigned uframe;
	__le32 c_mask;
	unsigned frame;		/* 0..(qh->period - 1), or NO_FRAME */

	qh_refresh(oxu, qh);
	qh->hw_next = EHCI_LIST_END;
	frame = qh->start;

	/* reuse the previous schedule slots, if we can */
	if (frame < qh->period) {
		uframe = ffs(le32_to_cpup(&qh->hw_info2) & QH_SMASK);
		status = check_intr_schedule(oxu, frame, --uframe,
				qh, &c_mask);
	} else {
		uframe = 0;
		c_mask = 0;
		status = -ENOSPC;
	}

	/* else scan the schedule to find a group of slots such that all
	 * uframes have enough periodic bandwidth available.
	 */
	if (status) {
		/* "normal" case, uframing flexible except with splits */
		if (qh->period) {
			frame = qh->period - 1;
			do {
				for (uframe = 0; uframe < 8; uframe++) {
					status = check_intr_schedule(oxu,
							frame, uframe, qh,
							&c_mask);
					if (status == 0)
						break;
				}
			} while (status && frame--);

		/* qh->period == 0 means every uframe */
		} else {
			frame = 0;
			status = check_intr_schedule(oxu, 0, 0, qh, &c_mask);
		}
		if (status)
			goto done;
		qh->start = frame;

		/* reset S-frame and (maybe) C-frame masks */
		qh->hw_info2 &= cpu_to_le32(~(QH_CMASK | QH_SMASK));
		qh->hw_info2 |= qh->period
			? cpu_to_le32(1 << uframe)
			: cpu_to_le32(QH_SMASK);
		qh->hw_info2 |= c_mask;
	} else
		oxu_dbg(oxu, "reused qh %p schedule\n", qh);

	/* stuff into the periodic schedule */
	status = qh_link_periodic(oxu, qh);
done:
	return status;
}

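/* Submit an interrupt urb: put its qh on the periodic schedule if it
 * isn't already linked, then append the urb's qtds to that qh.
 */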
static int intr_submit(struct oxu_hcd *oxu, struct urb *urb,
			struct list_head *qtd_list, gfp_t mem_flags)
{
	unsigned epnum;
	unsigned long flags;
	struct ehci_qh *qh;
	int status = 0;
	struct list_head empty;

	/* get endpoint and transfer/schedule data */
	epnum = urb->ep->desc.bEndpointAddress;

	spin_lock_irqsave(&oxu->lock, flags);

	if (unlikely(!HCD_HW_ACCESSIBLE(oxu_to_hcd(oxu)))) {
		status = -ESHUTDOWN;
		goto done;
	}

	/* get qh and force any scheduling errors */
	INIT_LIST_HEAD(&empty);
	qh = qh_append_tds(oxu, urb, &empty, epnum, &urb->ep->hcpriv);
	if (qh == NULL) {
		status = -ENOMEM;
		goto done;
	}
	if (qh->qh_state == QH_STATE_IDLE) {
		status = qh_schedule(oxu, qh);
		if (status != 0)
			goto done;
	}

	/* then queue the urb's tds to the qh */
	qh = qh_append_tds(oxu, urb, qtd_list, epnum, &urb->ep->hcpriv);
	BUG_ON(qh == NULL);

	/* ... update usbfs periodic stats */
	oxu_to_hcd(oxu)->self.bandwidth_int_reqs++;

done:
	spin_unlock_irqrestore(&oxu->lock, flags);
	if (status)
		qtd_list_free(oxu, urb, qtd_list);

	return status;
}

static inline int itd_submit(struct oxu_hcd *oxu, struct urb *urb,
				gfp_t mem_flags)
{
	oxu_dbg(oxu, "iso support is missing!\n");
	return -ENOSYS;
}

static inline int sitd_submit(struct oxu_hcd *oxu, struct urb *urb,
				gfp_t mem_flags)
{
	oxu_dbg(oxu, "split iso support is missing!\n");
	return -ENOSYS;
}

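/* Scan the periodic schedule for completions, one frame at a time,
 * from the last scan point up to the uframe the HC is working on now.
 */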
static void scan_periodic(struct oxu_hcd *oxu)
{
	unsigned frame, clock, now_uframe, mod;
	unsigned modified;

	mod = oxu->periodic_size << 3;

	/*
	 * When running, scan from last scan point up to "now"
	 * else clean up by scanning everything that's left.
	 * Touches as few pages as possible: cache-friendly.
	 */
	now_uframe = oxu->next_uframe;
	if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
		clock = readl(&oxu->regs->frame_index);
	else
		clock = now_uframe + mod - 1;
	clock %= mod;

	for (;;) {
		union ehci_shadow q, *q_p;
		__le32 type, *hw_p;
		unsigned uframes;

		/* don't scan past the live uframe */
		frame = now_uframe >> 3;
		if (frame == (clock >> 3))
			uframes = now_uframe & 0x07;
		else {
			/* safe to scan the whole frame at once */
			now_uframe |= 0x07;
			uframes = 8;
		}

restart:
		/* scan each element in frame's queue for completions */
		q_p = &oxu->pshadow[frame];
		hw_p = &oxu->periodic[frame];
		q.ptr = q_p->ptr;
		type = Q_NEXT_TYPE(*hw_p);
		modified = 0;

		while (q.ptr != NULL) {
			union ehci_shadow temp;
			int live;

			live = HC_IS_RUNNING(oxu_to_hcd(oxu)->state);
			switch (type) {
			case Q_TYPE_QH:
				/* handle any completions */
				temp.qh = qh_get(q.qh);
				type = Q_NEXT_TYPE(q.qh->hw_next);
				q = q.qh->qh_next;
				modified = qh_completions(oxu, temp.qh);
				if (unlikely(list_empty(&temp.qh->qtd_list)))
					intr_deschedule(oxu, temp.qh);
				qh_put(temp.qh);
				break;
			default:
				dbg("corrupt type %d frame %d shadow %p",
					type, frame, q.ptr);
				q.ptr = NULL;
			}

			/* assume completion callbacks modify the queue */
			if (unlikely(modified))
				goto restart;
		}

		/* Stop when we catch up to the HC */

		/* FIXME: this assumes we won't get lapped when
		 * latencies climb; that should be rare, but...
		 * detect it, and just go all the way around.
		 * FLR might help detect this case, so long as latencies
		 * don't exceed periodic_size msec (default 1.024 sec).
		 */

		/* FIXME: likewise assumes HC doesn't halt mid-scan */

		if (now_uframe == clock) {
			unsigned now;

			if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
				break;
			oxu->next_uframe = now_uframe;
			now = readl(&oxu->regs->frame_index) % mod;
			if (now_uframe == now)
				break;

			/* rescan the rest of this frame, then ... */
			clock = now;
		} else {
			now_uframe++;
			now_uframe %= mod;
		}
	}
}

/* On some systems, leaving remote wakeup enabled prevents system shutdown.
 * The firmware seems to think that powering off is a wakeup event!
 * This routine turns off remote wakeup and everything else, on all ports.
 */
static void ehci_turn_off_all_ports(struct oxu_hcd *oxu)
{
	int port = HCS_N_PORTS(oxu->hcs_params);

	while (port--)
		writel(PORT_RWC_BITS, &oxu->regs->port_status[port]);
}

static void ehci_port_power(struct oxu_hcd *oxu, int is_on)
{
	unsigned port;

	if (!HCS_PPC(oxu->hcs_params))
		return;

	oxu_dbg(oxu, "...power%s ports...\n", is_on ? "up" : "down");
	for (port = HCS_N_PORTS(oxu->hcs_params); port > 0; )
		(void) oxu_hub_control(oxu_to_hcd(oxu),
				is_on ? SetPortFeature : ClearPortFeature,
				USB_PORT_FEAT_POWER,
				port--, NULL, 0);
	msleep(20);
}

/* Called from some interrupts, timers, and so on.
 * It calls driver completion functions, after dropping oxu->lock.
 */
static void ehci_work(struct oxu_hcd *oxu)
{
	timer_action_done(oxu, TIMER_IO_WATCHDOG);
	if (oxu->reclaim_ready)
		end_unlink_async(oxu);

	/* another CPU may drop oxu->lock during a schedule scan while
	 * it reports urb completions.  this flag guards against bogus
	 * attempts at re-entrant schedule scanning.
	 */
	if (oxu->scanning)
		return;
	oxu->scanning = 1;
	scan_async(oxu);
	if (oxu->next_uframe != -1)
		scan_periodic(oxu);
	oxu->scanning = 0;

	/* the IO watchdog guards against hardware or driver bugs that
	 * misplace IRQs, and should let us run completely without IRQs.
	 * such lossage has been observed on both VT6202 and VT8235.
	 */
	if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state) &&
			(oxu->async->qh_next.ptr != NULL ||
			 oxu->periodic_sched != 0))
		timer_action(oxu, TIMER_IO_WATCHDOG);
}

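/* Unlink an async qh using the IAA mechanism; if IAA is already busy
 * with another unlink, queue this qh behind it instead.
 */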
static void unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	/* if we need to use IAA and it's busy, defer */
	if (qh->qh_state == QH_STATE_LINKED
			&& oxu->reclaim
			&& HC_IS_RUNNING(oxu_to_hcd(oxu)->state)) {
		struct ehci_qh *last;

		for (last = oxu->reclaim;
				last->reclaim;
				last = last->reclaim)
			continue;
		qh->qh_state = QH_STATE_UNLINK_WAIT;
		last->reclaim = qh;

	/* bypass IAA if the hc can't care */
	} else if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state) && oxu->reclaim)
		end_unlink_async(oxu);

	/* something else might have unlinked the qh by now */
	if (qh->qh_state == QH_STATE_LINKED)
		start_unlink_async(oxu, qh);
}

/*
 * USB host controller methods
 */

static irqreturn_t oxu210_hcd_irq(struct usb_hcd *hcd)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	u32 status, pcd_status = 0;
	int bh;

	spin_lock(&oxu->lock);

	status = readl(&oxu->regs->status);

	/* e.g. cardbus physical eject */
	if (status == ~(u32) 0) {
		oxu_dbg(oxu, "device removed\n");
		goto dead;
	}

	/* Shared IRQ? */
	status &= INTR_MASK;
	if (!status || unlikely(hcd->state == HC_STATE_HALT)) {
		spin_unlock(&oxu->lock);
		return IRQ_NONE;
	}

	/* clear (just) interrupts */
	writel(status, &oxu->regs->status);
	readl(&oxu->regs->command);	/* unblock posted write */
	bh = 0;

#ifdef OXU_VERBOSE_DEBUG
	/* unrequested/ignored: Frame List Rollover */
	dbg_status(oxu, "irq", status);
#endif

	/* INT, ERR, and IAA interrupt rates can be throttled */

	/* normal [4.15.1.2] or error [4.15.1.1] completion */
	if (likely((status & (STS_INT|STS_ERR)) != 0))
		bh = 1;

	/* complete the unlinking of some qh [4.15.2.3] */
	if (status & STS_IAA) {
		oxu->reclaim_ready = 1;
		bh = 1;
	}

	/* remote wakeup [4.3.1] */
	if (status & STS_PCD) {
		unsigned i = HCS_N_PORTS(oxu->hcs_params);
		pcd_status = status;

		/* resume root hub? */
		if (!(readl(&oxu->regs->command) & CMD_RUN))
			usb_hcd_resume_root_hub(hcd);

		while (i--) {
			int pstatus = readl(&oxu->regs->port_status[i]);

			if (pstatus & PORT_OWNER)
				continue;
			if (!(pstatus & PORT_RESUME)
					|| oxu->reset_done[i] != 0)
				continue;

			/* start 20 msec resume signaling from this port,
			 * and make khubd collect PORT_STAT_C_SUSPEND to
			 * stop that signaling.
			 */
			oxu->reset_done[i] = jiffies + msecs_to_jiffies(20);
			oxu_dbg(oxu, "port %d remote wakeup\n", i + 1);
			mod_timer(&hcd->rh_timer, oxu->reset_done[i]);
		}
	}

	/* PCI errors [4.15.2.4] */
	if (unlikely((status & STS_FATAL) != 0)) {
		/* bogus "fatal" IRQs appear on some chips... why? */
		status = readl(&oxu->regs->status);
		dbg_cmd(oxu, "fatal", readl(&oxu->regs->command));
		dbg_status(oxu, "fatal", status);
		if (status & STS_HALT) {
			oxu_err(oxu, "fatal error\n");
dead:
			ehci_reset(oxu);
			writel(0, &oxu->regs->configured_flag);
			usb_hc_died(hcd);
			/* generic layer kills/unlinks all urbs, then
			 * uses oxu_stop to clean up the rest
			 */
			bh = 1;
		}
	}

	if (bh)
		ehci_work(oxu);
	spin_unlock(&oxu->lock);
	if (pcd_status & STS_PCD)
		usb_hcd_poll_rh_status(hcd);
	return IRQ_HANDLED;
}

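/* Chip-level IRQ handler: masks the chip interrupt sources, routes OTG
 * or SPH interrupts to oxu210_hcd_irq(), then restores the mask.
 */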
static irqreturn_t oxu_irq(struct usb_hcd *hcd)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	int ret = IRQ_HANDLED;

	u32 status = oxu_readl(hcd->regs, OXU_CHIPIRQSTATUS);
	u32 enable = oxu_readl(hcd->regs, OXU_CHIPIRQEN_SET);

	/* Disable all chip interrupts */
	oxu_writel(hcd->regs, OXU_CHIPIRQEN_CLR, enable);

	if ((oxu->is_otg && (status & OXU_USBOTGI)) ||
		(!oxu->is_otg && (status & OXU_USBSPHI)))
		oxu210_hcd_irq(hcd);
	else
		ret = IRQ_NONE;

	/* Re-enable all chip interrupts */
	oxu_writel(hcd->regs, OXU_CHIPIRQEN_SET, enable);

	return ret;
}

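/* Watchdog timer: recovers from a lost IAA irq, turns the async
 * schedule off once it has idled, and lets the driver make progress
 * even without IRQs.
 */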
static void oxu_watchdog(unsigned long param)
{
	struct oxu_hcd *oxu = (struct oxu_hcd *) param;
	unsigned long flags;

	spin_lock_irqsave(&oxu->lock, flags);

	/* lost IAA irqs wedge things badly; seen with a vt8235 */
	if (oxu->reclaim) {
		u32 status = readl(&oxu->regs->status);
		if (status & STS_IAA) {
			oxu_vdbg(oxu, "lost IAA\n");
			writel(STS_IAA, &oxu->regs->status);
			oxu->reclaim_ready = 1;
		}
	}

	/* stop async processing after it's idled a bit */
	if (test_bit(TIMER_ASYNC_OFF, &oxu->actions))
		start_unlink_async(oxu, oxu->async);

	/* oxu could run by timer, without IRQs ... */
	ehci_work(oxu);

	spin_unlock_irqrestore(&oxu->lock, flags);
}

/* One-time init, only for memory state.
 */
static int oxu_hcd_init(struct usb_hcd *hcd)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	u32 temp;
	int retval;
	u32 hcc_params;

	spin_lock_init(&oxu->lock);

	init_timer(&oxu->watchdog);
	oxu->watchdog.function = oxu_watchdog;
	oxu->watchdog.data = (unsigned long) oxu;

	/*
	 * hw default: 1K periodic list heads, one per frame.
	 * periodic_size can shrink by USBCMD update if hcc_params allows.
	 */
	oxu->periodic_size = DEFAULT_I_TDPS;
	retval = ehci_mem_init(oxu, GFP_KERNEL);
	if (retval < 0)
		return retval;

	/* controllers may cache some of the periodic schedule ... */
	hcc_params = readl(&oxu->caps->hcc_params);
	if (HCC_ISOC_CACHE(hcc_params))		/* full frame cache */
		oxu->i_thresh = 8;
	else					/* N microframes cached */
		oxu->i_thresh = 2 + HCC_ISOC_THRES(hcc_params);

	oxu->reclaim = NULL;
	oxu->reclaim_ready = 0;
	oxu->next_uframe = -1;

	/*
	 * dedicate a qh for the async ring head, since we couldn't unlink
	 * a 'real' qh without stopping the async schedule [4.8].  use it
	 * as the 'reclamation list head' too.
	 * its dummy is used in hw_alt_next of many tds, to prevent the qh
	 * from automatically advancing to the next td after short reads.
	 */
	oxu->async->qh_next.qh = NULL;
	oxu->async->hw_next = QH_NEXT(oxu->async->qh_dma);
	oxu->async->hw_info1 = cpu_to_le32(QH_HEAD);
	oxu->async->hw_token = cpu_to_le32(QTD_STS_HALT);
	oxu->async->hw_qtd_next = EHCI_LIST_END;
	oxu->async->qh_state = QH_STATE_LINKED;
	oxu->async->hw_alt_next = QTD_NEXT(oxu->async->dummy->qtd_dma);

	/* clear interrupt enables, set irq latency */
	if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
		log2_irq_thresh = 0;
	temp = 1 << (16 + log2_irq_thresh);
	if (HCC_CANPARK(hcc_params)) {
		/* HW default park == 3, on hardware that supports it (like
		 * NVidia and ALI silicon), maximizes throughput on the async
		 * schedule by avoiding QH fetches between transfers.
		 *
		 * With fast usb storage devices and NForce2, "park" seems to
		 * make problems: throughput reduction (!), data errors...
		 */
		if (park) {
			park = min(park, (unsigned) 3);
			temp |= CMD_PARK;
			temp |= park << 8;
		}
		oxu_dbg(oxu, "park %d\n", park);
	}
	if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
		/* periodic schedule size can be smaller than default */
		temp &= ~(3 << 2);
		temp |= (EHCI_TUNE_FLS << 2);
	}
	oxu->command = temp;

	return 0;
}

/* Called during probe() after chip reset completes.
 */
static int oxu_reset(struct usb_hcd *hcd)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	int ret;

	spin_lock_init(&oxu->mem_lock);
	INIT_LIST_HEAD(&oxu->urb_list);
	oxu->urb_len = 0;

	/* FIXME */
	hcd->self.controller->dma_mask = NULL;

	if (oxu->is_otg) {
		oxu->caps = hcd->regs + OXU_OTG_CAP_OFFSET;
		oxu->regs = hcd->regs + OXU_OTG_CAP_OFFSET + \
			HC_LENGTH(readl(&oxu->caps->hc_capbase));

		oxu->mem = hcd->regs + OXU_SPH_MEM;
	} else {
		oxu->caps = hcd->regs + OXU_SPH_CAP_OFFSET;
		oxu->regs = hcd->regs + OXU_SPH_CAP_OFFSET + \
			HC_LENGTH(readl(&oxu->caps->hc_capbase));

		oxu->mem = hcd->regs + OXU_OTG_MEM;
	}

	oxu->hcs_params = readl(&oxu->caps->hcs_params);
	oxu->sbrn = 0x20;

	ret = oxu_hcd_init(hcd);
	if (ret)
		return ret;

	return 0;
}

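/* Start the controller: reset it, program the schedule pointers, then
 * set CMD_RUN and the configured flag and enable interrupts.
 */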
static int oxu_run(struct usb_hcd *hcd)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	int retval;
	u32 temp, hcc_params;

	hcd->uses_new_polling = 1;

	/* EHCI spec section 4.1 */
	retval = ehci_reset(oxu);
	if (retval != 0) {
		ehci_mem_cleanup(oxu);
		return retval;
	}
	writel(oxu->periodic_dma, &oxu->regs->frame_list);
	writel((u32) oxu->async->qh_dma, &oxu->regs->async_next);

	/* hcc_params controls whether oxu->regs->segment must (!!!)
	 * be used; it constrains QH/ITD/SITD and QTD locations.
	 * pci_pool consistent memory always uses segment zero.
	 * streaming mappings for I/O buffers, like pci_map_single(),
	 * can return segments above 4GB, if the device allows.
	 *
	 * NOTE: the dma mask is visible through dma_supported(), so
	 * drivers can pass this info along ... like NETIF_F_HIGHDMA,
	 * Scsi_Host.highmem_io, and so forth.  It's readonly to all
	 * host side drivers though.
	 */
	hcc_params = readl(&oxu->caps->hcc_params);
	if (HCC_64BIT_ADDR(hcc_params))
		writel(0, &oxu->regs->segment);

	oxu->command &= ~(CMD_LRESET | CMD_IAAD | CMD_PSE |
				CMD_ASE | CMD_RESET);
	oxu->command |= CMD_RUN;
	writel(oxu->command, &oxu->regs->command);
	dbg_cmd(oxu, "init", oxu->command);

	/*
	 * Start, enabling full USB 2.0 functionality ... usb 1.1 devices
	 * are explicitly handed to companion controller(s), so no TT is
	 * involved with the root hub.  (Except where one is integrated,
	 * and there's no companion controller unless maybe for USB OTG.)
	 */
	hcd->state = HC_STATE_RUNNING;
	writel(FLAG_CF, &oxu->regs->configured_flag);
	readl(&oxu->regs->command);	/* unblock posted writes */

	temp = HC_VERSION(readl(&oxu->caps->hc_capbase));
	oxu_info(oxu, "USB %x.%x started, quasi-EHCI %x.%02x, driver %s%s\n",
		((oxu->sbrn & 0xf0)>>4), (oxu->sbrn & 0x0f),
		temp >> 8, temp & 0xff, DRIVER_VERSION,
		ignore_oc ? ", overcurrent ignored" : "");

	writel(INTR_MASK, &oxu->regs->intr_enable); /* Turn On Interrupts */

	return 0;
}

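/* Stop the controller: power down the ports, quiesce and reset the
 * silicon, disable interrupts, and free the schedule memory.
 */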
static void oxu_stop(struct usb_hcd *hcd)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);

	/* Turn off port power on all root hub ports. */
	ehci_port_power(oxu, 0);

	/* no more interrupts ... */
	del_timer_sync(&oxu->watchdog);

	spin_lock_irq(&oxu->lock);
	if (HC_IS_RUNNING(hcd->state))
		ehci_quiesce(oxu);

	ehci_reset(oxu);
	writel(0, &oxu->regs->intr_enable);
	spin_unlock_irq(&oxu->lock);

	/* let companion controllers work when we aren't */
	writel(0, &oxu->regs->configured_flag);

	/* root hub is shut down separately (first, when possible) */
	spin_lock_irq(&oxu->lock);
	if (oxu->async)
		ehci_work(oxu);
	spin_unlock_irq(&oxu->lock);
	ehci_mem_cleanup(oxu);

	dbg_status(oxu, "oxu_stop completed", readl(&oxu->regs->status));
}

/* Kick in for silicon on any bus (not just pci, etc).
 * This forcibly disables dma and IRQs, helping kexec and other cases
 * where the next system software may expect clean state.
 */
static void oxu_shutdown(struct usb_hcd *hcd)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);

	(void) ehci_halt(oxu);
	ehci_turn_off_all_ports(oxu);

	/* make BIOS/etc use companion controller during reboot */
	writel(0, &oxu->regs->configured_flag);

	/* unblock posted writes */
	readl(&oxu->regs->configured_flag);
}

/* Non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 *
 * urb + dev is in hcd.self.controller.urb_list
 * we're queueing TDs onto software and hardware lists
 *
 * hcd-specific init for hcpriv hasn't been done yet
 *
 * NOTE: control, bulk, and interrupt share the same code to append TDs
 * to a (possibly active) QH, and the same QH scanning code.
 */
static int __oxu_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
				gfp_t mem_flags)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	struct list_head qtd_list;

	INIT_LIST_HEAD(&qtd_list);

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
	case PIPE_BULK:
	default:
		if (!qh_urb_transaction(oxu, urb, &qtd_list, mem_flags))
			return -ENOMEM;
		return submit_async(oxu, urb, &qtd_list, mem_flags);

	case PIPE_INTERRUPT:
		if (!qh_urb_transaction(oxu, urb, &qtd_list, mem_flags))
			return -ENOMEM;
		return intr_submit(oxu, urb, &qtd_list, mem_flags);

	case PIPE_ISOCHRONOUS:
		if (urb->dev->speed == USB_SPEED_HIGH)
			return itd_submit(oxu, urb, mem_flags);
		else
			return sitd_submit(oxu, urb, mem_flags);
	}
}

/* This function breaks bulk URBs with large transfer buffers into
 * 4096-byte micro URBs and processes them in sequence.
 */
static int oxu_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
				gfp_t mem_flags)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	int num, rem;
	int transfer_buffer_length;
	void *transfer_buffer;
	struct urb *murb;
	int i, ret;

	/* If not bulk pipe just enqueue the URB */
	if (!usb_pipebulk(urb->pipe))
		return __oxu_urb_enqueue(hcd, urb, mem_flags);

	/* Otherwise we should verify the USB transfer buffer size! */
	transfer_buffer = urb->transfer_buffer;
	transfer_buffer_length = urb->transfer_buffer_length;

	num = urb->transfer_buffer_length / 4096;
	rem = urb->transfer_buffer_length % 4096;
	if (rem != 0)
		num++;

	/* If URB is smaller than 4096 bytes just enqueue it! */
	if (num == 1)
		return __oxu_urb_enqueue(hcd, urb, mem_flags);

	/* Ok, we have more job to do! :) */

	for (i = 0; i < num - 1; i++) {
		/* Get a free micro URB; poll till one becomes available */
		do {
			murb = (struct urb *) oxu_murb_alloc(oxu);
			if (!murb)
				schedule();
		} while (!murb);

		/* Copying the urb */
		memcpy(murb, urb, sizeof(struct urb));

		murb->transfer_buffer_length = 4096;
		murb->transfer_buffer = transfer_buffer + i * 4096;

		/* A NULL complete pointer encodes that this is a micro urb */
		murb->complete = NULL;

		((struct oxu_murb *) murb)->main = urb;
		((struct oxu_murb *) murb)->last = 0;

		/* This loop is to guarantee urb to be processed when there's
		 * not enough resources at a particular time by retrying.
		 */
		do {
			ret = __oxu_urb_enqueue(hcd, murb, mem_flags);
			if (ret)
				schedule();
		} while (ret);
	}

	/* Last urb requires special handling */

	/* Get a free micro URB; poll till one becomes available */
	do {
		murb = (struct urb *) oxu_murb_alloc(oxu);
		if (!murb)
			schedule();
	} while (!murb);

	/* Copying the urb */
	memcpy(murb, urb, sizeof(struct urb));

	murb->transfer_buffer_length = rem > 0 ? rem : 4096;
	murb->transfer_buffer = transfer_buffer + (num - 1) * 4096;

	/* A NULL complete pointer encodes that this is a micro urb */
	murb->complete = NULL;

	((struct oxu_murb *) murb)->main = urb;
	((struct oxu_murb *) murb)->last = 1;

	do {
		ret = __oxu_urb_enqueue(hcd, murb, mem_flags);
		if (ret)
			schedule();
	} while (ret);

	return ret;
}

/* Remove from hardware lists.
 * Completions normally happen asynchronously
 */
static int oxu_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	struct ehci_qh *qh;
	unsigned long flags;

	spin_lock_irqsave(&oxu->lock, flags);
	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
	case PIPE_BULK:
	default:
		qh = (struct ehci_qh *) urb->hcpriv;
		if (!qh)
			break;
		unlink_async(oxu, qh);
		break;

	case PIPE_INTERRUPT:
		qh = (struct ehci_qh *) urb->hcpriv;
		if (!qh)
			break;
		switch (qh->qh_state) {
		case QH_STATE_LINKED:
			intr_deschedule(oxu, qh);
			/* FALL THROUGH */
		case QH_STATE_IDLE:
			qh_completions(oxu, qh);
			break;
		default:
			oxu_dbg(oxu, "bogus qh %p state %d\n",
					qh, qh->qh_state);
			goto done;
		}

		/* reschedule QH iff another request is queued */
		if (!list_empty(&qh->qtd_list)
				&& HC_IS_RUNNING(hcd->state)) {
			int status;

			status = qh_schedule(oxu, qh);
			spin_unlock_irqrestore(&oxu->lock, flags);

			if (status != 0) {
				/* shouldn't happen often, but ...
				 * FIXME kill those tds' urbs
				 */
				err("can't reschedule qh %p, err %d",
					qh, status);
			}
			return status;
		}
		break;
	}
done:
	spin_unlock_irqrestore(&oxu->lock, flags);
	return 0;
}

/* Bulk qh holds the data toggle */
static void oxu_endpoint_disable(struct usb_hcd *hcd,
					struct usb_host_endpoint *ep)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	unsigned long flags;
	struct ehci_qh *qh, *tmp;

	/* ASSERT: any requests/urbs are being unlinked */
	/* ASSERT: nobody can be submitting urbs for this any more */

rescan:
	spin_lock_irqsave(&oxu->lock, flags);
	qh = ep->hcpriv;
	if (!qh)
		goto done;

	/* endpoints can be iso streams.  for now, we don't
	 * accelerate iso completions ... so spin a while.
	 */
	if (qh->hw_info1 == 0) {
		oxu_vdbg(oxu, "iso delay\n");
		goto idle_timeout;
	}

	if (!HC_IS_RUNNING(hcd->state))
		qh->qh_state = QH_STATE_IDLE;
	switch (qh->qh_state) {
	case QH_STATE_LINKED:
		for (tmp = oxu->async->qh_next.qh;
				tmp && tmp != qh;
				tmp = tmp->qh_next.qh)
			continue;
		/* periodic qh self-unlinks on empty */
		if (!tmp)
			goto nogood;
		unlink_async(oxu, qh);
		/* FALL THROUGH */
	case QH_STATE_UNLINK:		/* wait for hw to finish? */
idle_timeout:
		spin_unlock_irqrestore(&oxu->lock, flags);
		schedule_timeout_uninterruptible(1);
		goto rescan;
	case QH_STATE_IDLE:		/* fully unlinked */
		if (list_empty(&qh->qtd_list)) {
			qh_put(qh);
			break;
		}
		/* else FALL THROUGH */
	default:
nogood:
		/* caller was supposed to have unlinked any requests;
		 * that's not our job.  just leak this memory.
		 */
		oxu_err(oxu, "qh %p (#%02x) state %d%s\n",
			qh, ep->desc.bEndpointAddress, qh->qh_state,
			list_empty(&qh->qtd_list) ? "" : "(has tds)");
		break;
	}
	ep->hcpriv = NULL;
done:
	spin_unlock_irqrestore(&oxu->lock, flags);
}

static int oxu_get_frame(struct usb_hcd *hcd)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);

	return (readl(&oxu->regs->frame_index) >> 3) %
		oxu->periodic_size;
}

/* Build "status change" packet (one or two bytes) from HC registers */
static int oxu_hub_status_data(struct usb_hcd *hcd, char *buf)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	u32 temp, mask, status = 0;
	int ports, i, retval = 1;
	unsigned long flags;

	/* if !USB_SUSPEND, root hub timers won't get shut down ... */
	if (!HC_IS_RUNNING(hcd->state))
		return 0;

	/* init status to no-changes */
	buf[0] = 0;
	ports = HCS_N_PORTS(oxu->hcs_params);
	if (ports > 7) {
		buf[1] = 0;
		retval++;
	}

	/* Some boards (mostly VIA?) report bogus overcurrent indications,
	 * causing massive log spam unless we completely ignore them.  It
	 * may be relevant that VIA VT8235 controllers, where PORT_POWER is
	 * always set, seem to clear PORT_OCC and PORT_CSC when writing to
	 * PORT_POWER; that's surprising, but maybe within-spec.
	 */
	if (!ignore_oc)
		mask = PORT_CSC | PORT_PEC | PORT_OCC;
	else
		mask = PORT_CSC | PORT_PEC;

	/* no hub change reports (bit 0) for now (power, ...) */

	/* port N changes (bit N)? */
	spin_lock_irqsave(&oxu->lock, flags);
	for (i = 0; i < ports; i++) {
		temp = readl(&oxu->regs->port_status[i]);

		/*
		 * Return status information even for ports with OWNER set.
		 * Otherwise khubd wouldn't see the disconnect event when a
		 * high-speed device is switched over to the companion
		 * controller by the user.
		 */
		if (!(temp & PORT_CONNECT))
			oxu->reset_done[i] = 0;
		if ((temp & mask) != 0 || ((temp & PORT_RESUME) != 0 &&
				time_after_eq(jiffies, oxu->reset_done[i]))) {
			if (i < 7)
				buf[0] |= 1 << (i + 1);
			else
				buf[1] |= 1 << (i - 7);
			status = STS_PCD;
		}
	}
	/* FIXME autosuspend idle root hubs */
	spin_unlock_irqrestore(&oxu->lock, flags);
	return status ? retval : 0;
}

/* Returns the speed of a device attached to a port on the root hub. */
static inline unsigned int oxu_port_speed(struct oxu_hcd *oxu,
						unsigned int portsc)
{
	switch ((portsc >> 26) & 3) {
	case 0:
		return 0;
	case 1:
		return USB_PORT_STAT_LOW_SPEED;
	case 2:
	default:
		return USB_PORT_STAT_HIGH_SPEED;
	}
}

#define PORT_WAKE_BITS	(PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E)

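/* Root hub control requests: hub-class requests are emulated by
 * mapping them onto the EHCI-style port status/control registers.
 */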
  2654. static int oxu_hub_control(struct usb_hcd *hcd, u16 typeReq,
  2655. u16 wValue, u16 wIndex, char *buf, u16 wLength)
  2656. {
  2657. struct oxu_hcd *oxu = hcd_to_oxu(hcd);
  2658. int ports = HCS_N_PORTS(oxu->hcs_params);
  2659. u32 __iomem *status_reg = &oxu->regs->port_status[wIndex - 1];
  2660. u32 temp, status;
  2661. unsigned long flags;
  2662. int retval = 0;
  2663. unsigned selector;
  2664. /*
  2665. * FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR.
  2666. * HCS_INDICATOR may say we can change LEDs to off/amber/green.
  2667. * (track current state ourselves) ... blink for diagnostics,
  2668. * power, "this is the one", etc. EHCI spec supports this.
  2669. */
  2670. spin_lock_irqsave(&oxu->lock, flags);
  2671. switch (typeReq) {
  2672. case ClearHubFeature:
  2673. switch (wValue) {
  2674. case C_HUB_LOCAL_POWER:
  2675. case C_HUB_OVER_CURRENT:
  2676. /* no hub-wide feature/status flags */
  2677. break;
  2678. default:
  2679. goto error;
  2680. }
  2681. break;
  2682. case ClearPortFeature:
  2683. if (!wIndex || wIndex > ports)
  2684. goto error;
  2685. wIndex--;
  2686. temp = readl(status_reg);
  2687. /*
  2688. * Even if OWNER is set, so the port is owned by the
  2689. * companion controller, khubd needs to be able to clear
  2690. * the port-change status bits (especially
  2691. * USB_PORT_STAT_C_CONNECTION).
  2692. */
		switch (wValue) {
		case USB_PORT_FEAT_ENABLE:
			writel(temp & ~PORT_PE, status_reg);
			break;
		case USB_PORT_FEAT_C_ENABLE:
			writel((temp & ~PORT_RWC_BITS) | PORT_PEC, status_reg);
			break;
		case USB_PORT_FEAT_SUSPEND:
			if (temp & PORT_RESET)
				goto error;
			if (temp & PORT_SUSPEND) {
				if ((temp & PORT_PE) == 0)
					goto error;
				/* resume signaling for 20 msec */
				temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
				writel(temp | PORT_RESUME, status_reg);
				oxu->reset_done[wIndex] = jiffies
						+ msecs_to_jiffies(20);
			}
			break;
		case USB_PORT_FEAT_C_SUSPEND:
			/* we auto-clear this feature */
			break;
		case USB_PORT_FEAT_POWER:
			if (HCS_PPC(oxu->hcs_params))
				writel(temp & ~(PORT_RWC_BITS | PORT_POWER),
					status_reg);
			break;
		case USB_PORT_FEAT_C_CONNECTION:
			writel((temp & ~PORT_RWC_BITS) | PORT_CSC, status_reg);
			break;
		case USB_PORT_FEAT_C_OVER_CURRENT:
			writel((temp & ~PORT_RWC_BITS) | PORT_OCC, status_reg);
			break;
		case USB_PORT_FEAT_C_RESET:
			/* GetPortStatus clears reset */
			break;
		default:
			goto error;
		}
		readl(&oxu->regs->command);	/* unblock posted write */
		break;
	case GetHubDescriptor:
		ehci_hub_descriptor(oxu, (struct usb_hub_descriptor *)
			buf);
		break;
	case GetHubStatus:
		/* no hub-wide feature/status flags */
		memset(buf, 0, 4);
		break;
	case GetPortStatus:
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;
		status = 0;
		temp = readl(status_reg);

		/* wPortChange bits */
		if (temp & PORT_CSC)
			status |= USB_PORT_STAT_C_CONNECTION << 16;
		if (temp & PORT_PEC)
			status |= USB_PORT_STAT_C_ENABLE << 16;
		if ((temp & PORT_OCC) && !ignore_oc)
			status |= USB_PORT_STAT_C_OVERCURRENT << 16;

		/* whoever resumes must GetPortStatus to complete it!! */
		if (temp & PORT_RESUME) {

			/* Remote Wakeup received? */
			if (!oxu->reset_done[wIndex]) {
				/* resume signaling for 20 msec */
				oxu->reset_done[wIndex] = jiffies
						+ msecs_to_jiffies(20);
				/* check the port again */
				mod_timer(&oxu_to_hcd(oxu)->rh_timer,
						oxu->reset_done[wIndex]);
			}

			/* resume completed? */
			else if (time_after_eq(jiffies,
					oxu->reset_done[wIndex])) {
				status |= USB_PORT_STAT_C_SUSPEND << 16;
				oxu->reset_done[wIndex] = 0;

				/* stop resume signaling */
				temp = readl(status_reg);
				writel(temp & ~(PORT_RWC_BITS | PORT_RESUME),
					status_reg);
				retval = handshake(oxu, status_reg,
					PORT_RESUME, 0, 2000 /* 2msec */);
				if (retval != 0) {
					oxu_err(oxu,
						"port %d resume error %d\n",
						wIndex + 1, retval);
					goto error;
				}
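				/* Clear SUSPEND/RESUME (and, defensively,
				 * the read-only line-status field, bits
				 * 11:10) in the cached copy so the
				 * wPortStatus computed below reflects the
				 * resumed port.
				 */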
				temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10));
			}
		}

		/* whoever resets must GetPortStatus to complete it!! */
		if ((temp & PORT_RESET)
				&& time_after_eq(jiffies,
					oxu->reset_done[wIndex])) {
			status |= USB_PORT_STAT_C_RESET << 16;
			oxu->reset_done[wIndex] = 0;

			/* force reset to complete */
			writel(temp & ~(PORT_RWC_BITS | PORT_RESET),
					status_reg);
			/* REVISIT:  some hardware needs 550+ usec to clear
			 * this bit; seems too long to spin routinely...
			 */
			retval = handshake(oxu, status_reg,
					PORT_RESET, 0, 750);
			if (retval != 0) {
				oxu_err(oxu, "port %d reset error %d\n",
					wIndex + 1, retval);
				goto error;
			}

			/* see what we found out */
			temp = check_reset_complete(oxu, wIndex, status_reg,
					readl(status_reg));
		}

		/* transfer dedicated ports to the companion hc */
		if ((temp & PORT_CONNECT) &&
				test_bit(wIndex, &oxu->companion_ports)) {
			temp &= ~PORT_RWC_BITS;
			temp |= PORT_OWNER;
			writel(temp, status_reg);
			oxu_dbg(oxu, "port %d --> companion\n", wIndex + 1);
			temp = readl(status_reg);
		}

		/*
		 * Even if OWNER is set, there's no harm letting khubd
		 * see the wPortStatus values (they should all be 0 except
		 * for PORT_POWER anyway).
		 */
		if (temp & PORT_CONNECT) {
			status |= USB_PORT_STAT_CONNECTION;
			/* status may be from integrated TT */
			status |= oxu_port_speed(oxu, temp);
		}
		if (temp & PORT_PE)
			status |= USB_PORT_STAT_ENABLE;
		if (temp & (PORT_SUSPEND|PORT_RESUME))
			status |= USB_PORT_STAT_SUSPEND;
		if (temp & PORT_OC)
			status |= USB_PORT_STAT_OVERCURRENT;
		if (temp & PORT_RESET)
			status |= USB_PORT_STAT_RESET;
		if (temp & PORT_POWER)
			status |= USB_PORT_STAT_POWER;

#ifndef	OXU_VERBOSE_DEBUG
		if (status & ~0xffff)	/* only if wPortChange is interesting */
#endif
			dbg_port(oxu, "GetStatus", wIndex + 1, temp);
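		/* wPortStatus in the low word, wPortChange in the high
		 * word, little-endian as the hub spec requires
		 */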
		put_unaligned(cpu_to_le32(status), (__le32 *) buf);
		break;
	case SetHubFeature:
		switch (wValue) {
		case C_HUB_LOCAL_POWER:
		case C_HUB_OVER_CURRENT:
			/* no hub-wide feature/status flags */
			break;
		default:
			goto error;
		}
		break;
	case SetPortFeature:
		selector = wIndex >> 8;
		wIndex &= 0xff;
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;
		temp = readl(status_reg);
		if (temp & PORT_OWNER)
			break;

		temp &= ~PORT_RWC_BITS;
		switch (wValue) {
		case USB_PORT_FEAT_SUSPEND:
			if ((temp & PORT_PE) == 0
					|| (temp & PORT_RESET) != 0)
				goto error;
			if (device_may_wakeup(&hcd->self.root_hub->dev))
				temp |= PORT_WAKE_BITS;
			writel(temp | PORT_SUSPEND, status_reg);
			break;
		case USB_PORT_FEAT_POWER:
			if (HCS_PPC(oxu->hcs_params))
				writel(temp | PORT_POWER, status_reg);
			break;
		case USB_PORT_FEAT_RESET:
			if (temp & PORT_RESUME)
				goto error;
			/* line status bits may report this as low speed,
			 * which can be fine if this root hub has a
			 * transaction translator built in.
			 */
			oxu_vdbg(oxu, "port %d reset\n", wIndex + 1);
			temp |= PORT_RESET;
			temp &= ~PORT_PE;

			/*
			 * caller must wait, then call GetPortStatus
			 * usb 2.0 spec says 50 ms resets on root
			 */
			oxu->reset_done[wIndex] = jiffies
					+ msecs_to_jiffies(50);
			writel(temp, status_reg);
			break;

		/* For downstream facing ports (these):  one hub port is put
		 * into test mode according to USB2 11.24.2.13, then the hub
		 * must be reset (which for root hub now means rmmod+modprobe,
		 * or else system reboot).  See EHCI 2.3.9 and 4.14 for info
		 * about the EHCI-specific stuff.
		 */
		case USB_PORT_FEAT_TEST:
			if (!selector || selector > 5)
				goto error;
			ehci_quiesce(oxu);
			ehci_halt(oxu);
			temp |= selector << 16;
			writel(temp, status_reg);
			break;
		default:
			goto error;
		}
		readl(&oxu->regs->command);	/* unblock posted writes */
		break;

	default:
error:
		/* "stall" on error */
		retval = -EPIPE;
	}
	spin_unlock_irqrestore(&oxu->lock, flags);

	return retval;
}
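
/*
 * Root-hub bus suspend/resume is only built when CONFIG_PM is set;
 * otherwise the no-op stubs further below keep oxu_hc_driver valid.
 */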
#ifdef CONFIG_PM

static int oxu_bus_suspend(struct usb_hcd *hcd)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	int port;
	int mask;

	oxu_dbg(oxu, "suspend root hub\n");

	if (time_before(jiffies, oxu->next_statechange))
		msleep(5);

	port = HCS_N_PORTS(oxu->hcs_params);
	spin_lock_irq(&oxu->lock);

	/* stop schedules, clean any completed work */
	if (HC_IS_RUNNING(hcd->state)) {
		ehci_quiesce(oxu);
		hcd->state = HC_STATE_QUIESCING;
	}
	oxu->command = readl(&oxu->regs->command);
	if (oxu->reclaim)
		oxu->reclaim_ready = 1;
	ehci_work(oxu);

	/* Unlike other USB host controller types, EHCI doesn't have
	 * any notion of "global" or bus-wide suspend.  The driver has
	 * to manually suspend all the active unsuspended ports, and
	 * then manually resume them in the bus_resume() routine.
	 */
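	/* Suspend each enabled port that we own, recording it in the
	 * bus_suspended bitmap so bus_resume() can undo exactly this set.
	 */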
	oxu->bus_suspended = 0;
	while (port--) {
		u32 __iomem *reg = &oxu->regs->port_status[port];
		u32 t1 = readl(reg) & ~PORT_RWC_BITS;
		u32 t2 = t1;

		/* keep track of which ports we suspend */
		if ((t1 & PORT_PE) && !(t1 & PORT_OWNER) &&
				!(t1 & PORT_SUSPEND)) {
			t2 |= PORT_SUSPEND;
			set_bit(port, &oxu->bus_suspended);
		}

		/* enable remote wakeup on all ports */
		if (device_may_wakeup(&hcd->self.root_hub->dev))
			t2 |= PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E;
		else
			t2 &= ~(PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E);

		if (t1 != t2) {
			oxu_vdbg(oxu, "port %d, %08x -> %08x\n",
				port + 1, t1, t2);
			writel(t2, reg);
		}
	}

	/* turn off now-idle HC */
	del_timer_sync(&oxu->watchdog);
	ehci_halt(oxu);
	hcd->state = HC_STATE_SUSPENDED;

	/* allow remote wakeup */
	mask = INTR_MASK;
	if (!device_may_wakeup(&hcd->self.root_hub->dev))
		mask &= ~STS_PCD;
	writel(mask, &oxu->regs->intr_enable);
	readl(&oxu->regs->intr_enable);

	oxu->next_statechange = jiffies + msecs_to_jiffies(10);
	spin_unlock_irq(&oxu->lock);
	return 0;
}

/* Caller has locked the root hub, and should reset/reinit on error */
static int oxu_bus_resume(struct usb_hcd *hcd)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	u32 temp;
	int i;

	if (time_before(jiffies, oxu->next_statechange))
		msleep(5);
	spin_lock_irq(&oxu->lock);

	/* Ideally we've got a real resume here, and no port's power
	 * was lost.  (For PCI, that means Vaux was maintained.)  But we
	 * could instead be restoring a swsusp snapshot -- so that BIOS was
	 * the last user of the controller, not reset/pm hardware keeping
	 * state we gave to it.
	 */
	temp = readl(&oxu->regs->intr_enable);
	oxu_dbg(oxu, "resume root hub%s\n", temp ? "" : " after power loss");

	/* at least some APM implementations will try to deliver
	 * IRQs right away, so delay them until we're ready.
	 */
	writel(0, &oxu->regs->intr_enable);

	/* re-init operational registers */
	writel(0, &oxu->regs->segment);
	writel(oxu->periodic_dma, &oxu->regs->frame_list);
	writel((u32) oxu->async->qh_dma, &oxu->regs->async_next);

	/* restore CMD_RUN, framelist size, and irq threshold */
	writel(oxu->command, &oxu->regs->command);

	/* Some controller/firmware combinations need a delay during which
	 * they set up the port statuses.  See Bugzilla #8190. */
	mdelay(8);

	/* manually resume the ports we suspended during bus_suspend() */
	i = HCS_N_PORTS(oxu->hcs_params);
	while (i--) {
		temp = readl(&oxu->regs->port_status[i]);
		temp &= ~(PORT_RWC_BITS
			| PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E);
		if (test_bit(i, &oxu->bus_suspended) && (temp & PORT_SUSPEND)) {
			oxu->reset_done[i] = jiffies + msecs_to_jiffies(20);
			temp |= PORT_RESUME;
		}
		writel(temp, &oxu->regs->port_status[i]);
	}
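
	/* Let resume signaling run on all ports at once for 20 msec,
	 * then finish it port by port.
	 */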
	i = HCS_N_PORTS(oxu->hcs_params);
	mdelay(20);
	while (i--) {
		temp = readl(&oxu->regs->port_status[i]);
		if (test_bit(i, &oxu->bus_suspended) && (temp & PORT_SUSPEND)) {
			temp &= ~(PORT_RWC_BITS | PORT_RESUME);
			writel(temp, &oxu->regs->port_status[i]);
			oxu_vdbg(oxu, "resumed port %d\n", i + 1);
		}
	}
	(void) readl(&oxu->regs->command);

	/* maybe re-activate the schedule(s) */
	temp = 0;
	if (oxu->async->qh_next.qh)
		temp |= CMD_ASE;
	if (oxu->periodic_sched)
		temp |= CMD_PSE;
	if (temp) {
		oxu->command |= temp;
		writel(oxu->command, &oxu->regs->command);
	}

	oxu->next_statechange = jiffies + msecs_to_jiffies(5);
	hcd->state = HC_STATE_RUNNING;

	/* Now we can safely re-enable irqs */
	writel(INTR_MASK, &oxu->regs->intr_enable);

	spin_unlock_irq(&oxu->lock);
	return 0;
}

#else

static int oxu_bus_suspend(struct usb_hcd *hcd)
{
	return 0;
}

static int oxu_bus_resume(struct usb_hcd *hcd)
{
	return 0;
}

#endif	/* CONFIG_PM */

static const struct hc_driver oxu_hc_driver = {
	.description =		"oxu210hp_hcd",
	.product_desc =		"oxu210hp HCD",
	.hcd_priv_size =	sizeof(struct oxu_hcd),

	/*
	 * Generic hardware linkage
	 */
	.irq =			oxu_irq,
	.flags =		HCD_MEMORY | HCD_USB2,

	/*
	 * Basic lifecycle operations
	 */
	.reset =		oxu_reset,
	.start =		oxu_run,
	.stop =			oxu_stop,
	.shutdown =		oxu_shutdown,

	/*
	 * Managing i/o requests and associated device resources
	 */
	.urb_enqueue =		oxu_urb_enqueue,
	.urb_dequeue =		oxu_urb_dequeue,
	.endpoint_disable =	oxu_endpoint_disable,

	/*
	 * Scheduling support
	 */
	.get_frame_number =	oxu_get_frame,

	/*
	 * Root hub support
	 */
	.hub_status_data =	oxu_hub_status_data,
	.hub_control =		oxu_hub_control,
	.bus_suspend =		oxu_bus_suspend,
	.bus_resume =		oxu_bus_resume,
};

/*
 * Module stuff
 */

static void oxu_configuration(struct platform_device *pdev, void *base)
{
	u32 tmp;

	/* Initialize top level registers.
	 * First write ever
	 */
	oxu_writel(base, OXU_HOSTIFCONFIG, 0x0000037D);
	oxu_writel(base, OXU_SOFTRESET, OXU_SRESET);
	oxu_writel(base, OXU_HOSTIFCONFIG, 0x0000037D);

	tmp = oxu_readl(base, OXU_PIOBURSTREADCTRL);
	oxu_writel(base, OXU_PIOBURSTREADCTRL, tmp | 0x0040);

	oxu_writel(base, OXU_ASO, OXU_SPHPOEN | OXU_OVRCCURPUPDEN |
					OXU_COMPARATOR | OXU_ASO_OP);

	tmp = oxu_readl(base, OXU_CLKCTRL_SET);
	oxu_writel(base, OXU_CLKCTRL_SET, tmp | OXU_SYSCLKEN | OXU_USBOTGCLKEN);

	/* Clear all top interrupt enable */
	oxu_writel(base, OXU_CHIPIRQEN_CLR, 0xff);

	/* Clear all top interrupt status */
	oxu_writel(base, OXU_CHIPIRQSTATUS, 0xff);

	/* Enable all needed top interrupt except OTG SPH core */
	oxu_writel(base, OXU_CHIPIRQEN_SET, OXU_USBSPHLPWUI | OXU_USBOTGLPWUI);
}
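
/* Check the chip's ID register; reject anything that is not an OXU210. */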
static int oxu_verify_id(struct platform_device *pdev, void *base)
{
	u32 id;
	static const char * const bo[] = {
		"reserved",
		"128-pin LQFP",
		"84-pin TFBGA",
		"reserved",
	};

	/* Read controller signature register to find a match */
	id = oxu_readl(base, OXU_DEVICEID);
	dev_info(&pdev->dev, "device ID %x\n", id);
	if ((id & OXU_REV_MASK) != (OXU_REV_2100 << OXU_REV_SHIFT))
		return -1;

	dev_info(&pdev->dev, "found device %x %s (%04x:%04x)\n",
		id >> OXU_REV_SHIFT,
		bo[(id & OXU_BO_MASK) >> OXU_BO_SHIFT],
		(id & OXU_MAJ_REV_MASK) >> OXU_MAJ_REV_SHIFT,
		(id & OXU_MIN_REV_MASK) >> OXU_MIN_REV_SHIFT);

	return 0;
}

static struct usb_hcd *oxu_create(struct platform_device *pdev,
				unsigned long memstart, unsigned long memlen,
				void *base, int irq, int otg)
{
	struct device *dev = &pdev->dev;

	struct usb_hcd *hcd;
	struct oxu_hcd *oxu;
	int ret;

	/* Set endian mode and host mode */
	oxu_writel(base + (otg ? OXU_OTG_CORE_OFFSET : OXU_SPH_CORE_OFFSET),
			OXU_USBMODE,
			OXU_CM_HOST_ONLY | OXU_ES_LITTLE | OXU_VBPS);

	hcd = usb_create_hcd(&oxu_hc_driver, dev,
				otg ? "oxu210hp_otg" : "oxu210hp_sph");
	if (!hcd)
		return ERR_PTR(-ENOMEM);

	hcd->rsrc_start = memstart;
	hcd->rsrc_len = memlen;
	hcd->regs = base;
	hcd->irq = irq;
	hcd->state = HC_STATE_HALT;

	oxu = hcd_to_oxu(hcd);
	oxu->is_otg = otg;

	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
	if (ret < 0) {
		/* drop the reference taken by usb_create_hcd() */
		usb_put_hcd(hcd);
		return ERR_PTR(ret);
	}

	return hcd;
}
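
/*
 * One-time bring-up of both cores: configure the chip, verify its ID,
 * then register the OTG host first and the SPH host second.
 */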
static int oxu_init(struct platform_device *pdev,
				unsigned long memstart, unsigned long memlen,
				void *base, int irq)
{
	struct oxu_info *info = platform_get_drvdata(pdev);
	struct usb_hcd *hcd;
	int ret;

	/* First time configuration at start up */
	oxu_configuration(pdev, base);

	ret = oxu_verify_id(pdev, base);
	if (ret) {
		dev_err(&pdev->dev, "no devices found!\n");
		return -ENODEV;
	}

	/* Create the OTG controller */
	hcd = oxu_create(pdev, memstart, memlen, base, irq, 1);
	if (IS_ERR(hcd)) {
		dev_err(&pdev->dev, "cannot create OTG controller!\n");
		ret = PTR_ERR(hcd);
		goto error_create_otg;
	}
	info->hcd[0] = hcd;

	/* Create the SPH host controller */
	hcd = oxu_create(pdev, memstart, memlen, base, irq, 0);
	if (IS_ERR(hcd)) {
		dev_err(&pdev->dev, "cannot create SPH controller!\n");
		ret = PTR_ERR(hcd);
		goto error_create_sph;
	}
	info->hcd[1] = hcd;
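
	/* Unblock the chip-level interrupts for the two hosts (the low two
	 * bits of CHIPIRQEN, presumably the OTG and SPH cores).
	 */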
	oxu_writel(base, OXU_CHIPIRQEN_SET,
		oxu_readl(base, OXU_CHIPIRQEN_SET) | 3);

	return 0;

error_create_sph:
	usb_remove_hcd(info->hcd[0]);
	usb_put_hcd(info->hcd[0]);

error_create_otg:
	return ret;
}

static int oxu_drv_probe(struct platform_device *pdev)
{
	struct resource *res;
	void *base;
	unsigned long memstart, memlen;
	int irq, ret;
	struct oxu_info *info;

	if (usb_disabled())
		return -ENODEV;

	/*
	 * Get the platform resources
	 */
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		dev_err(&pdev->dev,
			"no IRQ! Check %s setup!\n", dev_name(&pdev->dev));
		return -ENODEV;
	}
	irq = res->start;
	dev_dbg(&pdev->dev, "IRQ resource %d\n", irq);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "no registers address! Check %s setup!\n",
			dev_name(&pdev->dev));
		return -ENODEV;
	}
	memstart = res->start;
	memlen = resource_size(res);
	dev_dbg(&pdev->dev, "MEM resource start %lx len %lx\n",
		memstart, memlen);
	if (!request_mem_region(memstart, memlen,
				oxu_hc_driver.description)) {
		dev_dbg(&pdev->dev, "memory area already in use\n");
		return -EBUSY;
	}

	ret = irq_set_irq_type(irq, IRQ_TYPE_EDGE_FALLING);
	if (ret) {
		dev_err(&pdev->dev, "error setting irq type\n");
		ret = -EFAULT;
		goto error_set_irq_type;
	}

	base = ioremap(memstart, memlen);
	if (!base) {
		dev_dbg(&pdev->dev, "error mapping memory\n");
		ret = -ENOMEM;
		goto error_ioremap;
	}

	/* Allocate a driver data struct to hold useful info for both
	 * SPH & OTG devices
	 */
	info = kzalloc(sizeof(struct oxu_info), GFP_KERNEL);
	if (!info) {
		dev_dbg(&pdev->dev, "error allocating memory\n");
		ret = -ENOMEM;
		goto error_alloc;
	}
	platform_set_drvdata(pdev, info);

	ret = oxu_init(pdev, memstart, memlen, base, irq);
	if (ret < 0) {
		dev_dbg(&pdev->dev, "cannot init USB devices\n");
		goto error_init;
	}

	dev_info(&pdev->dev, "devices enabled and running\n");

	return 0;

error_init:
	kfree(info);
	platform_set_drvdata(pdev, NULL);

error_alloc:
	iounmap(base);

error_set_irq_type:
error_ioremap:
	release_mem_region(memstart, memlen);

	dev_err(&pdev->dev, "init %s fail, %d\n", dev_name(&pdev->dev), ret);
	return ret;
}

static void oxu_remove(struct platform_device *pdev, struct usb_hcd *hcd)
{
	usb_remove_hcd(hcd);
	usb_put_hcd(hcd);
}

static int oxu_drv_remove(struct platform_device *pdev)
{
	struct oxu_info *info = platform_get_drvdata(pdev);
	unsigned long memstart = info->hcd[0]->rsrc_start,
			memlen = info->hcd[0]->rsrc_len;
	void *base = info->hcd[0]->regs;

	oxu_remove(pdev, info->hcd[0]);
	oxu_remove(pdev, info->hcd[1]);

	iounmap(base);
	release_mem_region(memstart, memlen);

	kfree(info);
	platform_set_drvdata(pdev, NULL);

	return 0;
}
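
/* Shutdown simply reuses the remove path to quiesce both controllers. */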
static void oxu_drv_shutdown(struct platform_device *pdev)
{
	oxu_drv_remove(pdev);
}

#if 0
/* FIXME: TODO */
static int oxu_drv_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct usb_hcd *hcd = dev_get_drvdata(dev);

	return 0;
}

static int oxu_drv_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct usb_hcd *hcd = dev_get_drvdata(dev);

	return 0;
}
#else
#define oxu_drv_suspend	NULL
#define oxu_drv_resume	NULL
#endif

static struct platform_driver oxu_driver = {
	.probe		= oxu_drv_probe,
	.remove		= oxu_drv_remove,
	.shutdown	= oxu_drv_shutdown,
	.suspend	= oxu_drv_suspend,
	.resume		= oxu_drv_resume,
	.driver = {
		.name = "oxu210hp-hcd",
		.bus = &platform_bus_type
	}
};

module_platform_driver(oxu_driver);

MODULE_DESCRIPTION("Oxford OXU210HP HCD driver - ver. " DRIVER_VERSION);
MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
MODULE_LICENSE("GPL");