/*
 * ci13xxx_udc.c - MIPS USB IP core family device controller
 *
 * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
 *
 * Author: David Lopo
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
/*
 * Description: MIPS USB IP core family device controller
 *              Currently it only supports IP part number CI13412
 *
 * This driver is composed of several blocks:
 * - HW:     hardware interface
 * - DBG:    debug facilities (optional)
 * - UTIL:   utilities
 * - ISR:    interrupts handling
 * - ENDPT:  endpoint operations (Gadget API)
 * - GADGET: gadget operations (Gadget API)
 * - BUS:    bus glue code, bus abstraction layer
 *
 * Compile Options
 * - CONFIG_USB_GADGET_DEBUG_FILES: enable debug facilities
 * - STALL_IN: non-empty bulk-in pipes cannot be halted
 *             if defined, mass storage compliance succeeds but with warnings
 *             => case 4: Hi >  Dn
 *             => case 5: Hi >  Di
 *             => case 8: Hi <> Do
 *             if undefined, usbtest 13 fails
 * - TRACE: enable function tracing (depends on DEBUG)
 *
 * Main Features
 * - Chapter 9 & Mass Storage Compliance with Gadget File Storage
 * - Chapter 9 Compliance with Gadget Zero (STALL_IN undefined)
 * - Normal & LPM support
 *
 * USBTEST Report
 * - OK: 0-12, 13 (STALL_IN defined) & 14
 * - Not Supported: 15 & 16 (ISO)
 *
 * TODO List
 * - OTG
 * - Isochronous & Interrupt Traffic
 * - Handle requests which span several TDs
 * - GET_STATUS(device) - always reports 0
 * - Gadget API (majority of optional features)
 * - Suspend & Remote Wakeup
 */
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>

#include "ci13xxx_udc.h"

/******************************************************************************
 * DEFINE
 *****************************************************************************/
/* ctrl register bank access */
static DEFINE_SPINLOCK(udc_lock);

/* control endpoint description */
static const struct usb_endpoint_descriptor
ctrl_endpt_desc = {
	.bLength         = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
};

/* UDC descriptor */
static struct ci13xxx *_udc;

/* Interrupt statistics */
#define ISR_MASK   0x1F
static struct {
	u32 test;
	u32 ui;
	u32 uei;
	u32 pci;
	u32 uri;
	u32 sli;
	u32 none;
	struct {
		u32 cnt;
		u32 buf[ISR_MASK+1];
		u32 idx;
	} hndl;
} isr_statistics;

/**
 * ffs_nr: find first (least significant) bit set
 * @x: the word to search
 *
 * This function returns bit number (instead of position)
 */
static int ffs_nr(u32 x)
{
	int n = ffs(x);

	return n ? n-1 : 32;
}
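/*
 * For illustration (not part of the original driver comments): ffs_nr() is
 * used throughout this file to turn a bit-field mask into its shift amount,
 * e.g. ffs_nr(0x00000001) == 0, ffs_nr(0x00F00000) == 20, ffs_nr(0) == 32,
 * so "data << ffs_nr(mask)" places a value into the field selected by mask.
 */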
/******************************************************************************
 * HW block
 *****************************************************************************/
/* register bank descriptor */
static struct {
	unsigned      lpm;    /* is LPM? */
	void __iomem *abs;    /* bus map offset */
	void __iomem *cap;    /* bus map offset + CAP offset + CAP data */
	size_t        size;   /* bank size */
} hw_bank;

/* MSM specific */
#define ABS_AHBBURST        (0x0090UL)
#define ABS_AHBMODE         (0x0098UL)
/* UDC register map */
#define ABS_CAPLENGTH       (0x100UL)
#define ABS_HCCPARAMS       (0x108UL)
#define ABS_DCCPARAMS       (0x124UL)
#define ABS_TESTMODE        (hw_bank.lpm ? 0x0FCUL : 0x138UL)
/* offset to CAPLENGTH (addr + data) */
#define CAP_USBCMD          (0x000UL)
#define CAP_USBSTS          (0x004UL)
#define CAP_USBINTR         (0x008UL)
#define CAP_DEVICEADDR      (0x014UL)
#define CAP_ENDPTLISTADDR   (0x018UL)
#define CAP_PORTSC          (0x044UL)
#define CAP_DEVLC           (0x084UL)
#define CAP_USBMODE         (hw_bank.lpm ? 0x0C8UL : 0x068UL)
#define CAP_ENDPTSETUPSTAT  (hw_bank.lpm ? 0x0D8UL : 0x06CUL)
#define CAP_ENDPTPRIME      (hw_bank.lpm ? 0x0DCUL : 0x070UL)
#define CAP_ENDPTFLUSH      (hw_bank.lpm ? 0x0E0UL : 0x074UL)
#define CAP_ENDPTSTAT       (hw_bank.lpm ? 0x0E4UL : 0x078UL)
#define CAP_ENDPTCOMPLETE   (hw_bank.lpm ? 0x0E8UL : 0x07CUL)
#define CAP_ENDPTCTRL       (hw_bank.lpm ? 0x0ECUL : 0x080UL)
#define CAP_LAST            (hw_bank.lpm ? 0x12CUL : 0x0C0UL)

/* maximum number of endpoints: valid only after hw_device_reset() */
static unsigned hw_ep_max;

/**
 * hw_ep_bit: calculates the bit number
 * @num: endpoint number
 * @dir: endpoint direction
 *
 * This function returns bit number
 */
static inline int hw_ep_bit(int num, int dir)
{
	return num + (dir ? 16 : 0);
}
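/*
 * Illustrative note (added; based on the ENDPT* register usage below): RX/OUT
 * endpoints map to bits 0..15 and TX/IN endpoints to bits 16..31 of
 * ENDPTPRIME/ENDPTFLUSH/ENDPTSTAT/ENDPTCOMPLETE, so with the TX/RX constants
 * used in this file hw_ep_bit(2, TX) == 18 and hw_ep_bit(3, RX) == 3.
 */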
/**
 * hw_aread: reads from register bitfield
 * @addr: address relative to bus map
 * @mask: bitfield mask
 *
 * This function returns register bitfield data
 */
static u32 hw_aread(u32 addr, u32 mask)
{
	return ioread32(addr + hw_bank.abs) & mask;
}

/**
 * hw_awrite: writes to register bitfield
 * @addr: address relative to bus map
 * @mask: bitfield mask
 * @data: new data
 */
static void hw_awrite(u32 addr, u32 mask, u32 data)
{
	iowrite32(hw_aread(addr, ~mask) | (data & mask),
		  addr + hw_bank.abs);
}

/**
 * hw_cread: reads from register bitfield
 * @addr: address relative to CAP offset plus content
 * @mask: bitfield mask
 *
 * This function returns register bitfield data
 */
static u32 hw_cread(u32 addr, u32 mask)
{
	return ioread32(addr + hw_bank.cap) & mask;
}

/**
 * hw_cwrite: writes to register bitfield
 * @addr: address relative to CAP offset plus content
 * @mask: bitfield mask
 * @data: new data
 */
static void hw_cwrite(u32 addr, u32 mask, u32 data)
{
	iowrite32(hw_cread(addr, ~mask) | (data & mask),
		  addr + hw_bank.cap);
}

/**
 * hw_ctest_and_clear: tests & clears register bitfield
 * @addr: address relative to CAP offset plus content
 * @mask: bitfield mask
 *
 * This function returns register bitfield data
 */
static u32 hw_ctest_and_clear(u32 addr, u32 mask)
{
	u32 reg = hw_cread(addr, mask);

	iowrite32(reg, addr + hw_bank.cap);
	return reg;
}

/**
 * hw_ctest_and_write: tests & writes register bitfield
 * @addr: address relative to CAP offset plus content
 * @mask: bitfield mask
 * @data: new data
 *
 * This function returns register bitfield data
 */
static u32 hw_ctest_and_write(u32 addr, u32 mask, u32 data)
{
	u32 reg = hw_cread(addr, ~0);

	iowrite32((reg & ~mask) | (data & mask), addr + hw_bank.cap);
	return (reg & mask) >> ffs_nr(mask);
}
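/*
 * Usage sketch (illustrative, not from the original source): the hw_?write()
 * helpers do a read-modify-write so only the bits selected by @mask change.
 * For example, assuming USBCMD currently reads 0x00080000:
 *   hw_cwrite(CAP_USBCMD, USBCMD_RS, USBCMD_RS);   -> 0x00080001 (run)
 *   hw_cwrite(CAP_USBCMD, USBCMD_RS, 0);           -> 0x00080000 (stop)
 * hw_ctest_and_write() additionally returns the previous field value,
 * already shifted down by ffs_nr(@mask).
 */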
static int hw_device_init(void __iomem *base)
{
	u32 reg;

	/* bank is a module variable */
	hw_bank.abs = base;

	hw_bank.cap = hw_bank.abs;
	hw_bank.cap += ABS_CAPLENGTH;
	hw_bank.cap += ioread8(hw_bank.cap);

	reg = hw_aread(ABS_HCCPARAMS, HCCPARAMS_LEN) >> ffs_nr(HCCPARAMS_LEN);
	hw_bank.lpm  = reg;
	hw_bank.size = hw_bank.cap - hw_bank.abs;
	hw_bank.size += CAP_LAST;
	hw_bank.size /= sizeof(u32);

	reg = hw_aread(ABS_DCCPARAMS, DCCPARAMS_DEN) >> ffs_nr(DCCPARAMS_DEN);
	if (reg == 0 || reg > ENDPT_MAX)
		return -ENODEV;

	hw_ep_max = reg;   /* cache hw ENDPT_MAX */

	/* setup lock mode ? */

	/* ENDPTSETUPSTAT is '0' by default */

	/* HCSPARAMS.bf.ppc SHOULD BE zero for device */

	return 0;
}
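/*
 * Worked example (illustrative values only): with a CAPLENGTH register value
 * of 0x40, hw_device_init() ends up with
 *   hw_bank.cap = base + ABS_CAPLENGTH + 0x40 = base + 0x140
 * and every CAP_* offset above is then applied relative to hw_bank.cap,
 * i.e. the operational registers that follow the capability area.
 */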
/**
 * hw_device_reset: resets chip (execute without interruption)
 * @udc: the controller to reset
 *
 * This function returns an error code
 */
static int hw_device_reset(struct ci13xxx *udc)
{
	/* should flush & stop before reset */
	hw_cwrite(CAP_ENDPTFLUSH, ~0, ~0);
	hw_cwrite(CAP_USBCMD, USBCMD_RS, 0);

	hw_cwrite(CAP_USBCMD, USBCMD_RST, USBCMD_RST);
	while (hw_cread(CAP_USBCMD, USBCMD_RST))
		udelay(10);		/* not RTOS friendly */

	if (udc->udc_driver->notify_event)
		udc->udc_driver->notify_event(udc,
			CI13XXX_CONTROLLER_RESET_EVENT);

	if (udc->udc_driver->flags & CI13XXX_DISABLE_STREAMING)
		hw_cwrite(CAP_USBMODE, USBMODE_SDIS, USBMODE_SDIS);

	/* USBMODE should be configured step by step */
	hw_cwrite(CAP_USBMODE, USBMODE_CM, USBMODE_CM_IDLE);
	hw_cwrite(CAP_USBMODE, USBMODE_CM, USBMODE_CM_DEVICE);
	hw_cwrite(CAP_USBMODE, USBMODE_SLOM, USBMODE_SLOM);	/* HW >= 2.3 */

	if (hw_cread(CAP_USBMODE, USBMODE_CM) != USBMODE_CM_DEVICE) {
		pr_err("cannot enter in device mode");
		pr_err("lpm = %i", hw_bank.lpm);
		return -ENODEV;
	}

	return 0;
}

/**
 * hw_device_state: enables/disables interrupts & starts/stops device (execute
 *                  without interruption)
 * @dma: 0 => disable, !0 => enable and set dma engine
 *
 * This function returns an error code
 */
static int hw_device_state(u32 dma)
{
	if (dma) {
		hw_cwrite(CAP_ENDPTLISTADDR, ~0, dma);
		/* interrupt, error, port change, reset, sleep/suspend */
		hw_cwrite(CAP_USBINTR, ~0,
			  USBi_UI|USBi_UEI|USBi_PCI|USBi_URI|USBi_SLI);
		hw_cwrite(CAP_USBCMD, USBCMD_RS, USBCMD_RS);
	} else {
		hw_cwrite(CAP_USBCMD, USBCMD_RS, 0);
		hw_cwrite(CAP_USBINTR, ~0, 0);
	}
	return 0;
}

/**
 * hw_ep_flush: flush endpoint fifo (execute without interruption)
 * @num: endpoint number
 * @dir: endpoint direction
 *
 * This function returns an error code
 */
static int hw_ep_flush(int num, int dir)
{
	int n = hw_ep_bit(num, dir);

	do {
		/* flush any pending transfer */
		hw_cwrite(CAP_ENDPTFLUSH, BIT(n), BIT(n));
		while (hw_cread(CAP_ENDPTFLUSH, BIT(n)))
			cpu_relax();
	} while (hw_cread(CAP_ENDPTSTAT, BIT(n)));

	return 0;
}

/**
 * hw_ep_disable: disables endpoint (execute without interruption)
 * @num: endpoint number
 * @dir: endpoint direction
 *
 * This function returns an error code
 */
static int hw_ep_disable(int num, int dir)
{
	hw_ep_flush(num, dir);
	hw_cwrite(CAP_ENDPTCTRL + num * sizeof(u32),
		  dir ? ENDPTCTRL_TXE : ENDPTCTRL_RXE, 0);
	return 0;
}

/**
 * hw_ep_enable: enables endpoint (execute without interruption)
 * @num:  endpoint number
 * @dir:  endpoint direction
 * @type: endpoint type
 *
 * This function returns an error code
 */
static int hw_ep_enable(int num, int dir, int type)
{
	u32 mask, data;

	if (dir) {
		mask  = ENDPTCTRL_TXT;	/* type */
		data  = type << ffs_nr(mask);

		mask |= ENDPTCTRL_TXS;	/* unstall */
		mask |= ENDPTCTRL_TXR;	/* reset data toggle */
		data |= ENDPTCTRL_TXR;
		mask |= ENDPTCTRL_TXE;	/* enable */
		data |= ENDPTCTRL_TXE;
	} else {
		mask  = ENDPTCTRL_RXT;	/* type */
		data  = type << ffs_nr(mask);

		mask |= ENDPTCTRL_RXS;	/* unstall */
		mask |= ENDPTCTRL_RXR;	/* reset data toggle */
		data |= ENDPTCTRL_RXR;
		mask |= ENDPTCTRL_RXE;	/* enable */
		data |= ENDPTCTRL_RXE;
	}
	hw_cwrite(CAP_ENDPTCTRL + num * sizeof(u32), mask, data);
	return 0;
}

/**
 * hw_ep_get_halt: return endpoint halt status
 * @num: endpoint number
 * @dir: endpoint direction
 *
 * This function returns 1 if endpoint halted
 */
static int hw_ep_get_halt(int num, int dir)
{
	u32 mask = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;

	return hw_cread(CAP_ENDPTCTRL + num * sizeof(u32), mask) ? 1 : 0;
}

/**
 * hw_ep_is_primed: test if endpoint is primed (execute without interruption)
 * @num: endpoint number
 * @dir: endpoint direction
 *
 * This function returns true if endpoint primed
 */
static int hw_ep_is_primed(int num, int dir)
{
	u32 reg = hw_cread(CAP_ENDPTPRIME, ~0) | hw_cread(CAP_ENDPTSTAT, ~0);

	return test_bit(hw_ep_bit(num, dir), (void *)&reg);
}

/**
 * hw_test_and_clear_setup_status: test & clear setup status (execute without
 *                                 interruption)
 * @n: bit number (endpoint)
 *
 * This function returns setup status
 */
static int hw_test_and_clear_setup_status(int n)
{
	return hw_ctest_and_clear(CAP_ENDPTSETUPSTAT, BIT(n));
}

/**
 * hw_ep_prime: primes endpoint (execute without interruption)
 * @num:     endpoint number
 * @dir:     endpoint direction
 * @is_ctrl: true if control endpoint
 *
 * This function returns an error code
 */
static int hw_ep_prime(int num, int dir, int is_ctrl)
{
	int n = hw_ep_bit(num, dir);

	/* the caller should flush first */
	if (hw_ep_is_primed(num, dir))
		return -EBUSY;

	if (is_ctrl && dir == RX && hw_cread(CAP_ENDPTSETUPSTAT, BIT(num)))
		return -EAGAIN;

	hw_cwrite(CAP_ENDPTPRIME, BIT(n), BIT(n));

	while (hw_cread(CAP_ENDPTPRIME, BIT(n)))
		cpu_relax();

	if (is_ctrl && dir == RX && hw_cread(CAP_ENDPTSETUPSTAT, BIT(num)))
		return -EAGAIN;

	/* status should be tested according to the manual, but it doesn't work */
	return 0;
}

/**
 * hw_ep_set_halt: configures ep halt & resets data toggle after clear (execute
 *                 without interruption)
 * @num:   endpoint number
 * @dir:   endpoint direction
 * @value: true => stall, false => unstall
 *
 * This function returns an error code
 */
static int hw_ep_set_halt(int num, int dir, int value)
{
	if (value != 0 && value != 1)
		return -EINVAL;

	do {
		u32 addr = CAP_ENDPTCTRL + num * sizeof(u32);
		u32 mask_xs = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
		u32 mask_xr = dir ? ENDPTCTRL_TXR : ENDPTCTRL_RXR;

		/* data toggle - reserved for EP0 but it's in ESS */
		hw_cwrite(addr, mask_xs|mask_xr, value ? mask_xs : mask_xr);

	} while (value != hw_ep_get_halt(num, dir));

	return 0;
}
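/*
 * Usage sketch (illustrative): stalling and later clearing EP1-IN would be
 *   hw_ep_set_halt(1, TX, 1);   - set ENDPTCTRL1.TXS
 *   hw_ep_set_halt(1, TX, 0);   - clear TXS and set TXR to reset data toggle
 * The loop above re-writes the register until the read-back halt state
 * matches the requested @value.
 */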
/**
 * hw_intr_clear: disables interrupt & clears interrupt status (execute without
 *                interruption)
 * @n: interrupt bit
 *
 * This function returns an error code
 */
static int hw_intr_clear(int n)
{
	if (n >= REG_BITS)
		return -EINVAL;

	hw_cwrite(CAP_USBINTR, BIT(n), 0);
	hw_cwrite(CAP_USBSTS,  BIT(n), BIT(n));
	return 0;
}

/**
 * hw_intr_force: enables interrupt & forces interrupt status (execute without
 *                interruption)
 * @n: interrupt bit
 *
 * This function returns an error code
 */
static int hw_intr_force(int n)
{
	if (n >= REG_BITS)
		return -EINVAL;

	hw_awrite(ABS_TESTMODE, TESTMODE_FORCE, TESTMODE_FORCE);
	hw_cwrite(CAP_USBINTR,  BIT(n), BIT(n));
	hw_cwrite(CAP_USBSTS,   BIT(n), BIT(n));
	hw_awrite(ABS_TESTMODE, TESTMODE_FORCE, 0);
	return 0;
}

/**
 * hw_port_is_high_speed: test if port is high speed
 *
 * This function returns true if high speed port
 */
static int hw_port_is_high_speed(void)
{
	return hw_bank.lpm ? hw_cread(CAP_DEVLC, DEVLC_PSPD) :
		hw_cread(CAP_PORTSC, PORTSC_HSP);
}

/**
 * hw_port_test_get: reads port test mode value
 *
 * This function returns port test mode value
 */
static u8 hw_port_test_get(void)
{
	return hw_cread(CAP_PORTSC, PORTSC_PTC) >> ffs_nr(PORTSC_PTC);
}

/**
 * hw_port_test_set: writes port test mode (execute without interruption)
 * @mode: new value
 *
 * This function returns an error code
 */
static int hw_port_test_set(u8 mode)
{
	const u8 TEST_MODE_MAX = 7;

	if (mode > TEST_MODE_MAX)
		return -EINVAL;

	hw_cwrite(CAP_PORTSC, PORTSC_PTC, mode << ffs_nr(PORTSC_PTC));
	return 0;
}
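/*
 * Note (USB 2.0 / EHCI PTC encoding, summarized here for reference): typical
 * @mode values are 0 = disabled, 1 = Test_J, 2 = Test_K, 3 = Test_SE0_NAK,
 * 4 = Test_Packet, 5 = Test_Force_Enable; this function only range-checks
 * the value against TEST_MODE_MAX.
 */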
/**
 * hw_read_intr_enable: returns interrupt enable register
 *
 * This function returns register data
 */
static u32 hw_read_intr_enable(void)
{
	return hw_cread(CAP_USBINTR, ~0);
}

/**
 * hw_read_intr_status: returns interrupt status register
 *
 * This function returns register data
 */
static u32 hw_read_intr_status(void)
{
	return hw_cread(CAP_USBSTS, ~0);
}

/**
 * hw_register_read: reads all device registers (execute without interruption)
 * @buf:  destination buffer
 * @size: buffer size
 *
 * This function returns number of registers read
 */
static size_t hw_register_read(u32 *buf, size_t size)
{
	unsigned i;

	if (size > hw_bank.size)
		size = hw_bank.size;

	for (i = 0; i < size; i++)
		buf[i] = hw_aread(i * sizeof(u32), ~0);

	return size;
}

/**
 * hw_register_write: writes to register
 * @addr: register address
 * @data: register value
 *
 * This function returns an error code
 */
static int hw_register_write(u16 addr, u32 data)
{
	/* align */
	addr /= sizeof(u32);

	if (addr >= hw_bank.size)
		return -EINVAL;

	/* align */
	addr *= sizeof(u32);

	hw_awrite(addr, ~0, data);
	return 0;
}

/**
 * hw_test_and_clear_complete: test & clear complete status (execute without
 *                             interruption)
 * @n: bit number (endpoint)
 *
 * This function returns complete status
 */
static int hw_test_and_clear_complete(int n)
{
	return hw_ctest_and_clear(CAP_ENDPTCOMPLETE, BIT(n));
}

/**
 * hw_test_and_clear_intr_active: test & clear active interrupts (execute
 *                                without interruption)
 *
 * This function returns active interrupts
 */
static u32 hw_test_and_clear_intr_active(void)
{
	u32 reg = hw_read_intr_status() & hw_read_intr_enable();

	hw_cwrite(CAP_USBSTS, ~0, reg);
	return reg;
}

/**
 * hw_test_and_clear_setup_guard: test & clear setup guard (execute without
 *                                interruption)
 *
 * This function returns guard value
 */
static int hw_test_and_clear_setup_guard(void)
{
	return hw_ctest_and_write(CAP_USBCMD, USBCMD_SUTW, 0);
}

/**
 * hw_test_and_set_setup_guard: test & set setup guard (execute without
 *                              interruption)
 *
 * This function returns guard value
 */
static int hw_test_and_set_setup_guard(void)
{
	return hw_ctest_and_write(CAP_USBCMD, USBCMD_SUTW, USBCMD_SUTW);
}

/**
 * hw_usb_set_address: configures USB address (execute without interruption)
 * @value: new USB address
 *
 * This function returns an error code
 */
static int hw_usb_set_address(u8 value)
{
	/* advance */
	hw_cwrite(CAP_DEVICEADDR, DEVICEADDR_USBADR | DEVICEADDR_USBADRA,
		  value << ffs_nr(DEVICEADDR_USBADR) | DEVICEADDR_USBADRA);
	return 0;
}
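/*
 * Note (hardware behaviour, summarized here for clarity): writing the
 * "advance" bit (USBADRA) together with the new address lets the controller
 * defer switching to @value until the status stage of the SET_ADDRESS
 * control transfer has completed, as the USB spec requires.
 */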
/**
 * hw_usb_reset: restart device after a bus reset (execute without
 *               interruption)
 *
 * This function returns an error code
 */
static int hw_usb_reset(void)
{
	hw_usb_set_address(0);

	/* ESS flushes only at end?!? */
	hw_cwrite(CAP_ENDPTFLUSH, ~0, ~0);	/* flush all EPs */

	/* clear setup token semaphores */
	hw_cwrite(CAP_ENDPTSETUPSTAT, 0, 0);	/* writes its content */

	/* clear complete status */
	hw_cwrite(CAP_ENDPTCOMPLETE, 0, 0);	/* writes its content */

	/* wait until all bits cleared */
	while (hw_cread(CAP_ENDPTPRIME, ~0))
		udelay(10);		/* not RTOS friendly */

	/* reset all endpoints ? */

	/* reset internal status and wait for further instructions
	   no need to verify the port reset status (ESS does it) */

	return 0;
}

/******************************************************************************
 * DBG block
 *****************************************************************************/
/**
 * show_device: prints information about device capabilities and status
 *
 * Check "device.h" for details
 */
static ssize_t show_device(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
	struct usb_gadget *gadget = &udc->gadget;
	int n = 0;

	dbg_trace("[%s] %p\n", __func__, buf);
	if (attr == NULL || buf == NULL) {
		dev_err(dev, "[%s] EINVAL\n", __func__);
		return 0;
	}

	n += scnprintf(buf + n, PAGE_SIZE - n, "speed = %d\n",
		       gadget->speed);
	n += scnprintf(buf + n, PAGE_SIZE - n, "is_dualspeed = %d\n",
		       gadget->is_dualspeed);
	n += scnprintf(buf + n, PAGE_SIZE - n, "is_otg = %d\n",
		       gadget->is_otg);
	n += scnprintf(buf + n, PAGE_SIZE - n, "is_a_peripheral = %d\n",
		       gadget->is_a_peripheral);
	n += scnprintf(buf + n, PAGE_SIZE - n, "b_hnp_enable = %d\n",
		       gadget->b_hnp_enable);
	n += scnprintf(buf + n, PAGE_SIZE - n, "a_hnp_support = %d\n",
		       gadget->a_hnp_support);
	n += scnprintf(buf + n, PAGE_SIZE - n, "a_alt_hnp_support = %d\n",
		       gadget->a_alt_hnp_support);
	n += scnprintf(buf + n, PAGE_SIZE - n, "name = %s\n",
		       (gadget->name ? gadget->name : ""));

	return n;
}
static DEVICE_ATTR(device, S_IRUSR, show_device, NULL);

/**
 * show_driver: prints information about attached gadget (if any)
 *
 * Check "device.h" for details
 */
static ssize_t show_driver(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
	struct usb_gadget_driver *driver = udc->driver;
	int n = 0;

	dbg_trace("[%s] %p\n", __func__, buf);
	if (attr == NULL || buf == NULL) {
		dev_err(dev, "[%s] EINVAL\n", __func__);
		return 0;
	}

	if (driver == NULL)
		return scnprintf(buf, PAGE_SIZE,
				 "There is no gadget attached!\n");

	n += scnprintf(buf + n, PAGE_SIZE - n, "function = %s\n",
		       (driver->function ? driver->function : ""));
	n += scnprintf(buf + n, PAGE_SIZE - n, "max speed = %d\n",
		       driver->speed);

	return n;
}
static DEVICE_ATTR(driver, S_IRUSR, show_driver, NULL);

/* Maximum event message length */
#define DBG_DATA_MSG   64UL

/* Maximum event messages */
#define DBG_DATA_MAX   128UL

/* Event buffer descriptor */
static struct {
	char	 (buf[DBG_DATA_MAX])[DBG_DATA_MSG];	/* buffer */
	unsigned idx;	/* index */
	unsigned tty;	/* print to console? */
	rwlock_t lck;	/* lock */
} dbg_data = {
	.idx = 0,
	.tty = 0,
	.lck = __RW_LOCK_UNLOCKED(lck)
};

/**
 * dbg_dec: decrements debug event index
 * @idx: buffer index
 */
static void dbg_dec(unsigned *idx)
{
	*idx = (*idx - 1) & (DBG_DATA_MAX-1);
}

/**
 * dbg_inc: increments debug event index
 * @idx: buffer index
 */
static void dbg_inc(unsigned *idx)
{
	*idx = (*idx + 1) & (DBG_DATA_MAX-1);
}
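/*
 * Illustration (not in the original comments): DBG_DATA_MAX is a power of
 * two, so masking with (DBG_DATA_MAX-1) wraps the ring index cheaply, e.g.
 * with idx == 127, dbg_inc(&idx) yields 0 and dbg_dec(&idx) from 0 yields 127.
 */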
/**
 * dbg_print: prints the common part of the event
 * @addr:   endpoint address
 * @name:   event name
 * @status: status
 * @extra:  extra information
 */
static void dbg_print(u8 addr, const char *name, int status, const char *extra)
{
	struct timeval tval;
	unsigned int stamp;
	unsigned long flags;

	write_lock_irqsave(&dbg_data.lck, flags);

	do_gettimeofday(&tval);
	stamp = tval.tv_sec & 0xFFFF;	/* 2^32 = 4294967296. Limit to 4096s */
	stamp = stamp * 1000000 + tval.tv_usec;

	scnprintf(dbg_data.buf[dbg_data.idx], DBG_DATA_MSG,
		  "%04X\t» %02X %-7.7s %4i «\t%s\n",
		  stamp, addr, name, status, extra);

	dbg_inc(&dbg_data.idx);

	write_unlock_irqrestore(&dbg_data.lck, flags);

	if (dbg_data.tty != 0)
		pr_notice("%04X\t» %02X %-7.7s %4i «\t%s\n",
			  stamp, addr, name, status, extra);
}

/**
 * dbg_done: prints a DONE event
 * @addr:   endpoint address
 * @token:  transfer descriptor token
 * @status: status
 */
static void dbg_done(u8 addr, const u32 token, int status)
{
	char msg[DBG_DATA_MSG];

	scnprintf(msg, sizeof(msg), "%d %02X",
		  (int)(token & TD_TOTAL_BYTES) >> ffs_nr(TD_TOTAL_BYTES),
		  (int)(token & TD_STATUS) >> ffs_nr(TD_STATUS));
	dbg_print(addr, "DONE", status, msg);
}

/**
 * dbg_event: prints a generic event
 * @addr:   endpoint address
 * @name:   event name
 * @status: status
 */
static void dbg_event(u8 addr, const char *name, int status)
{
	if (name != NULL)
		dbg_print(addr, name, status, "");
}

/*
 * dbg_queue: prints a QUEUE event
 * @addr:   endpoint address
 * @req:    USB request
 * @status: status
 */
static void dbg_queue(u8 addr, const struct usb_request *req, int status)
{
	char msg[DBG_DATA_MSG];

	if (req != NULL) {
		scnprintf(msg, sizeof(msg),
			  "%d %d", !req->no_interrupt, req->length);
		dbg_print(addr, "QUEUE", status, msg);
	}
}

/**
 * dbg_setup: prints a SETUP event
 * @addr: endpoint address
 * @req:  setup request
 */
static void dbg_setup(u8 addr, const struct usb_ctrlrequest *req)
{
	char msg[DBG_DATA_MSG];

	if (req != NULL) {
		scnprintf(msg, sizeof(msg),
			  "%02X %02X %04X %04X %d", req->bRequestType,
			  req->bRequest, le16_to_cpu(req->wValue),
			  le16_to_cpu(req->wIndex), le16_to_cpu(req->wLength));
		dbg_print(addr, "SETUP", 0, msg);
	}
}

/**
 * show_events: displays the event buffer
 *
 * Check "device.h" for details
 */
static ssize_t show_events(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	unsigned long flags;
	unsigned i, j, n = 0;

	dbg_trace("[%s] %p\n", __func__, buf);
	if (attr == NULL || buf == NULL) {
		dev_err(dev, "[%s] EINVAL\n", __func__);
		return 0;
	}

	read_lock_irqsave(&dbg_data.lck, flags);

	i = dbg_data.idx;
	for (dbg_dec(&i); i != dbg_data.idx; dbg_dec(&i)) {
		n += strlen(dbg_data.buf[i]);
		if (n >= PAGE_SIZE) {
			n -= strlen(dbg_data.buf[i]);
			break;
		}
	}
	for (j = 0, dbg_inc(&i); j < n; dbg_inc(&i))
		j += scnprintf(buf + j, PAGE_SIZE - j,
			       "%s", dbg_data.buf[i]);

	read_unlock_irqrestore(&dbg_data.lck, flags);

	return n;
}

/**
 * store_events: configure whether events are also printed to the console
 *
 * Check "device.h" for details
 */
static ssize_t store_events(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	unsigned tty;

	dbg_trace("[%s] %p, %d\n", __func__, buf, count);
	if (attr == NULL || buf == NULL) {
		dev_err(dev, "[%s] EINVAL\n", __func__);
		goto done;
	}

	if (sscanf(buf, "%u", &tty) != 1 || tty > 1) {
		dev_err(dev, "<1|0>: enable|disable console log\n");
		goto done;
	}

	dbg_data.tty = tty;
	dev_info(dev, "tty = %u", dbg_data.tty);

 done:
	return count;
}
static DEVICE_ATTR(events, S_IRUSR | S_IWUSR, show_events, store_events);
/**
 * show_inters: interrupt status, enable status and history
 *
 * Check "device.h" for details
 */
static ssize_t show_inters(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
	unsigned long flags;
	u32 intr;
	unsigned i, j, n = 0;

	dbg_trace("[%s] %p\n", __func__, buf);
	if (attr == NULL || buf == NULL) {
		dev_err(dev, "[%s] EINVAL\n", __func__);
		return 0;
	}

	spin_lock_irqsave(udc->lock, flags);

	n += scnprintf(buf + n, PAGE_SIZE - n,
		       "status = %08x\n", hw_read_intr_status());
	n += scnprintf(buf + n, PAGE_SIZE - n,
		       "enable = %08x\n", hw_read_intr_enable());

	n += scnprintf(buf + n, PAGE_SIZE - n, "*test = %d\n",
		       isr_statistics.test);
	n += scnprintf(buf + n, PAGE_SIZE - n, "» ui = %d\n",
		       isr_statistics.ui);
	n += scnprintf(buf + n, PAGE_SIZE - n, "» uei = %d\n",
		       isr_statistics.uei);
	n += scnprintf(buf + n, PAGE_SIZE - n, "» pci = %d\n",
		       isr_statistics.pci);
	n += scnprintf(buf + n, PAGE_SIZE - n, "» uri = %d\n",
		       isr_statistics.uri);
	n += scnprintf(buf + n, PAGE_SIZE - n, "» sli = %d\n",
		       isr_statistics.sli);
	n += scnprintf(buf + n, PAGE_SIZE - n, "*none = %d\n",
		       isr_statistics.none);
	n += scnprintf(buf + n, PAGE_SIZE - n, "*hndl = %d\n",
		       isr_statistics.hndl.cnt);

	for (i = isr_statistics.hndl.idx, j = 0; j <= ISR_MASK; j++, i++) {
		i &= ISR_MASK;
		intr = isr_statistics.hndl.buf[i];

		if (USBi_UI & intr)
			n += scnprintf(buf + n, PAGE_SIZE - n, "ui ");
		intr &= ~USBi_UI;
		if (USBi_UEI & intr)
			n += scnprintf(buf + n, PAGE_SIZE - n, "uei ");
		intr &= ~USBi_UEI;
		if (USBi_PCI & intr)
			n += scnprintf(buf + n, PAGE_SIZE - n, "pci ");
		intr &= ~USBi_PCI;
		if (USBi_URI & intr)
			n += scnprintf(buf + n, PAGE_SIZE - n, "uri ");
		intr &= ~USBi_URI;
		if (USBi_SLI & intr)
			n += scnprintf(buf + n, PAGE_SIZE - n, "sli ");
		intr &= ~USBi_SLI;
		if (intr)
			n += scnprintf(buf + n, PAGE_SIZE - n, "??? ");
		if (isr_statistics.hndl.buf[i])
			n += scnprintf(buf + n, PAGE_SIZE - n, "\n");
	}

	spin_unlock_irqrestore(udc->lock, flags);

	return n;
}

/**
 * store_inters: enable & force or disable individual interrupts
 *               (to be used for test purposes only)
 *
 * Check "device.h" for details
 */
static ssize_t store_inters(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
	unsigned long flags;
	unsigned en, bit;

	dbg_trace("[%s] %p, %d\n", __func__, buf, count);
	if (attr == NULL || buf == NULL) {
		dev_err(dev, "[%s] EINVAL\n", __func__);
		goto done;
	}

	if (sscanf(buf, "%u %u", &en, &bit) != 2 || en > 1) {
		dev_err(dev, "<1|0> <bit>: enable|disable interrupt");
		goto done;
	}

	spin_lock_irqsave(udc->lock, flags);
	if (en) {
		if (hw_intr_force(bit))
			dev_err(dev, "invalid bit number\n");
		else
			isr_statistics.test++;
	} else {
		if (hw_intr_clear(bit))
			dev_err(dev, "invalid bit number\n");
	}
	spin_unlock_irqrestore(udc->lock, flags);

 done:
	return count;
}
static DEVICE_ATTR(inters, S_IRUSR | S_IWUSR, show_inters, store_inters);

/**
 * show_port_test: reads port test mode
 *
 * Check "device.h" for details
 */
static ssize_t show_port_test(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
	unsigned long flags;
	unsigned mode;

	dbg_trace("[%s] %p\n", __func__, buf);
	if (attr == NULL || buf == NULL) {
		dev_err(dev, "[%s] EINVAL\n", __func__);
		return 0;
	}

	spin_lock_irqsave(udc->lock, flags);
	mode = hw_port_test_get();
	spin_unlock_irqrestore(udc->lock, flags);

	return scnprintf(buf, PAGE_SIZE, "mode = %u\n", mode);
}

/**
 * store_port_test: writes port test mode
 *
 * Check "device.h" for details
 */
static ssize_t store_port_test(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
	unsigned long flags;
	unsigned mode;

	dbg_trace("[%s] %p, %d\n", __func__, buf, count);
	if (attr == NULL || buf == NULL) {
		dev_err(dev, "[%s] EINVAL\n", __func__);
		goto done;
	}

	if (sscanf(buf, "%u", &mode) != 1) {
		dev_err(dev, "<mode>: set port test mode");
		goto done;
	}

	spin_lock_irqsave(udc->lock, flags);
	if (hw_port_test_set(mode))
		dev_err(dev, "invalid mode\n");
	spin_unlock_irqrestore(udc->lock, flags);

 done:
	return count;
}
static DEVICE_ATTR(port_test, S_IRUSR | S_IWUSR,
		   show_port_test, store_port_test);

/**
 * show_qheads: DMA contents of all queue heads
 *
 * Check "device.h" for details
 */
static ssize_t show_qheads(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
	unsigned long flags;
	unsigned i, j, n = 0;

	dbg_trace("[%s] %p\n", __func__, buf);
	if (attr == NULL || buf == NULL) {
		dev_err(dev, "[%s] EINVAL\n", __func__);
		return 0;
	}

	spin_lock_irqsave(udc->lock, flags);
	for (i = 0; i < hw_ep_max; i++) {
		struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
		n += scnprintf(buf + n, PAGE_SIZE - n,
			       "EP=%02i: RX=%08X TX=%08X\n",
			       i, (u32)mEp->qh[RX].dma, (u32)mEp->qh[TX].dma);
		for (j = 0; j < (sizeof(struct ci13xxx_qh)/sizeof(u32)); j++) {
			n += scnprintf(buf + n, PAGE_SIZE - n,
				       " %04X: %08X %08X\n", j,
				       *((u32 *)mEp->qh[RX].ptr + j),
				       *((u32 *)mEp->qh[TX].ptr + j));
		}
	}
	spin_unlock_irqrestore(udc->lock, flags);

	return n;
}
static DEVICE_ATTR(qheads, S_IRUSR, show_qheads, NULL);
/**
 * show_registers: dumps all registers
 *
 * Check "device.h" for details
 */
static ssize_t show_registers(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
	unsigned long flags;
	u32 dump[512];
	unsigned i, k, n = 0;

	dbg_trace("[%s] %p\n", __func__, buf);
	if (attr == NULL || buf == NULL) {
		dev_err(dev, "[%s] EINVAL\n", __func__);
		return 0;
	}

	spin_lock_irqsave(udc->lock, flags);
	k = hw_register_read(dump, sizeof(dump)/sizeof(u32));
	spin_unlock_irqrestore(udc->lock, flags);

	for (i = 0; i < k; i++) {
		n += scnprintf(buf + n, PAGE_SIZE - n,
			       "reg[0x%04X] = 0x%08X\n",
			       i * (unsigned)sizeof(u32), dump[i]);
	}

	return n;
}

/**
 * store_registers: writes value to register address
 *
 * Check "device.h" for details
 */
static ssize_t store_registers(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
	unsigned long addr, data, flags;

	dbg_trace("[%s] %p, %d\n", __func__, buf, count);
	if (attr == NULL || buf == NULL) {
		dev_err(dev, "[%s] EINVAL\n", __func__);
		goto done;
	}

	if (sscanf(buf, "%li %li", &addr, &data) != 2) {
		dev_err(dev, "<addr> <data>: write data to register address");
		goto done;
	}

	spin_lock_irqsave(udc->lock, flags);
	if (hw_register_write(addr, data))
		dev_err(dev, "invalid address range\n");
	spin_unlock_irqrestore(udc->lock, flags);

 done:
	return count;
}
static DEVICE_ATTR(registers, S_IRUSR | S_IWUSR,
		   show_registers, store_registers);

/**
 * show_requests: DMA contents of all requests currently queued (all endpts)
 *
 * Check "device.h" for details
 */
static ssize_t show_requests(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
	unsigned long flags;
	struct list_head   *ptr = NULL;
	struct ci13xxx_req *req = NULL;
	unsigned i, j, k, n = 0, qSize = sizeof(struct ci13xxx_td)/sizeof(u32);

	dbg_trace("[%s] %p\n", __func__, buf);
	if (attr == NULL || buf == NULL) {
		dev_err(dev, "[%s] EINVAL\n", __func__);
		return 0;
	}

	spin_lock_irqsave(udc->lock, flags);
	for (i = 0; i < hw_ep_max; i++)
		for (k = RX; k <= TX; k++)
			list_for_each(ptr, &udc->ci13xxx_ep[i].qh[k].queue)
			{
				req = list_entry(ptr,
						 struct ci13xxx_req, queue);

				n += scnprintf(buf + n, PAGE_SIZE - n,
					       "EP=%02i: TD=%08X %s\n",
					       i, (u32)req->dma,
					       ((k == RX) ? "RX" : "TX"));

				for (j = 0; j < qSize; j++)
					n += scnprintf(buf + n, PAGE_SIZE - n,
						       " %04X: %08X\n", j,
						       *((u32 *)req->ptr + j));
			}
	spin_unlock_irqrestore(udc->lock, flags);

	return n;
}
static DEVICE_ATTR(requests, S_IRUSR, show_requests, NULL);

/**
 * dbg_create_files: initializes the attribute interface
 * @dev: device
 *
 * This function returns an error code
 */
__maybe_unused static int dbg_create_files(struct device *dev)
{
	int retval = 0;

	if (dev == NULL)
		return -EINVAL;
	retval = device_create_file(dev, &dev_attr_device);
	if (retval)
		goto done;
	retval = device_create_file(dev, &dev_attr_driver);
	if (retval)
		goto rm_device;
	retval = device_create_file(dev, &dev_attr_events);
	if (retval)
		goto rm_driver;
	retval = device_create_file(dev, &dev_attr_inters);
	if (retval)
		goto rm_events;
	retval = device_create_file(dev, &dev_attr_port_test);
	if (retval)
		goto rm_inters;
	retval = device_create_file(dev, &dev_attr_qheads);
	if (retval)
		goto rm_port_test;
	retval = device_create_file(dev, &dev_attr_registers);
	if (retval)
		goto rm_qheads;
	retval = device_create_file(dev, &dev_attr_requests);
	if (retval)
		goto rm_registers;
	return 0;

 rm_registers:
	device_remove_file(dev, &dev_attr_registers);
 rm_qheads:
	device_remove_file(dev, &dev_attr_qheads);
 rm_port_test:
	device_remove_file(dev, &dev_attr_port_test);
 rm_inters:
	device_remove_file(dev, &dev_attr_inters);
 rm_events:
	device_remove_file(dev, &dev_attr_events);
 rm_driver:
	device_remove_file(dev, &dev_attr_driver);
 rm_device:
	device_remove_file(dev, &dev_attr_device);
 done:
	return retval;
}

/**
 * dbg_remove_files: destroys the attribute interface
 * @dev: device
 *
 * This function returns an error code
 */
__maybe_unused static int dbg_remove_files(struct device *dev)
{
	if (dev == NULL)
		return -EINVAL;
	device_remove_file(dev, &dev_attr_requests);
	device_remove_file(dev, &dev_attr_registers);
	device_remove_file(dev, &dev_attr_qheads);
	device_remove_file(dev, &dev_attr_port_test);
	device_remove_file(dev, &dev_attr_inters);
	device_remove_file(dev, &dev_attr_events);
	device_remove_file(dev, &dev_attr_driver);
	device_remove_file(dev, &dev_attr_device);
	return 0;
}

/******************************************************************************
 * UTIL block
 *****************************************************************************/
/**
 * _usb_addr: calculates endpoint address from direction & number
 * @ep: endpoint
 */
static inline u8 _usb_addr(struct ci13xxx_ep *ep)
{
	return ((ep->dir == TX) ? USB_ENDPOINT_DIR_MASK : 0) | ep->num;
}
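/*
 * Example (added for illustration): USB_ENDPOINT_DIR_MASK is 0x80, so a TX
 * (IN) endpoint 1 yields address 0x81 while an RX (OUT) endpoint 2 yields
 * 0x02, matching the bEndpointAddress convention of the USB spec.
 */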
/**
 * _hardware_enqueue: configures a request at hardware level
 * @mEp:  endpoint
 * @mReq: request
 *
 * This function returns an error code
 */
static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
{
	unsigned i;

	trace("%p, %p", mEp, mReq);

	/* don't queue twice */
	if (mReq->req.status == -EALREADY)
		return -EALREADY;
	if (hw_ep_is_primed(mEp->num, mEp->dir))
		return -EBUSY;

	mReq->req.status = -EALREADY;

	if (mReq->req.length && !mReq->req.dma) {
		mReq->req.dma =
			dma_map_single(mEp->device, mReq->req.buf,
				       mReq->req.length, mEp->dir ?
				       DMA_TO_DEVICE : DMA_FROM_DEVICE);
		if (mReq->req.dma == 0)
			return -ENOMEM;

		mReq->map = 1;
	}

	/*
	 * TD configuration
	 * TODO - handle requests which span several TDs
	 */
	memset(mReq->ptr, 0, sizeof(*mReq->ptr));
	mReq->ptr->next    |= TD_TERMINATE;
	mReq->ptr->token    = mReq->req.length << ffs_nr(TD_TOTAL_BYTES);
	mReq->ptr->token   &= TD_TOTAL_BYTES;
	mReq->ptr->token   |= TD_IOC;
	mReq->ptr->token   |= TD_STATUS_ACTIVE;
	mReq->ptr->page[0]  = mReq->req.dma;
	for (i = 1; i < 5; i++)
		mReq->ptr->page[i] =
			(mReq->req.dma + i * CI13XXX_PAGE_SIZE) & ~TD_RESERVED_MASK;

	/*
	 * QH configuration
	 * At this point exclusive access to the qhead is guaranteed
	 * (endpt is not primed), so there is no need to use the tripwire
	 */
	mEp->qh[mEp->dir].ptr->td.next   = mReq->dma;	/* TERMINATE = 0 */
	mEp->qh[mEp->dir].ptr->td.token &= ~TD_STATUS;	/* clear status */
	if (mReq->req.zero == 0)
		mEp->qh[mEp->dir].ptr->cap |=  QH_ZLT;
	else
		mEp->qh[mEp->dir].ptr->cap &= ~QH_ZLT;

	wmb();	/* synchronize before ep prime */

	return hw_ep_prime(mEp->num, mEp->dir,
			   mEp->type == USB_ENDPOINT_XFER_CONTROL);
}
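/*
 * Layout note (illustrative, based on the TD fields used above): a single TD
 * carries five page pointers, so one request can span up to five 4 KiB
 * pages; page[0] keeps the full DMA address while page[1..4] are rounded
 * down to a page boundary, e.g. dma = 0x12345678 gives page[1] = 0x12346000.
 * Larger requests would need the multi-TD handling listed in the TODO above.
 */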
  1287. /**
  1288. * _hardware_dequeue: handles a request at hardware level
  1289. * @gadget: gadget
  1290. * @mEp: endpoint
  1291. *
  1292. * This function returns an error code
  1293. */
  1294. static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
  1295. {
  1296. trace("%p, %p", mEp, mReq);
  1297. if (mReq->req.status != -EALREADY)
  1298. return -EINVAL;
  1299. if (hw_ep_is_primed(mEp->num, mEp->dir))
  1300. hw_ep_flush(mEp->num, mEp->dir);
  1301. mReq->req.status = 0;
  1302. if (mReq->map) {
  1303. dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length,
  1304. mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
  1305. mReq->req.dma = 0;
  1306. mReq->map = 0;
  1307. }
  1308. mReq->req.status = mReq->ptr->token & TD_STATUS;
  1309. if ((TD_STATUS_ACTIVE & mReq->req.status) != 0)
  1310. mReq->req.status = -ECONNRESET;
  1311. else if ((TD_STATUS_HALTED & mReq->req.status) != 0)
  1312. mReq->req.status = -1;
  1313. else if ((TD_STATUS_DT_ERR & mReq->req.status) != 0)
  1314. mReq->req.status = -1;
  1315. else if ((TD_STATUS_TR_ERR & mReq->req.status) != 0)
  1316. mReq->req.status = -1;
  1317. mReq->req.actual = mReq->ptr->token & TD_TOTAL_BYTES;
  1318. mReq->req.actual >>= ffs_nr(TD_TOTAL_BYTES);
  1319. mReq->req.actual = mReq->req.length - mReq->req.actual;
  1320. mReq->req.actual = mReq->req.status ? 0 : mReq->req.actual;
  1321. return mReq->req.actual;
  1322. }
  1323. /**
  1324. * _ep_nuke: dequeues all endpoint requests
  1325. * @mEp: endpoint
  1326. *
  1327. * This function returns an error code
  1328. * Caller must hold lock
  1329. */
  1330. static int _ep_nuke(struct ci13xxx_ep *mEp)
  1331. __releases(mEp->lock)
  1332. __acquires(mEp->lock)
  1333. {
  1334. trace("%p", mEp);
  1335. if (mEp == NULL)
  1336. return -EINVAL;
  1337. hw_ep_flush(mEp->num, mEp->dir);
  1338. while (!list_empty(&mEp->qh[mEp->dir].queue)) {
  1339. /* pop oldest request */
  1340. struct ci13xxx_req *mReq = \
  1341. list_entry(mEp->qh[mEp->dir].queue.next,
  1342. struct ci13xxx_req, queue);
  1343. list_del_init(&mReq->queue);
  1344. mReq->req.status = -ESHUTDOWN;
  1345. if (mReq->req.complete != NULL) {
  1346. spin_unlock(mEp->lock);
  1347. mReq->req.complete(&mEp->ep, &mReq->req);
  1348. spin_lock(mEp->lock);
  1349. }
  1350. }
  1351. return 0;
  1352. }

/**
 * _gadget_stop_activity: stops all USB activity, flushes & disables all endpts
 * @gadget: gadget
 *
 * This function returns an error code
 * Caller must hold lock
 */
static int _gadget_stop_activity(struct usb_gadget *gadget)
{
	struct usb_ep *ep;
	struct ci13xxx    *udc = container_of(gadget, struct ci13xxx, gadget);
	struct ci13xxx_ep *mEp = container_of(gadget->ep0,
					      struct ci13xxx_ep, ep);

	trace("%p", gadget);

	if (gadget == NULL)
		return -EINVAL;

	/* flush all endpoints */
	gadget_for_each_ep(ep, gadget) {
		usb_ep_fifo_flush(ep);
	}
	usb_ep_fifo_flush(gadget->ep0);

	udc->driver->disconnect(gadget);

	/* make sure to disable all endpoints */
	gadget_for_each_ep(ep, gadget) {
		usb_ep_disable(ep);
	}
	usb_ep_disable(gadget->ep0);

	if (mEp->status != NULL) {
		usb_ep_free_request(gadget->ep0, mEp->status);
		mEp->status = NULL;
	}

	return 0;
}

/******************************************************************************
 * ISR block
 *****************************************************************************/
/**
 * isr_reset_handler: USB reset interrupt handler
 * @udc: UDC device
 *
 * This function resets the USB engine after a bus reset occurred
 */
static void isr_reset_handler(struct ci13xxx *udc)
__releases(udc->lock)
__acquires(udc->lock)
{
	struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[0];
	int retval;

	trace("%p", udc);

	if (udc == NULL) {
		err("EINVAL");
		return;
	}

	dbg_event(0xFF, "BUS RST", 0);

	spin_unlock(udc->lock);
	retval = _gadget_stop_activity(&udc->gadget);
	if (retval)
		goto done;

	retval = hw_usb_reset();
	if (retval)
		goto done;
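	/*
	 * Re-enable ep0 and pre-allocate the request that will be used for
	 * the status stages of subsequent control transfers.
	 */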
	retval = usb_ep_enable(&mEp->ep, &ctrl_endpt_desc);
	if (!retval) {
		mEp->status = usb_ep_alloc_request(&mEp->ep, GFP_ATOMIC);
		if (mEp->status == NULL) {
			usb_ep_disable(&mEp->ep);
			retval = -ENOMEM;
		}
	}

 done:
	/* re-acquired on every path so the caller's unlock stays balanced */
	spin_lock(udc->lock);
	if (retval)
		err("error: %i", retval);
}

/**
 * isr_get_status_complete: get_status request complete function
 * @ep:  endpoint
 * @req: request handled
 *
 * Caller must release lock
 */
static void isr_get_status_complete(struct usb_ep *ep, struct usb_request *req)
{
	trace("%p, %p", ep, req);

	if (ep == NULL || req == NULL) {
		err("EINVAL");
		return;
	}

	kfree(req->buf);
	usb_ep_free_request(ep, req);
}

/**
 * isr_get_status_response: get_status request response
 * @mEp:   endpoint
 * @setup: setup request packet
 *
 * This function returns an error code
 */
static int isr_get_status_response(struct ci13xxx_ep *mEp,
				   struct usb_ctrlrequest *setup)
__releases(mEp->lock)
__acquires(mEp->lock)
{
	struct usb_request *req = NULL;
	gfp_t gfp_flags = GFP_ATOMIC;
	int dir, num, retval;

	trace("%p, %p", mEp, setup);

	if (mEp == NULL || setup == NULL)
		return -EINVAL;

	spin_unlock(mEp->lock);
	req = usb_ep_alloc_request(&mEp->ep, gfp_flags);
	spin_lock(mEp->lock);
	if (req == NULL)
		return -ENOMEM;

	req->complete = isr_get_status_complete;
	req->length   = 2;
	req->buf      = kzalloc(req->length, gfp_flags);
	if (req->buf == NULL) {
		retval = -ENOMEM;
		goto err_free_req;
	}
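	/*
	 * GET_STATUS replies with two bytes; for an endpoint recipient the
	 * halt bit is read back directly from the controller.
	 */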
	if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
		/* TODO: D1 - Remote Wakeup; D0 - Self Powered */
		retval = 0;
	} else if ((setup->bRequestType & USB_RECIP_MASK)
		   == USB_RECIP_ENDPOINT) {
		dir = (le16_to_cpu(setup->wIndex) & USB_ENDPOINT_DIR_MASK) ?
			TX : RX;
		num = le16_to_cpu(setup->wIndex) & USB_ENDPOINT_NUMBER_MASK;
		*((u16 *)req->buf) = hw_ep_get_halt(num, dir);
	}
	/* else do nothing; reserved for future use */

	spin_unlock(mEp->lock);
	retval = usb_ep_queue(&mEp->ep, req, gfp_flags);
	spin_lock(mEp->lock);
	if (retval)
		goto err_free_buf;

	return 0;

 err_free_buf:
	kfree(req->buf);
 err_free_req:
	spin_unlock(mEp->lock);
	usb_ep_free_request(&mEp->ep, req);
	spin_lock(mEp->lock);
	return retval;
}

/**
 * isr_setup_status_phase: queues the status phase of a setup transaction
 * @mEp: endpoint
 *
 * This function returns an error code
 */
static int isr_setup_status_phase(struct ci13xxx_ep *mEp)
__releases(mEp->lock)
__acquires(mEp->lock)
{
	int retval;

	trace("%p", mEp);

	/* mEp is always valid & configured */
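	/*
	 * The status stage always runs in the direction opposite to the
	 * preceding stage of the control transfer, hence the flip below.
	 */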
	if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
		mEp->dir = (mEp->dir == TX) ? RX : TX;

	mEp->status->no_interrupt = 1;

	spin_unlock(mEp->lock);
	retval = usb_ep_queue(&mEp->ep, mEp->status, GFP_ATOMIC);
	spin_lock(mEp->lock);

	return retval;
}

/**
 * isr_tr_complete_low: transaction complete low level handler
 * @mEp: endpoint
 *
 * This function returns an error code
 * Caller must hold lock
 */
static int isr_tr_complete_low(struct ci13xxx_ep *mEp)
__releases(mEp->lock)
__acquires(mEp->lock)
{
	struct ci13xxx_req *mReq;
	int retval;

	trace("%p", mEp);

	if (list_empty(&mEp->qh[mEp->dir].queue))
		return -EINVAL;

	/* pop oldest request */
	mReq = list_entry(mEp->qh[mEp->dir].queue.next,
			  struct ci13xxx_req, queue);
	list_del_init(&mReq->queue);

	retval = _hardware_dequeue(mEp, mReq);
	if (retval < 0) {
		dbg_event(_usb_addr(mEp), "DONE", retval);
		goto done;
	}

	dbg_done(_usb_addr(mEp), mReq->ptr->token, retval);
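	/*
	 * Only one dTD is handed to the controller at a time, so the next
	 * queued request (if any) is programmed once this one has retired.
	 */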
	if (!list_empty(&mEp->qh[mEp->dir].queue)) {
		struct ci13xxx_req *mReqEnq;

		mReqEnq = list_entry(mEp->qh[mEp->dir].queue.next,
				     struct ci13xxx_req, queue);
		_hardware_enqueue(mEp, mReqEnq);
	}

	if (mReq->req.complete != NULL) {
		spin_unlock(mEp->lock);
		mReq->req.complete(&mEp->ep, &mReq->req);
		spin_lock(mEp->lock);
	}

 done:
	return retval;
}

/**
 * isr_tr_complete_handler: transaction complete interrupt handler
 * @udc: UDC descriptor
 *
 * This function handles traffic events
 */
static void isr_tr_complete_handler(struct ci13xxx *udc)
__releases(udc->lock)
__acquires(udc->lock)
{
	unsigned i;

	trace("%p", udc);

	if (udc == NULL) {
		err("EINVAL");
		return;
	}

	for (i = 0; i < hw_ep_max; i++) {
		struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
		int type, num, err = -EINVAL;
		struct usb_ctrlrequest req;

		if (mEp->desc == NULL)
			continue;   /* not configured */

		if ((mEp->dir == RX && hw_test_and_clear_complete(i)) ||
		    (mEp->dir == TX && hw_test_and_clear_complete(i + 16))) {
			err = isr_tr_complete_low(mEp);
			if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
				if (err > 0)   /* needs status phase */
					err = isr_setup_status_phase(mEp);
				if (err < 0) {
					dbg_event(_usb_addr(mEp),
						  "ERROR", err);
					spin_unlock(udc->lock);
					if (usb_ep_set_halt(&mEp->ep))
						err("error: ep_set_halt");
					spin_lock(udc->lock);
				}
			}
		}

		if (mEp->type != USB_ENDPOINT_XFER_CONTROL ||
		    !hw_test_and_clear_setup_status(i))
			continue;

		if (i != 0) {
			warn("ctrl traffic received at endpoint");
			continue;
		}

		/* read_setup_packet */
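		/*
		 * The setup guard (tripwire) protects the 8-byte copy from
		 * the queue head: if the controller clears the guard because
		 * a new SETUP packet arrived meanwhile, the copy is retried.
		 */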
		do {
			hw_test_and_set_setup_guard();
			memcpy(&req, &mEp->qh[RX].ptr->setup, sizeof(req));
		} while (!hw_test_and_clear_setup_guard());

		type = req.bRequestType;

		mEp->dir = (type & USB_DIR_IN) ? TX : RX;

		dbg_setup(_usb_addr(mEp), &req);
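		/*
		 * Standard requests the controller must answer itself are
		 * handled below; anything else is delegated to the gadget
		 * driver's setup() callback.
		 */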
		switch (req.bRequest) {
		case USB_REQ_CLEAR_FEATURE:
			if (type != (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
			    le16_to_cpu(req.wValue) != USB_ENDPOINT_HALT)
				goto delegate;
			if (req.wLength != 0)
				break;
			num = le16_to_cpu(req.wIndex);
			num &= USB_ENDPOINT_NUMBER_MASK;
			if (!udc->ci13xxx_ep[num].wedge) {
				spin_unlock(udc->lock);
				err = usb_ep_clear_halt(
					&udc->ci13xxx_ep[num].ep);
				spin_lock(udc->lock);
				if (err)
					break;
			}
			err = isr_setup_status_phase(mEp);
			break;
		case USB_REQ_GET_STATUS:
			if (type != (USB_DIR_IN|USB_RECIP_DEVICE)   &&
			    type != (USB_DIR_IN|USB_RECIP_ENDPOINT) &&
			    type != (USB_DIR_IN|USB_RECIP_INTERFACE))
				goto delegate;
			if (le16_to_cpu(req.wLength) != 2 ||
			    le16_to_cpu(req.wValue)  != 0)
				break;
			err = isr_get_status_response(mEp, &req);
			break;
		case USB_REQ_SET_ADDRESS:
			if (type != (USB_DIR_OUT|USB_RECIP_DEVICE))
				goto delegate;
			if (le16_to_cpu(req.wLength) != 0 ||
			    le16_to_cpu(req.wIndex)  != 0)
				break;
			err = hw_usb_set_address((u8)le16_to_cpu(req.wValue));
			if (err)
				break;
			err = isr_setup_status_phase(mEp);
			break;
		case USB_REQ_SET_FEATURE:
			if (type != (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
			    le16_to_cpu(req.wValue) != USB_ENDPOINT_HALT)
				goto delegate;
			if (req.wLength != 0)
				break;
			num = le16_to_cpu(req.wIndex);
			num &= USB_ENDPOINT_NUMBER_MASK;
			spin_unlock(udc->lock);
			err = usb_ep_set_halt(&udc->ci13xxx_ep[num].ep);
			spin_lock(udc->lock);
			if (err)
				break;
			err = isr_setup_status_phase(mEp);
			break;
		default:
delegate:
			if (req.wLength == 0)   /* no data phase */
				mEp->dir = TX;

			spin_unlock(udc->lock);
			err = udc->driver->setup(&udc->gadget, &req);
			spin_lock(udc->lock);
			break;
		}

		if (err < 0) {
			dbg_event(_usb_addr(mEp), "ERROR", err);

			spin_unlock(udc->lock);
			if (usb_ep_set_halt(&mEp->ep))
				err("error: ep_set_halt");
			spin_lock(udc->lock);
		}
	}
}

/******************************************************************************
 * ENDPT block
 *****************************************************************************/
/**
 * ep_enable: configure endpoint, making it usable
 *
 * Check usb_ep_enable() at "usb_gadget.h" for details
 */
static int ep_enable(struct usb_ep *ep,
		     const struct usb_endpoint_descriptor *desc)
{
	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
	int direction, retval = 0;
	unsigned long flags;

	trace("%p, %p", ep, desc);

	if (ep == NULL || desc == NULL)
		return -EINVAL;

	spin_lock_irqsave(mEp->lock, flags);

	/* only internal SW should enable ctrl endpts */

	mEp->desc = desc;

	if (!list_empty(&mEp->qh[mEp->dir].queue))
		warn("enabling a non-empty endpoint!");

	mEp->dir  = usb_endpoint_dir_in(desc) ? TX : RX;
	mEp->num  = usb_endpoint_num(desc);
	mEp->type = usb_endpoint_type(desc);

	mEp->ep.maxpacket = __constant_le16_to_cpu(desc->wMaxPacketSize);

	direction = mEp->dir;
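	/*
	 * Control endpoints use both the RX and TX queue heads, so the loop
	 * below toggles the direction until it wraps back to where it began.
	 */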
	do {
		dbg_event(_usb_addr(mEp), "ENABLE", 0);

		mEp->qh[mEp->dir].ptr->cap = 0;

		if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
			mEp->qh[mEp->dir].ptr->cap |=  QH_IOS;
		else if (mEp->type == USB_ENDPOINT_XFER_ISOC)
			mEp->qh[mEp->dir].ptr->cap &= ~QH_MULT;
		else
			mEp->qh[mEp->dir].ptr->cap &= ~QH_ZLT;

		mEp->qh[mEp->dir].ptr->cap |=
			(mEp->ep.maxpacket << ffs_nr(QH_MAX_PKT)) & QH_MAX_PKT;
		mEp->qh[mEp->dir].ptr->td.next |= TD_TERMINATE;   /* needed? */

		retval |= hw_ep_enable(mEp->num, mEp->dir, mEp->type);

		if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
			mEp->dir = (mEp->dir == TX) ? RX : TX;

	} while (mEp->dir != direction);

	spin_unlock_irqrestore(mEp->lock, flags);
	return retval;
}

/**
 * ep_disable: endpoint is no longer usable
 *
 * Check usb_ep_disable() at "usb_gadget.h" for details
 */
static int ep_disable(struct usb_ep *ep)
{
	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
	int direction, retval = 0;
	unsigned long flags;

	trace("%p", ep);

	if (ep == NULL)
		return -EINVAL;
	else if (mEp->desc == NULL)
		return -EBUSY;

	spin_lock_irqsave(mEp->lock, flags);

	/* only internal SW should disable ctrl endpts */

	direction = mEp->dir;
	do {
		dbg_event(_usb_addr(mEp), "DISABLE", 0);

		retval |= _ep_nuke(mEp);
		retval |= hw_ep_disable(mEp->num, mEp->dir);

		if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
			mEp->dir = (mEp->dir == TX) ? RX : TX;

	} while (mEp->dir != direction);

	mEp->desc = NULL;

	spin_unlock_irqrestore(mEp->lock, flags);
	return retval;
}

/**
 * ep_alloc_request: allocate a request object to use with this endpoint
 *
 * Check usb_ep_alloc_request() at "usb_gadget.h" for details
 */
static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
{
	struct ci13xxx_ep  *mEp  = container_of(ep, struct ci13xxx_ep, ep);
	struct ci13xxx_req *mReq = NULL;

	trace("%p, %i", ep, gfp_flags);

	if (ep == NULL) {
		err("EINVAL");
		return NULL;
	}

	mReq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags);
	if (mReq != NULL) {
		INIT_LIST_HEAD(&mReq->queue);
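		/*
		 * Each request owns one transfer descriptor, taken from the
		 * coherent TD pool so the controller can DMA it directly.
		 */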
		mReq->ptr = dma_pool_alloc(mEp->td_pool, gfp_flags,
					   &mReq->dma);
		if (mReq->ptr == NULL) {
			kfree(mReq);
			mReq = NULL;
		}
	}

	dbg_event(_usb_addr(mEp), "ALLOC", mReq == NULL);

	return (mReq == NULL) ? NULL : &mReq->req;
}

/**
 * ep_free_request: frees a request object
 *
 * Check usb_ep_free_request() at "usb_gadget.h" for details
 */
static void ep_free_request(struct usb_ep *ep, struct usb_request *req)
{
	struct ci13xxx_ep  *mEp  = container_of(ep,  struct ci13xxx_ep, ep);
	struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
	unsigned long flags;

	trace("%p, %p", ep, req);

	if (ep == NULL || req == NULL) {
		err("EINVAL");
		return;
	} else if (!list_empty(&mReq->queue)) {
		err("EBUSY");
		return;
	}

	spin_lock_irqsave(mEp->lock, flags);

	if (mReq->ptr)
		dma_pool_free(mEp->td_pool, mReq->ptr, mReq->dma);
	kfree(mReq);

	dbg_event(_usb_addr(mEp), "FREE", 0);

	spin_unlock_irqrestore(mEp->lock, flags);
}

/**
 * ep_queue: queues (submits) an I/O request to an endpoint
 *
 * Check usb_ep_queue() at "usb_gadget.h" for details
 */
static int ep_queue(struct usb_ep *ep, struct usb_request *req,
		    gfp_t __maybe_unused gfp_flags)
{
	struct ci13xxx_ep  *mEp  = container_of(ep,  struct ci13xxx_ep, ep);
	struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
	int retval = 0;
	unsigned long flags;

	trace("%p, %p, %X", ep, req, gfp_flags);

	if (ep == NULL || req == NULL || mEp->desc == NULL)
		return -EINVAL;

	spin_lock_irqsave(mEp->lock, flags);

	if (mEp->type == USB_ENDPOINT_XFER_CONTROL &&
	    !list_empty(&mEp->qh[mEp->dir].queue)) {
		_ep_nuke(mEp);
		retval = -EOVERFLOW;
		warn("endpoint ctrl %X nuked", _usb_addr(mEp));
	}

	/* first nuke then test link, e.g. previous status has not sent */
	if (!list_empty(&mReq->queue)) {
		retval = -EBUSY;
		err("request already in queue");
		goto done;
	}
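	/*
	 * A request maps onto a single dTD (see _hardware_enqueue), so its
	 * length is capped at four controller pages.
	 */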
	if (req->length > (4 * CI13XXX_PAGE_SIZE)) {
		req->length = (4 * CI13XXX_PAGE_SIZE);
		retval = -EMSGSIZE;
		warn("request length truncated");
	}

	dbg_queue(_usb_addr(mEp), req, retval);

	/* push request */
	mReq->req.status = -EINPROGRESS;
	mReq->req.actual = 0;
	list_add_tail(&mReq->queue, &mEp->qh[mEp->dir].queue);
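	/*
	 * Only the head of a previously empty queue is handed to the
	 * hardware here; later entries are programmed from
	 * isr_tr_complete_low() as earlier requests retire.
	 */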
	if (list_is_singular(&mEp->qh[mEp->dir].queue))
		retval = _hardware_enqueue(mEp, mReq);

	if (retval == -EALREADY) {
		dbg_event(_usb_addr(mEp), "QUEUE", retval);
		retval = 0;
	}

 done:
	spin_unlock_irqrestore(mEp->lock, flags);
	return retval;
}

/**
 * ep_dequeue: dequeues (cancels, unlinks) an I/O request from an endpoint
 *
 * Check usb_ep_dequeue() at "usb_gadget.h" for details
 */
static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	struct ci13xxx_ep  *mEp  = container_of(ep,  struct ci13xxx_ep, ep);
	struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
	unsigned long flags;

	trace("%p, %p", ep, req);

	if (ep == NULL || req == NULL || mEp->desc == NULL ||
	    list_empty(&mReq->queue) || list_empty(&mEp->qh[mEp->dir].queue))
		return -EINVAL;

	spin_lock_irqsave(mEp->lock, flags);

	dbg_event(_usb_addr(mEp), "DEQUEUE", 0);

	if (mReq->req.status == -EALREADY)
		_hardware_dequeue(mEp, mReq);

	/* pop request */
	list_del_init(&mReq->queue);
	req->status = -ECONNRESET;

	if (mReq->req.complete != NULL) {
		spin_unlock(mEp->lock);
		mReq->req.complete(&mEp->ep, &mReq->req);
		spin_lock(mEp->lock);
	}

	spin_unlock_irqrestore(mEp->lock, flags);
	return 0;
}

/**
 * ep_set_halt: sets the endpoint halt feature
 *
 * Check usb_ep_set_halt() at "usb_gadget.h" for details
 */
static int ep_set_halt(struct usb_ep *ep, int value)
{
	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
	int direction, retval = 0;
	unsigned long flags;

	trace("%p, %i", ep, value);

	if (ep == NULL || mEp->desc == NULL)
		return -EINVAL;

	spin_lock_irqsave(mEp->lock, flags);

#ifndef STALL_IN
	/* g_file_storage MS compliant but g_zero fails chapter 9 compliance */
	if (value && mEp->type == USB_ENDPOINT_XFER_BULK && mEp->dir == TX &&
	    !list_empty(&mEp->qh[mEp->dir].queue)) {
		spin_unlock_irqrestore(mEp->lock, flags);
		return -EAGAIN;
	}
#endif

	direction = mEp->dir;
	do {
		dbg_event(_usb_addr(mEp), "HALT", value);
		retval |= hw_ep_set_halt(mEp->num, mEp->dir, value);

		if (!value)
			mEp->wedge = 0;

		if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
			mEp->dir = (mEp->dir == TX) ? RX : TX;

	} while (mEp->dir != direction);

	spin_unlock_irqrestore(mEp->lock, flags);
	return retval;
}

/**
 * ep_set_wedge: sets the halt feature and ignores clear requests
 *
 * Check usb_ep_set_wedge() at "usb_gadget.h" for details
 */
static int ep_set_wedge(struct usb_ep *ep)
{
	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
	unsigned long flags;

	trace("%p", ep);

	if (ep == NULL || mEp->desc == NULL)
		return -EINVAL;

	spin_lock_irqsave(mEp->lock, flags);

	dbg_event(_usb_addr(mEp), "WEDGE", 0);
	mEp->wedge = 1;

	spin_unlock_irqrestore(mEp->lock, flags);

	return usb_ep_set_halt(ep);
}

/**
 * ep_fifo_flush: flushes contents of a fifo
 *
 * Check usb_ep_fifo_flush() at "usb_gadget.h" for details
 */
static void ep_fifo_flush(struct usb_ep *ep)
{
	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
	unsigned long flags;

	trace("%p", ep);

	if (ep == NULL) {
		err("%02X: -EINVAL", _usb_addr(mEp));
		return;
	}

	spin_lock_irqsave(mEp->lock, flags);

	dbg_event(_usb_addr(mEp), "FFLUSH", 0);
	hw_ep_flush(mEp->num, mEp->dir);

	spin_unlock_irqrestore(mEp->lock, flags);
}

/**
 * Endpoint-specific part of the API to the USB controller hardware
 * Check "usb_gadget.h" for details
 */
static const struct usb_ep_ops usb_ep_ops = {
	.enable        = ep_enable,
	.disable       = ep_disable,
	.alloc_request = ep_alloc_request,
	.free_request  = ep_free_request,
	.queue         = ep_queue,
	.dequeue       = ep_dequeue,
	.set_halt      = ep_set_halt,
	.set_wedge     = ep_set_wedge,
	.fifo_flush    = ep_fifo_flush,
};

/******************************************************************************
 * GADGET block
 *****************************************************************************/
static int ci13xxx_vbus_session(struct usb_gadget *_gadget, int is_active)
{
	struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
	unsigned long flags;
	int gadget_ready = 0;

	if (!(udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS))
		return -EOPNOTSUPP;

	spin_lock_irqsave(udc->lock, flags);
	udc->vbus_active = is_active;
	if (udc->driver)
		gadget_ready = 1;
	spin_unlock_irqrestore(udc->lock, flags);
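	/*
	 * VBUS appearing starts the controller (hw_device_state() with the
	 * ep0 queue-head address); VBUS going away stops it and tears down
	 * any pending activity.
	 */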
	if (gadget_ready) {
		if (is_active) {
			pm_runtime_get_sync(&_gadget->dev);
			hw_device_reset(udc);
			hw_device_state(udc->ci13xxx_ep[0].qh[RX].dma);
		} else {
			hw_device_state(0);
			if (udc->udc_driver->notify_event)
				udc->udc_driver->notify_event(udc,
					CI13XXX_CONTROLLER_STOPPED_EVENT);
			_gadget_stop_activity(&udc->gadget);
			pm_runtime_put_sync(&_gadget->dev);
		}
	}

	return 0;
}

/**
 * Device operations part of the API to the USB controller hardware,
 * which don't involve endpoints (or i/o)
 * Check "usb_gadget.h" for details
 */
static const struct usb_gadget_ops usb_gadget_ops = {
	.vbus_session	= ci13xxx_vbus_session,
};

/**
 * usb_gadget_probe_driver: register a gadget driver
 * @driver: the driver being registered
 * @bind: the driver's bind callback
 *
 * Check usb_gadget_probe_driver() at <linux/usb/gadget.h> for details.
 * Interrupts are enabled here.
 */
int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
		int (*bind)(struct usb_gadget *))
{
	struct ci13xxx *udc = _udc;
	unsigned long i, k, flags;
	int retval = -ENOMEM;

	trace("%p", driver);

	if (driver             == NULL ||
	    bind               == NULL ||
	    driver->setup      == NULL ||
	    driver->disconnect == NULL ||
	    driver->suspend    == NULL ||
	    driver->resume     == NULL)
		return -EINVAL;
	else if (udc         == NULL)
		return -ENODEV;
	else if (udc->driver != NULL)
		return -EBUSY;

	/* alloc resources */
	udc->qh_pool = dma_pool_create("ci13xxx_qh", &udc->gadget.dev,
				       sizeof(struct ci13xxx_qh),
				       64, CI13XXX_PAGE_SIZE);
	if (udc->qh_pool == NULL)
		return -ENOMEM;

	udc->td_pool = dma_pool_create("ci13xxx_td", &udc->gadget.dev,
				       sizeof(struct ci13xxx_td),
				       64, CI13XXX_PAGE_SIZE);
	if (udc->td_pool == NULL) {
		dma_pool_destroy(udc->qh_pool);
		udc->qh_pool = NULL;
		return -ENOMEM;
	}

	spin_lock_irqsave(udc->lock, flags);

	info("hw_ep_max = %d", hw_ep_max);

	udc->driver = driver;
	udc->gadget.dev.driver = NULL;

	retval = 0;
	for (i = 0; i < hw_ep_max; i++) {
		struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];

		scnprintf(mEp->name, sizeof(mEp->name), "ep%i", (int)i);

		mEp->lock    = udc->lock;
		mEp->device  = &udc->gadget.dev;
		mEp->td_pool = udc->td_pool;

		mEp->ep.name      = mEp->name;
		mEp->ep.ops       = &usb_ep_ops;
		mEp->ep.maxpacket = CTRL_PAYLOAD_MAX;

		/* this allocation cannot be random */
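		/*
		 * Each endpoint gets two queue heads (RX and TX) from the
		 * coherent pool; dma_pool_alloc() may sleep with GFP_KERNEL,
		 * so the lock is dropped around the allocation.
		 */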
		for (k = RX; k <= TX; k++) {
			INIT_LIST_HEAD(&mEp->qh[k].queue);
			spin_unlock_irqrestore(udc->lock, flags);
			mEp->qh[k].ptr = dma_pool_alloc(udc->qh_pool,
							GFP_KERNEL,
							&mEp->qh[k].dma);
			spin_lock_irqsave(udc->lock, flags);
			if (mEp->qh[k].ptr == NULL)
				retval = -ENOMEM;
			else
				memset(mEp->qh[k].ptr, 0,
				       sizeof(*mEp->qh[k].ptr));
		}
		if (i == 0)
			udc->gadget.ep0 = &mEp->ep;
		else
			list_add_tail(&mEp->ep.ep_list, &udc->gadget.ep_list);
	}
	if (retval)
		goto done;

	/* bind gadget */
	driver->driver.bus     = NULL;
	udc->gadget.dev.driver = &driver->driver;

	spin_unlock_irqrestore(udc->lock, flags);
	retval = bind(&udc->gadget);		/* MAY SLEEP */
	spin_lock_irqsave(udc->lock, flags);

	if (retval) {
		udc->gadget.dev.driver = NULL;
		goto done;
	}

	pm_runtime_get_sync(&udc->gadget.dev);
	if (udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) {
		if (udc->vbus_active) {
			if (udc->udc_driver->flags & CI13XXX_REGS_SHARED)
				hw_device_reset(udc);
		} else {
			pm_runtime_put_sync(&udc->gadget.dev);
			goto done;
		}
	}

	retval = hw_device_state(udc->ci13xxx_ep[0].qh[RX].dma);
	if (retval)
		pm_runtime_put_sync(&udc->gadget.dev);

 done:
	spin_unlock_irqrestore(udc->lock, flags);
	if (retval)
		usb_gadget_unregister_driver(driver);
	return retval;
}
EXPORT_SYMBOL(usb_gadget_probe_driver);
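
/*
 * Registration sketch from the gadget-driver side (not part of this file;
 * the callback and struct names below are hypothetical):
 *
 *	static struct usb_gadget_driver my_driver = {
 *		.function   = "my_gadget",
 *		.speed      = USB_SPEED_HIGH,
 *		.setup      = my_setup,
 *		.disconnect = my_disconnect,
 *		.suspend    = my_suspend,
 *		.resume     = my_resume,
 *		.unbind     = my_unbind,
 *	};
 *
 *	retval = usb_gadget_probe_driver(&my_driver, my_bind);
 *
 * setup, disconnect, suspend, resume and the bind callback must all be
 * non-NULL, otherwise registration is rejected with -EINVAL (see above).
 */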

/**
 * usb_gadget_unregister_driver: unregister a gadget driver
 *
 * Check usb_gadget_unregister_driver() at "usb_gadget.h" for details
 */
int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
{
	struct ci13xxx *udc = _udc;
	unsigned long i, k, flags;

	trace("%p", driver);

	if (driver             == NULL ||
	    driver->unbind     == NULL ||
	    driver->setup      == NULL ||
	    driver->disconnect == NULL ||
	    driver->suspend    == NULL ||
	    driver->resume     == NULL ||
	    driver             != udc->driver)
		return -EINVAL;

	spin_lock_irqsave(udc->lock, flags);

	if (!(udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) ||
	    udc->vbus_active) {
		hw_device_state(0);
		if (udc->udc_driver->notify_event)
			udc->udc_driver->notify_event(udc,
				CI13XXX_CONTROLLER_STOPPED_EVENT);
		_gadget_stop_activity(&udc->gadget);
		pm_runtime_put(&udc->gadget.dev);
	}

	/* unbind gadget */
	spin_unlock_irqrestore(udc->lock, flags);
	driver->unbind(&udc->gadget);		/* MAY SLEEP */
	spin_lock_irqsave(udc->lock, flags);

	udc->gadget.dev.driver = NULL;

	/* free resources */
	for (i = 0; i < hw_ep_max; i++) {
		struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];

		if (i == 0)
			udc->gadget.ep0 = NULL;
		else if (!list_empty(&mEp->ep.ep_list))
			list_del_init(&mEp->ep.ep_list);

		for (k = RX; k <= TX; k++)
			if (mEp->qh[k].ptr != NULL)
				dma_pool_free(udc->qh_pool,
					      mEp->qh[k].ptr, mEp->qh[k].dma);
	}

	udc->driver = NULL;

	spin_unlock_irqrestore(udc->lock, flags);

	if (udc->td_pool != NULL) {
		dma_pool_destroy(udc->td_pool);
		udc->td_pool = NULL;
	}
	if (udc->qh_pool != NULL) {
		dma_pool_destroy(udc->qh_pool);
		udc->qh_pool = NULL;
	}

	return 0;
}
EXPORT_SYMBOL(usb_gadget_unregister_driver);

/******************************************************************************
 * BUS block
 *****************************************************************************/
/**
 * udc_irq: global interrupt handler
 *
 * This function returns IRQ_HANDLED if the IRQ has been handled
 * It locks access to registers
 */
static irqreturn_t udc_irq(void)
{
	struct ci13xxx *udc = _udc;
	irqreturn_t retval;
	u32 intr;

	trace();

	if (udc == NULL) {
		err("ENODEV");
		return IRQ_HANDLED;
	}

	spin_lock(udc->lock);
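	/*
	 * When the register bank is shared with a host-mode controller,
	 * only service the interrupt if the core is currently in device
	 * mode; otherwise let the other driver handle it.
	 */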
	if (udc->udc_driver->flags & CI13XXX_REGS_SHARED) {
		if (hw_cread(CAP_USBMODE, USBMODE_CM) !=
		    USBMODE_CM_DEVICE) {
			spin_unlock(udc->lock);
			return IRQ_NONE;
		}
	}

	intr = hw_test_and_clear_intr_active();
	if (intr) {
		isr_statistics.hndl.buf[isr_statistics.hndl.idx++] = intr;
		isr_statistics.hndl.idx &= ISR_MASK;
		isr_statistics.hndl.cnt++;

		/* order defines priority - do NOT change it */
		if (USBi_URI & intr) {
			isr_statistics.uri++;
			isr_reset_handler(udc);
		}
		if (USBi_PCI & intr) {
			isr_statistics.pci++;
			udc->gadget.speed = hw_port_is_high_speed() ?
				USB_SPEED_HIGH : USB_SPEED_FULL;
		}
		if (USBi_UEI & intr)
			isr_statistics.uei++;
		if (USBi_UI & intr) {
			isr_statistics.ui++;
			isr_tr_complete_handler(udc);
		}
		if (USBi_SLI & intr)
			isr_statistics.sli++;
		retval = IRQ_HANDLED;
	} else {
		isr_statistics.none++;
		retval = IRQ_NONE;
	}
	spin_unlock(udc->lock);

	return retval;
}

/**
 * udc_release: driver release function
 * @dev: device
 *
 * Currently does nothing
 */
static void udc_release(struct device *dev)
{
	trace("%p", dev);

	if (dev == NULL)
		err("EINVAL");
}

/**
 * udc_probe: parent probe must call this to initialize UDC
 * @driver: UDC driver, provides the controller name and flags
 * @dev:    parent device
 * @regs:   registers base address
 *
 * This function returns an error code
 * No interrupts active, the IRQ has not been requested yet
 * Kernel assumes 32-bit DMA operations by default, no need to dma_set_mask
 */
static int udc_probe(struct ci13xxx_udc_driver *driver, struct device *dev,
		void __iomem *regs)
{
	struct ci13xxx *udc;
	int retval = 0;

	trace("%p, %p, %p", dev, regs, driver);

	if (dev == NULL || regs == NULL || driver == NULL ||
	    driver->name == NULL)
		return -EINVAL;
	udc = kzalloc(sizeof(struct ci13xxx), GFP_KERNEL);
	if (udc == NULL)
		return -ENOMEM;

	udc->lock = &udc_lock;
	udc->regs = regs;
	udc->udc_driver = driver;

	udc->gadget.ops          = &usb_gadget_ops;
	udc->gadget.speed        = USB_SPEED_UNKNOWN;
	udc->gadget.is_dualspeed = 1;
	udc->gadget.is_otg       = 0;
	udc->gadget.name         = driver->name;

	INIT_LIST_HEAD(&udc->gadget.ep_list);
	udc->gadget.ep0 = NULL;

	dev_set_name(&udc->gadget.dev, "gadget");
	udc->gadget.dev.dma_mask = dev->dma_mask;
	udc->gadget.dev.coherent_dma_mask = dev->coherent_dma_mask;
	udc->gadget.dev.parent   = dev;
	udc->gadget.dev.release  = udc_release;

	retval = hw_device_init(regs);
	if (retval < 0)
		goto free_udc;

	udc->transceiver = otg_get_transceiver();

	if (udc->udc_driver->flags & CI13XXX_REQUIRE_TRANSCEIVER) {
		if (udc->transceiver == NULL) {
			retval = -ENODEV;
			goto free_udc;
		}
	}
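	/*
	 * With exclusive register ownership the controller can be put into
	 * device mode right away; shared registers defer the reset until
	 * the device side is actually brought up.
	 */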
	if (!(udc->udc_driver->flags & CI13XXX_REGS_SHARED)) {
		retval = hw_device_reset(udc);
		if (retval)
			goto put_transceiver;
	}

	retval = device_register(&udc->gadget.dev);
	if (retval) {
		put_device(&udc->gadget.dev);
		goto put_transceiver;
	}

#ifdef CONFIG_USB_GADGET_DEBUG_FILES
	retval = dbg_create_files(&udc->gadget.dev);
#endif
	if (retval)
		goto unreg_device;

	if (udc->transceiver) {
		retval = otg_set_peripheral(udc->transceiver, &udc->gadget);
		if (retval)
			goto remove_dbg;
	}

	pm_runtime_no_callbacks(&udc->gadget.dev);
	pm_runtime_enable(&udc->gadget.dev);

	_udc = udc;
	return retval;

	err("error = %i", retval);
remove_dbg:
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
	dbg_remove_files(&udc->gadget.dev);
#endif
unreg_device:
	device_unregister(&udc->gadget.dev);
put_transceiver:
	if (udc->transceiver)
		otg_put_transceiver(udc->transceiver);
free_udc:
	kfree(udc);
	_udc = NULL;
	return retval;
}

/**
 * udc_remove: parent remove must call this to remove UDC
 *
 * No interrupts active, the IRQ has been released
 */
static void udc_remove(void)
{
	struct ci13xxx *udc = _udc;

	if (udc == NULL) {
		err("EINVAL");
		return;
	}

	if (udc->transceiver) {
		otg_set_peripheral(udc->transceiver, &udc->gadget);
		otg_put_transceiver(udc->transceiver);
	}
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
	dbg_remove_files(&udc->gadget.dev);
#endif
	device_unregister(&udc->gadget.dev);

	kfree(udc);
	_udc = NULL;
}