lpc32xx_udc.c 87 KB

  1. /*
  2. * USB Gadget driver for LPC32xx
  3. *
  4. * Authors:
  5. * Kevin Wells <kevin.wells@nxp.com>
  6. * Mike James
  7. * Roland Stigge <stigge@antcom.de>
  8. *
  9. * Copyright (C) 2006 Philips Semiconductors
  10. * Copyright (C) 2009 NXP Semiconductors
  11. * Copyright (C) 2012 Roland Stigge
  12. *
  13. * Note: This driver is based on original work done by Mike James for
  14. * the LPC3180.
  15. *
  16. * This program is free software; you can redistribute it and/or modify
  17. * it under the terms of the GNU General Public License as published by
  18. * the Free Software Foundation; either version 2 of the License, or
  19. * (at your option) any later version.
  20. *
  21. * This program is distributed in the hope that it will be useful,
  22. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  23. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  24. * GNU General Public License for more details.
  25. *
  26. * You should have received a copy of the GNU General Public License
  27. * along with this program; if not, write to the Free Software
  28. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  29. */
  30. #include <linux/kernel.h>
  31. #include <linux/module.h>
  32. #include <linux/platform_device.h>
  33. #include <linux/delay.h>
  34. #include <linux/ioport.h>
  35. #include <linux/slab.h>
  36. #include <linux/errno.h>
  37. #include <linux/init.h>
  38. #include <linux/list.h>
  39. #include <linux/interrupt.h>
  40. #include <linux/proc_fs.h>
  41. #include <linux/clk.h>
  42. #include <linux/usb/ch9.h>
  43. #include <linux/usb/gadget.h>
  44. #include <linux/i2c.h>
  45. #include <linux/kthread.h>
  46. #include <linux/freezer.h>
  47. #include <linux/dma-mapping.h>
  48. #include <linux/dmapool.h>
  49. #include <linux/workqueue.h>
  50. #include <linux/of.h>
  51. #include <linux/usb/isp1301.h>
  52. #include <asm/byteorder.h>
  53. #include <mach/hardware.h>
  54. #include <linux/io.h>
  55. #include <asm/irq.h>
  56. #include <asm/system.h>
  57. #include <mach/platform.h>
  58. #include <mach/irqs.h>
  59. #include <mach/board.h>
  60. #ifdef CONFIG_USB_GADGET_DEBUG_FILES
  61. #include <linux/debugfs.h>
  62. #include <linux/seq_file.h>
  63. #endif
  64. /*
  65. * USB device configuration structure
  66. */
  67. typedef void (*usc_chg_event)(int);
  68. struct lpc32xx_usbd_cfg {
  69. int vbus_drv_pol; /* 0=active low drive for VBUS via ISP1301 */
  70. usc_chg_event conn_chgb; /* Connection change event (optional) */
  71. usc_chg_event susp_chgb; /* Suspend/resume event (optional) */
  72. usc_chg_event rmwk_chgb; /* Enable/disable remote wakeup */
  73. };
  74. /*
  75. * controller driver data structures
  76. */
  77. /* 16 endpoints (not to be confused with 32 hardware endpoints) */
  78. #define NUM_ENDPOINTS 16
  79. /*
  80. * IRQ indices make reading the code a little easier
  81. */
  82. #define IRQ_USB_LP 0
  83. #define IRQ_USB_HP 1
  84. #define IRQ_USB_DEVDMA 2
  85. #define IRQ_USB_ATX 3
  86. #define EP_OUT 0 /* RX (from host) */
  87. #define EP_IN 1 /* TX (to host) */
  88. /* Returns the interrupt mask for the selected hardware endpoint */
  89. #define EP_MASK_SEL(ep, dir) (1 << (((ep) * 2) + dir))
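/* Each logical endpoint is backed by a pair of hardware endpoints: logical
 * EP n uses hardware EP (n * 2) for OUT (RX) and ((n * 2) + 1) for IN (TX),
 * which is the mapping EP_MASK_SEL() encodes */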
  90. #define EP_INT_TYPE 0
  91. #define EP_ISO_TYPE 1
  92. #define EP_BLK_TYPE 2
  93. #define EP_CTL_TYPE 3
  94. /* EP0 states */
  95. #define WAIT_FOR_SETUP 0 /* Wait for setup packet */
  96. #define DATA_IN 1 /* Expect dev->host transfer */
  97. #define DATA_OUT 2 /* Expect host->dev transfer */
  98. /* DD (DMA Descriptor) structure, requires word alignment. This is already
  99. * defined in the LPC32XX USB device header file, but this version is slightly
  100. * modified to tag some work data with each DMA descriptor. */
  101. struct lpc32xx_usbd_dd_gad {
  102. u32 dd_next_phy;
  103. u32 dd_setup;
  104. u32 dd_buffer_addr;
  105. u32 dd_status;
  106. u32 dd_iso_ps_mem_addr;
  107. u32 this_dma;
  108. u32 iso_status[6]; /* 5 spare */
  109. u32 dd_next_v;
  110. };
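/* The first five words mirror the hardware DD layout (see struct
 * lpc32xx_usbd_dd below); this_dma, iso_status[] and dd_next_v are extra
 * driver bookkeeping carried with each descriptor */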
  111. /*
  112. * Logical endpoint structure
  113. */
  114. struct lpc32xx_ep {
  115. struct usb_ep ep;
  116. struct list_head queue;
  117. struct lpc32xx_udc *udc;
  118. u32 hwep_num_base; /* Physical hardware EP */
  119. u32 hwep_num; /* Maps to hardware endpoint */
  120. u32 maxpacket;
  121. u32 lep;
  122. bool is_in;
  123. bool req_pending;
  124. u32 eptype;
  125. u32 totalints;
  126. bool wedge;
  127. const struct usb_endpoint_descriptor *desc;
  128. };
  129. /*
  130. * Common UDC structure
  131. */
  132. struct lpc32xx_udc {
  133. struct usb_gadget gadget;
  134. struct usb_gadget_driver *driver;
  135. struct platform_device *pdev;
  136. struct device *dev;
  137. struct dentry *pde;
  138. spinlock_t lock;
  139. struct i2c_client *isp1301_i2c_client;
  140. /* Board and device specific */
  141. struct lpc32xx_usbd_cfg *board;
  142. u32 io_p_start;
  143. u32 io_p_size;
  144. void __iomem *udp_baseaddr;
  145. int udp_irq[4];
  146. struct clk *usb_pll_clk;
  147. struct clk *usb_slv_clk;
  148. struct clk *usb_otg_clk;
  149. /* DMA support */
  150. u32 *udca_v_base;
  151. u32 udca_p_base;
  152. struct dma_pool *dd_cache;
  153. /* Common EP and control data */
  154. u32 enabled_devints;
  155. u32 enabled_hwepints;
  156. u32 dev_status;
  157. u32 realized_eps;
  158. /* VBUS detection, pullup, and power flags */
  159. u8 vbus;
  160. u8 last_vbus;
  161. int pullup;
  162. int poweron;
  163. /* Work queues related to I2C support */
  164. struct work_struct pullup_job;
  165. struct work_struct vbus_job;
  166. struct work_struct power_job;
  167. /* USB device peripheral - various */
  168. struct lpc32xx_ep ep[NUM_ENDPOINTS];
  169. bool enabled;
  170. bool clocked;
  171. bool suspended;
  172. bool selfpowered;
  173. int ep0state;
  174. atomic_t enabled_ep_cnt;
  175. wait_queue_head_t ep_disable_wait_queue;
  176. };
  177. /*
  178. * Endpoint request
  179. */
  180. struct lpc32xx_request {
  181. struct usb_request req;
  182. struct list_head queue;
  183. struct lpc32xx_usbd_dd_gad *dd_desc_ptr;
  184. bool mapped;
  185. bool send_zlp;
  186. };
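/* send_zlp: a transfer whose length is an exact multiple of maxpacket is
 * terminated with an extra zero-length packet; udc_ep_in_req_dma() clears
 * the flag when the final packet is already short */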
  187. static inline struct lpc32xx_udc *to_udc(struct usb_gadget *g)
  188. {
  189. return container_of(g, struct lpc32xx_udc, gadget);
  190. }
  191. #define ep_dbg(epp, fmt, arg...) \
  192. dev_dbg(epp->udc->dev, "%s: " fmt, __func__, ## arg)
  193. #define ep_err(epp, fmt, arg...) \
  194. dev_err(epp->udc->dev, "%s: " fmt, __func__, ## arg)
  195. #define ep_info(epp, fmt, arg...) \
  196. dev_info(epp->udc->dev, "%s: " fmt, __func__, ## arg)
  197. #define ep_warn(epp, fmt, arg...) \
  198. dev_warn(epp->udc->dev, "%s: " fmt, __func__, ## arg)
  199. #define UDCA_BUFF_SIZE (128)
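/* The UDCA is a table of 32 DMA descriptor pointers, one per hardware
 * endpoint (32 * 4 bytes = 128 bytes); udca_v_base/udca_p_base reference
 * this table and its physical address is programmed into USBD_UDCAH */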
  200. /* TODO: When the clock framework is introduced in LPC32xx, IO_ADDRESS will
  201. * be replaced with an ioremap()ed pointer
  202. */
  203. #define USB_CTRL IO_ADDRESS(LPC32XX_CLK_PM_BASE + 0x64)
  204. /* USB_CTRL bit defines */
  205. #define USB_SLAVE_HCLK_EN (1 << 24)
  206. #define USB_HOST_NEED_CLK_EN (1 << 21)
  207. #define USB_DEV_NEED_CLK_EN (1 << 22)
  208. /**********************************************************************
  209. * USB device controller register offsets
  210. **********************************************************************/
  211. #define USBD_DEVINTST(x) ((x) + 0x200)
  212. #define USBD_DEVINTEN(x) ((x) + 0x204)
  213. #define USBD_DEVINTCLR(x) ((x) + 0x208)
  214. #define USBD_DEVINTSET(x) ((x) + 0x20C)
  215. #define USBD_CMDCODE(x) ((x) + 0x210)
  216. #define USBD_CMDDATA(x) ((x) + 0x214)
  217. #define USBD_RXDATA(x) ((x) + 0x218)
  218. #define USBD_TXDATA(x) ((x) + 0x21C)
  219. #define USBD_RXPLEN(x) ((x) + 0x220)
  220. #define USBD_TXPLEN(x) ((x) + 0x224)
  221. #define USBD_CTRL(x) ((x) + 0x228)
  222. #define USBD_DEVINTPRI(x) ((x) + 0x22C)
  223. #define USBD_EPINTST(x) ((x) + 0x230)
  224. #define USBD_EPINTEN(x) ((x) + 0x234)
  225. #define USBD_EPINTCLR(x) ((x) + 0x238)
  226. #define USBD_EPINTSET(x) ((x) + 0x23C)
  227. #define USBD_EPINTPRI(x) ((x) + 0x240)
  228. #define USBD_REEP(x) ((x) + 0x244)
  229. #define USBD_EPIND(x) ((x) + 0x248)
  230. #define USBD_EPMAXPSIZE(x) ((x) + 0x24C)
  231. /* DMA support registers only below */
  232. /* Set, clear, or get enabled state of the DMA request status. If
  233. * enabled, an IN or OUT token will start a DMA transfer for the EP */
  234. #define USBD_DMARST(x) ((x) + 0x250)
  235. #define USBD_DMARCLR(x) ((x) + 0x254)
  236. #define USBD_DMARSET(x) ((x) + 0x258)
  237. /* DMA UDCA head pointer */
  238. #define USBD_UDCAH(x) ((x) + 0x280)
  239. /* EP DMA status, enable, and disable. This is used to specifically
  240. * enable or disable DMA for a specific EP */
  241. #define USBD_EPDMAST(x) ((x) + 0x284)
  242. #define USBD_EPDMAEN(x) ((x) + 0x288)
  243. #define USBD_EPDMADIS(x) ((x) + 0x28C)
  244. /* DMA master interrupts enable and pending interrupts */
  245. #define USBD_DMAINTST(x) ((x) + 0x290)
  246. #define USBD_DMAINTEN(x) ((x) + 0x294)
  247. /* DMA end of transfer interrupt enable, disable, status */
  248. #define USBD_EOTINTST(x) ((x) + 0x2A0)
  249. #define USBD_EOTINTCLR(x) ((x) + 0x2A4)
  250. #define USBD_EOTINTSET(x) ((x) + 0x2A8)
  251. /* New DD request interrupt enable, disable, status */
  252. #define USBD_NDDRTINTST(x) ((x) + 0x2AC)
  253. #define USBD_NDDRTINTCLR(x) ((x) + 0x2B0)
  254. #define USBD_NDDRTINTSET(x) ((x) + 0x2B4)
  255. /* DMA error interrupt enable, disable, status */
  256. #define USBD_SYSERRTINTST(x) ((x) + 0x2B8)
  257. #define USBD_SYSERRTINTCLR(x) ((x) + 0x2BC)
  258. #define USBD_SYSERRTINTSET(x) ((x) + 0x2C0)
  259. /**********************************************************************
  260. * USBD_DEVINTST/USBD_DEVINTEN/USBD_DEVINTCLR/USBD_DEVINTSET/
  261. * USBD_DEVINTPRI register definitions
  262. **********************************************************************/
  263. #define USBD_ERR_INT (1 << 9)
  264. #define USBD_EP_RLZED (1 << 8)
  265. #define USBD_TXENDPKT (1 << 7)
  266. #define USBD_RXENDPKT (1 << 6)
  267. #define USBD_CDFULL (1 << 5)
  268. #define USBD_CCEMPTY (1 << 4)
  269. #define USBD_DEV_STAT (1 << 3)
  270. #define USBD_EP_SLOW (1 << 2)
  271. #define USBD_EP_FAST (1 << 1)
  272. #define USBD_FRAME (1 << 0)
  273. /**********************************************************************
  274. * USBD_EPINTST/USBD_EPINTEN/USBD_EPINTCLR/USBD_EPINTSET/
  275. * USBD_EPINTPRI register definitions
  276. **********************************************************************/
  277. /* End point selection macro (RX) */
  278. #define USBD_RX_EP_SEL(e) (1 << ((e) << 1))
  279. /* End point selection macro (TX) */
  280. #define USBD_TX_EP_SEL(e) (1 << (((e) << 1) + 1))
  281. /**********************************************************************
  282. * USBD_REEP/USBD_DMARST/USBD_DMARCLR/USBD_DMARSET/USBD_EPDMAST/
  283. * USBD_EPDMAEN/USBD_EPDMADIS/
  284. * USBD_NDDRTINTST/USBD_NDDRTINTCLR/USBD_NDDRTINTSET/
  285. * USBD_EOTINTST/USBD_EOTINTCLR/USBD_EOTINTSET/
  286. * USBD_SYSERRTINTST/USBD_SYSERRTINTCLR/USBD_SYSERRTINTSET
  287. * register definitions
  288. **********************************************************************/
  289. /* Endpoint selection macro */
  290. #define USBD_EP_SEL(e) (1 << (e))
  291. /**********************************************************************
  292. * USBD_DMAINTST/USBD_DMAINTEN
  293. **********************************************************************/
  294. #define USBD_SYS_ERR_INT (1 << 2)
  295. #define USBD_NEW_DD_INT (1 << 1)
  296. #define USBD_EOT_INT (1 << 0)
  297. /**********************************************************************
  298. * USBD_RXPLEN register definitions
  299. **********************************************************************/
  300. #define USBD_PKT_RDY (1 << 11)
  301. #define USBD_DV (1 << 10)
  302. #define USBD_PK_LEN_MASK 0x3FF
  303. /**********************************************************************
  304. * USBD_CTRL register definitions
  305. **********************************************************************/
  306. #define USBD_LOG_ENDPOINT(e) ((e) << 2)
  307. #define USBD_WR_EN (1 << 1)
  308. #define USBD_RD_EN (1 << 0)
  309. /**********************************************************************
  310. * USBD_CMDCODE register definitions
  311. **********************************************************************/
  312. #define USBD_CMD_CODE(c) ((c) << 16)
  313. #define USBD_CMD_PHASE(p) ((p) << 8)
  314. /**********************************************************************
  315. * USBD_DMARST/USBD_DMARCLR/USBD_DMARSET register definitions
  316. **********************************************************************/
  317. #define USBD_DMAEP(e) (1 << (e))
  318. /* DD (DMA Descriptor) structure, requires word alignment */
  319. struct lpc32xx_usbd_dd {
  320. u32 *dd_next;
  321. u32 dd_setup;
  322. u32 dd_buffer_addr;
  323. u32 dd_status;
  324. u32 dd_iso_ps_mem_addr;
  325. };
  326. /* dd_setup bit defines */
  327. #define DD_SETUP_ATLE_DMA_MODE 0x01
  328. #define DD_SETUP_NEXT_DD_VALID 0x04
  329. #define DD_SETUP_ISO_EP 0x10
  330. #define DD_SETUP_PACKETLEN(n) (((n) & 0x7FF) << 5)
  331. #define DD_SETUP_DMALENBYTES(n) (((n) & 0xFFFF) << 16)
  332. /* dd_status bit defines */
  333. #define DD_STATUS_DD_RETIRED 0x01
  334. #define DD_STATUS_STS_MASK 0x1E
  335. #define DD_STATUS_STS_NS 0x00 /* Not serviced */
  336. #define DD_STATUS_STS_BS 0x02 /* Being serviced */
  337. #define DD_STATUS_STS_NC 0x04 /* Normal completion */
  338. #define DD_STATUS_STS_DUR 0x06 /* Data underrun (short packet) */
  339. #define DD_STATUS_STS_DOR 0x08 /* Data overrun */
  340. #define DD_STATUS_STS_SE 0x12 /* System error */
  341. #define DD_STATUS_PKT_VAL 0x20 /* Packet valid */
  342. #define DD_STATUS_LSB_EX 0x40 /* LS byte extracted (ATLE) */
  343. #define DD_STATUS_MSB_EX 0x80 /* MS byte extracted (ATLE) */
  344. #define DD_STATUS_MLEN(n) (((n) >> 8) & 0x3F)
  345. #define DD_STATUS_CURDMACNT(n) (((n) >> 16) & 0xFFFF)
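/* Illustrative only - a DD for a non-isochronous transfer would typically
 * be programmed along these lines (the actual descriptor setup lives in the
 * request queuing code further down in this file):
 *
 *   dd->dd_setup = DD_SETUP_PACKETLEN(ep->ep.maxpacket) |
 *                  DD_SETUP_DMALENBYTES(req->req.length);
 *   dd->dd_buffer_addr = req->req.dma;
 *   dd->dd_status = 0;
 */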
  346. /*
  347. *
  348. * Protocol engine bits below
  349. *
  350. */
  351. /* Device Interrupt Bit Definitions */
  352. #define FRAME_INT 0x00000001
  353. #define EP_FAST_INT 0x00000002
  354. #define EP_SLOW_INT 0x00000004
  355. #define DEV_STAT_INT 0x00000008
  356. #define CCEMTY_INT 0x00000010
  357. #define CDFULL_INT 0x00000020
  358. #define RxENDPKT_INT 0x00000040
  359. #define TxENDPKT_INT 0x00000080
  360. #define EP_RLZED_INT 0x00000100
  361. #define ERR_INT 0x00000200
  362. /* Rx & Tx Packet Length Definitions */
  363. #define PKT_LNGTH_MASK 0x000003FF
  364. #define PKT_DV 0x00000400
  365. #define PKT_RDY 0x00000800
  366. /* USB Control Definitions */
  367. #define CTRL_RD_EN 0x00000001
  368. #define CTRL_WR_EN 0x00000002
  369. /* Command Codes */
  370. #define CMD_SET_ADDR 0x00D00500
  371. #define CMD_CFG_DEV 0x00D80500
  372. #define CMD_SET_MODE 0x00F30500
  373. #define CMD_RD_FRAME 0x00F50500
  374. #define DAT_RD_FRAME 0x00F50200
  375. #define CMD_RD_TEST 0x00FD0500
  376. #define DAT_RD_TEST 0x00FD0200
  377. #define CMD_SET_DEV_STAT 0x00FE0500
  378. #define CMD_GET_DEV_STAT 0x00FE0500
  379. #define DAT_GET_DEV_STAT 0x00FE0200
  380. #define CMD_GET_ERR_CODE 0x00FF0500
  381. #define DAT_GET_ERR_CODE 0x00FF0200
  382. #define CMD_RD_ERR_STAT 0x00FB0500
  383. #define DAT_RD_ERR_STAT 0x00FB0200
  384. #define DAT_WR_BYTE(x) (0x00000100 | ((x) << 16))
  385. #define CMD_SEL_EP(x) (0x00000500 | ((x) << 16))
  386. #define DAT_SEL_EP(x) (0x00000200 | ((x) << 16))
  387. #define CMD_SEL_EP_CLRI(x) (0x00400500 | ((x) << 16))
  388. #define DAT_SEL_EP_CLRI(x) (0x00400200 | ((x) << 16))
  389. #define CMD_SET_EP_STAT(x) (0x00400500 | ((x) << 16))
  390. #define CMD_CLR_BUF 0x00F20500
  391. #define DAT_CLR_BUF 0x00F20200
  392. #define CMD_VALID_BUF 0x00FA0500
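/* Protocol engine commands are issued as a command phase followed by an
 * optional data phase. Illustrative example, mirroring udc_set_address()
 * below, for setting device address 5:
 *
 *   udc_protocol_cmd_data_w(udc, CMD_SET_ADDR, DAT_WR_BYTE(DEV_EN | 5));
 *
 * Reads pair a CMD_* write with a DAT_* data-phase read, as in
 * udc_get_current_frame() */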
  393. /* Device Address Register Definitions */
  394. #define DEV_ADDR_MASK 0x7F
  395. #define DEV_EN 0x80
  396. /* Device Configure Register Definitions */
  397. #define CONF_DVICE 0x01
  398. /* Device Mode Register Definitions */
  399. #define AP_CLK 0x01
  400. #define INAK_CI 0x02
  401. #define INAK_CO 0x04
  402. #define INAK_II 0x08
  403. #define INAK_IO 0x10
  404. #define INAK_BI 0x20
  405. #define INAK_BO 0x40
  406. /* Device Status Register Definitions */
  407. #define DEV_CON 0x01
  408. #define DEV_CON_CH 0x02
  409. #define DEV_SUS 0x04
  410. #define DEV_SUS_CH 0x08
  411. #define DEV_RST 0x10
  412. /* Error Code Register Definitions */
  413. #define ERR_EC_MASK 0x0F
  414. #define ERR_EA 0x10
  415. /* Error Status Register Definitions */
  416. #define ERR_PID 0x01
  417. #define ERR_UEPKT 0x02
  418. #define ERR_DCRC 0x04
  419. #define ERR_TIMOUT 0x08
  420. #define ERR_EOP 0x10
  421. #define ERR_B_OVRN 0x20
  422. #define ERR_BTSTF 0x40
  423. #define ERR_TGL 0x80
  424. /* Endpoint Select Register Definitions */
  425. #define EP_SEL_F 0x01
  426. #define EP_SEL_ST 0x02
  427. #define EP_SEL_STP 0x04
  428. #define EP_SEL_PO 0x08
  429. #define EP_SEL_EPN 0x10
  430. #define EP_SEL_B_1_FULL 0x20
  431. #define EP_SEL_B_2_FULL 0x40
  432. /* Endpoint Status Register Definitions */
  433. #define EP_STAT_ST 0x01
  434. #define EP_STAT_DA 0x20
  435. #define EP_STAT_RF_MO 0x40
  436. #define EP_STAT_CND_ST 0x80
  437. /* Clear Buffer Register Definitions */
  438. #define CLR_BUF_PO 0x01
  439. /* DMA Interrupt Bit Definitions */
  440. #define EOT_INT 0x01
  441. #define NDD_REQ_INT 0x02
  442. #define SYS_ERR_INT 0x04
  443. #define DRIVER_VERSION "1.03"
  444. static const char driver_name[] = "lpc32xx_udc";
  445. /*
  446. *
  447. * proc interface support
  448. *
  449. */
  450. #ifdef CONFIG_USB_GADGET_DEBUG_FILES
  451. static char *epnames[] = {"INT", "ISO", "BULK", "CTRL"};
  452. static const char debug_filename[] = "driver/udc";
  453. static void proc_ep_show(struct seq_file *s, struct lpc32xx_ep *ep)
  454. {
  455. struct lpc32xx_request *req;
  456. seq_printf(s, "\n");
  457. seq_printf(s, "%12s, maxpacket %4d %3s",
  458. ep->ep.name, ep->ep.maxpacket,
  459. ep->is_in ? "in" : "out");
  460. seq_printf(s, " type %4s", epnames[ep->eptype]);
  461. seq_printf(s, " ints: %12d", ep->totalints);
  462. if (list_empty(&ep->queue))
  463. seq_printf(s, "\t(queue empty)\n");
  464. else {
  465. list_for_each_entry(req, &ep->queue, queue) {
  466. u32 length = req->req.actual;
  467. seq_printf(s, "\treq %p len %d/%d buf %p\n",
  468. &req->req, length,
  469. req->req.length, req->req.buf);
  470. }
  471. }
  472. }
  473. static int proc_udc_show(struct seq_file *s, void *unused)
  474. {
  475. struct lpc32xx_udc *udc = s->private;
  476. struct lpc32xx_ep *ep;
  477. unsigned long flags;
  478. seq_printf(s, "%s: version %s\n", driver_name, DRIVER_VERSION);
  479. spin_lock_irqsave(&udc->lock, flags);
  480. seq_printf(s, "vbus %s, pullup %s, %s powered%s, gadget %s\n\n",
  481. udc->vbus ? "present" : "off",
  482. udc->enabled ? (udc->vbus ? "active" : "enabled") :
  483. "disabled",
  484. udc->selfpowered ? "self" : "VBUS",
  485. udc->suspended ? ", suspended" : "",
  486. udc->driver ? udc->driver->driver.name : "(none)");
  487. if (udc->enabled && udc->vbus) {
  488. proc_ep_show(s, &udc->ep[0]);
  489. list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
  490. if (ep->desc)
  491. proc_ep_show(s, ep);
  492. }
  493. }
  494. spin_unlock_irqrestore(&udc->lock, flags);
  495. return 0;
  496. }
  497. static int proc_udc_open(struct inode *inode, struct file *file)
  498. {
  499. return single_open(file, proc_udc_show, PDE(inode)->data);
  500. }
  501. static const struct file_operations proc_ops = {
  502. .owner = THIS_MODULE,
  503. .open = proc_udc_open,
  504. .read = seq_read,
  505. .llseek = seq_lseek,
  506. .release = single_release,
  507. };
  508. static void create_debug_file(struct lpc32xx_udc *udc)
  509. {
  510. udc->pde = debugfs_create_file(debug_filename, 0, NULL, udc, &proc_ops);
  511. }
  512. static void remove_debug_file(struct lpc32xx_udc *udc)
  513. {
  514. if (udc->pde)
  515. debugfs_remove(udc->pde);
  516. }
  517. #else
  518. static inline void create_debug_file(struct lpc32xx_udc *udc) {}
  519. static inline void remove_debug_file(struct lpc32xx_udc *udc) {}
  520. #endif
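/* ISP1301 registers are accessed as set/clear pairs over I2C: a write to a
 * register's base address sets the bits written, while a write to
 * (reg | ISP1301_I2C_REG_CLEAR_ADDR) clears them. The sequences below use
 * this to clear a register with ~0 before setting the desired bits */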
  521. /* Primary initialization sequence for the ISP1301 transceiver */
  522. static void isp1301_udc_configure(struct lpc32xx_udc *udc)
  523. {
  524. /* LPC32XX only supports DAT_SE0 USB mode */
  525. /* This sequence is important */
  526. /* Disable transparent UART mode first */
  527. i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
  528. (ISP1301_I2C_MODE_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR),
  529. MC1_UART_EN);
  530. /* Set full speed and SE0 mode */
  531. i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
  532. (ISP1301_I2C_MODE_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR), ~0);
  533. i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
  534. ISP1301_I2C_MODE_CONTROL_1, (MC1_SPEED_REG | MC1_DAT_SE0));
  535. /*
  536. * The PSW_OE enable bit state is reversed in the ISP1301 User's Guide
  537. */
  538. i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
  539. (ISP1301_I2C_MODE_CONTROL_2 | ISP1301_I2C_REG_CLEAR_ADDR), ~0);
  540. i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
  541. ISP1301_I2C_MODE_CONTROL_2, (MC2_BI_DI | MC2_SPD_SUSP_CTRL));
  542. /* Drive VBUS_DRV high or low depending on board setup */
  543. if (udc->board->vbus_drv_pol != 0)
  544. i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
  545. ISP1301_I2C_OTG_CONTROL_1, OTG1_VBUS_DRV);
  546. else
  547. i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
  548. ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR,
  549. OTG1_VBUS_DRV);
  550. /* Bi-directional mode with suspend control
  551. * Enable both pulldowns for now - the pullup will be enabled when VBUS
  552. * is detected */
  553. i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
  554. (ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR), ~0);
  555. i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
  556. ISP1301_I2C_OTG_CONTROL_1,
  557. (0 | OTG1_DM_PULLDOWN | OTG1_DP_PULLDOWN));
  558. /* Discharge VBUS (just in case) */
  559. i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
  560. ISP1301_I2C_OTG_CONTROL_1, OTG1_VBUS_DISCHRG);
  561. msleep(1);
  562. i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
  563. (ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR),
  564. OTG1_VBUS_DISCHRG);
  565. /* Clear and enable VBUS high edge interrupt */
  566. i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
  567. ISP1301_I2C_INTERRUPT_LATCH | ISP1301_I2C_REG_CLEAR_ADDR, ~0);
  568. i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
  569. ISP1301_I2C_INTERRUPT_FALLING | ISP1301_I2C_REG_CLEAR_ADDR, ~0);
  570. i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
  571. ISP1301_I2C_INTERRUPT_FALLING, INT_VBUS_VLD);
  572. i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
  573. ISP1301_I2C_INTERRUPT_RISING | ISP1301_I2C_REG_CLEAR_ADDR, ~0);
  574. i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
  575. ISP1301_I2C_INTERRUPT_RISING, INT_VBUS_VLD);
  576. /* Enable usb_need_clk clock after transceiver is initialized */
  577. writel((readl(USB_CTRL) | USB_DEV_NEED_CLK_EN), USB_CTRL);
  578. dev_info(udc->dev, "ISP1301 Vendor ID : 0x%04x\n",
  579. i2c_smbus_read_word_data(udc->isp1301_i2c_client, 0x00));
  580. dev_info(udc->dev, "ISP1301 Product ID : 0x%04x\n",
  581. i2c_smbus_read_word_data(udc->isp1301_i2c_client, 0x02));
  582. dev_info(udc->dev, "ISP1301 Version ID : 0x%04x\n",
  583. i2c_smbus_read_word_data(udc->isp1301_i2c_client, 0x14));
  584. }
  585. /* Enables or disables the USB device pullup via the ISP1301 transceiver */
  586. static void isp1301_pullup_set(struct lpc32xx_udc *udc)
  587. {
  588. if (udc->pullup)
  589. /* Enable pullup for bus signalling */
  590. i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
  591. ISP1301_I2C_OTG_CONTROL_1, OTG1_DP_PULLUP);
  592. else
  593. /* Disable pullup for bus signalling */
  594. i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
  595. ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR,
  596. OTG1_DP_PULLUP);
  597. }
  598. static void pullup_work(struct work_struct *work)
  599. {
  600. struct lpc32xx_udc *udc =
  601. container_of(work, struct lpc32xx_udc, pullup_job);
  602. isp1301_pullup_set(udc);
  603. }
  604. static void isp1301_pullup_enable(struct lpc32xx_udc *udc, int en_pullup,
  605. int block)
  606. {
  607. if (en_pullup == udc->pullup)
  608. return;
  609. udc->pullup = en_pullup;
  610. if (block)
  611. isp1301_pullup_set(udc);
  612. else
  613. /* defer slow i2c pull up setting */
  614. schedule_work(&udc->pullup_job);
  615. }
  616. #ifdef CONFIG_PM
  617. /* Powers up or down the ISP1301 transceiver */
  618. static void isp1301_set_powerstate(struct lpc32xx_udc *udc, int enable)
  619. {
  620. if (enable != 0)
  621. /* Power up ISP1301 - the ISP1301 will automatically wake up
  622. when VBUS is detected */
  623. i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
  624. ISP1301_I2C_MODE_CONTROL_2 | ISP1301_I2C_REG_CLEAR_ADDR,
  625. MC2_GLOBAL_PWR_DN);
  626. else
  627. /* Power down ISP1301 */
  628. i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
  629. ISP1301_I2C_MODE_CONTROL_2, MC2_GLOBAL_PWR_DN);
  630. }
  631. static void power_work(struct work_struct *work)
  632. {
  633. struct lpc32xx_udc *udc =
  634. container_of(work, struct lpc32xx_udc, power_job);
  635. isp1301_set_powerstate(udc, udc->poweron);
  636. }
  637. #endif
  638. /*
  639. *
  640. * USB protocol engine command/data read/write helper functions
  641. *
  642. */
  643. /* Issues a single command to the USB device state machine */
  644. static void udc_protocol_cmd_w(struct lpc32xx_udc *udc, u32 cmd)
  645. {
  646. u32 pass = 0;
  647. int to;
  648. /* EP may lock on CLRI if this read isn't done */
  649. u32 tmp = readl(USBD_DEVINTST(udc->udp_baseaddr));
  650. (void) tmp;
  651. while (pass == 0) {
  652. writel(USBD_CCEMPTY, USBD_DEVINTCLR(udc->udp_baseaddr));
  653. /* Write command code */
  654. writel(cmd, USBD_CMDCODE(udc->udp_baseaddr));
  655. to = 10000;
  656. while (((readl(USBD_DEVINTST(udc->udp_baseaddr)) &
  657. USBD_CCEMPTY) == 0) && (to > 0)) {
  658. to--;
  659. }
  660. if (to > 0)
  661. pass = 1;
  662. cpu_relax();
  663. }
  664. }
  665. /* Issues 2 commands (or command and data) to the USB device state machine */
  666. static inline void udc_protocol_cmd_data_w(struct lpc32xx_udc *udc, u32 cmd,
  667. u32 data)
  668. {
  669. udc_protocol_cmd_w(udc, cmd);
  670. udc_protocol_cmd_w(udc, data);
  671. }
  672. /* Issues a single command to the USB device state machine and reads
  673. * response data */
  674. static u32 udc_protocol_cmd_r(struct lpc32xx_udc *udc, u32 cmd)
  675. {
  676. u32 tmp;
  677. int to = 1000;
  678. /* Write a command and read data from the protocol engine */
  679. writel((USBD_CDFULL | USBD_CCEMPTY),
  680. USBD_DEVINTCLR(udc->udp_baseaddr));
  681. /* Write command code */
  682. udc_protocol_cmd_w(udc, cmd);
  683. tmp = readl(USBD_DEVINTST(udc->udp_baseaddr));
  684. while ((!(readl(USBD_DEVINTST(udc->udp_baseaddr)) & USBD_CDFULL))
  685. && (to > 0))
  686. to--;
  687. if (!to)
  688. dev_dbg(udc->dev,
  689. "Protocol engine didn't receive response (CDFULL)\n");
  690. return readl(USBD_CMDDATA(udc->udp_baseaddr));
  691. }
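/* Typical read usage (illustrative): select an endpoint with a command
 * write, then fetch its status byte with the matching data-phase read:
 *
 *   udc_protocol_cmd_w(udc, CMD_SEL_EP(hwep));
 *   status = udc_protocol_cmd_r(udc, DAT_SEL_EP(hwep));
 */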
  692. /*
  693. *
  694. * USB device interrupt mask support functions
  695. *
  696. */
  697. /* Enable one or more USB device interrupts */
  698. static inline void uda_enable_devint(struct lpc32xx_udc *udc, u32 devmask)
  699. {
  700. udc->enabled_devints |= devmask;
  701. writel(udc->enabled_devints, USBD_DEVINTEN(udc->udp_baseaddr));
  702. }
  703. /* Disable one or more USB device interrupts */
  704. static inline void uda_disable_devint(struct lpc32xx_udc *udc, u32 mask)
  705. {
  706. udc->enabled_devints &= ~mask;
  707. writel(udc->enabled_devints, USBD_DEVINTEN(udc->udp_baseaddr));
  708. }
  709. /* Clear one or more USB device interrupts */
  710. static inline void uda_clear_devint(struct lpc32xx_udc *udc, u32 mask)
  711. {
  712. writel(mask, USBD_DEVINTCLR(udc->udp_baseaddr));
  713. }
  714. /*
  715. *
  716. * Endpoint interrupt disable/enable functions
  717. *
  718. */
  719. /* Enable one or more USB endpoint interrupts */
  720. static void uda_enable_hwepint(struct lpc32xx_udc *udc, u32 hwep)
  721. {
  722. udc->enabled_hwepints |= (1 << hwep);
  723. writel(udc->enabled_hwepints, USBD_EPINTEN(udc->udp_baseaddr));
  724. }
  725. /* Disable one or more USB endpoint interrupts */
  726. static void uda_disable_hwepint(struct lpc32xx_udc *udc, u32 hwep)
  727. {
  728. udc->enabled_hwepints &= ~(1 << hwep);
  729. writel(udc->enabled_hwepints, USBD_EPINTEN(udc->udp_baseaddr));
  730. }
  731. /* Clear one or more USB endpoint interrupts */
  732. static inline void uda_clear_hwepint(struct lpc32xx_udc *udc, u32 hwep)
  733. {
  734. writel((1 << hwep), USBD_EPINTCLR(udc->udp_baseaddr));
  735. }
  736. /* Enable DMA for the HW channel */
  737. static inline void udc_ep_dma_enable(struct lpc32xx_udc *udc, u32 hwep)
  738. {
  739. writel((1 << hwep), USBD_EPDMAEN(udc->udp_baseaddr));
  740. }
  741. /* Disable DMA for the HW channel */
  742. static inline void udc_ep_dma_disable(struct lpc32xx_udc *udc, u32 hwep)
  743. {
  744. writel((1 << hwep), USBD_EPDMADIS(udc->udp_baseaddr));
  745. }
  746. /*
  747. *
  748. * Endpoint realize/unrealize functions
  749. *
  750. */
  751. /* Before an endpoint can be used, it needs to be realized
  752. * in the USB protocol engine - this realizes the endpoint.
  753. * The interrupt (FIFO or DMA) is not enabled with this function */
  754. static void udc_realize_hwep(struct lpc32xx_udc *udc, u32 hwep,
  755. u32 maxpacket)
  756. {
  757. int to = 1000;
  758. writel(USBD_EP_RLZED, USBD_DEVINTCLR(udc->udp_baseaddr));
  759. writel(hwep, USBD_EPIND(udc->udp_baseaddr));
  760. udc->realized_eps |= (1 << hwep);
  761. writel(udc->realized_eps, USBD_REEP(udc->udp_baseaddr));
  762. writel(maxpacket, USBD_EPMAXPSIZE(udc->udp_baseaddr));
  763. /* Wait until endpoint is realized in hardware */
  764. while ((!(readl(USBD_DEVINTST(udc->udp_baseaddr)) &
  765. USBD_EP_RLZED)) && (to > 0))
  766. to--;
  767. if (!to)
  768. dev_dbg(udc->dev, "EP not correctly realized in hardware\n");
  769. writel(USBD_EP_RLZED, USBD_DEVINTCLR(udc->udp_baseaddr));
  770. }
  771. /* Unrealize an EP */
  772. static void udc_unrealize_hwep(struct lpc32xx_udc *udc, u32 hwep)
  773. {
  774. udc->realized_eps &= ~(1 << hwep);
  775. writel(udc->realized_eps, USBD_REEP(udc->udp_baseaddr));
  776. }
  777. /*
  778. *
  779. * Endpoint support functions
  780. *
  781. */
  782. /* Select and clear endpoint interrupt */
  783. static u32 udc_selep_clrint(struct lpc32xx_udc *udc, u32 hwep)
  784. {
  785. udc_protocol_cmd_w(udc, CMD_SEL_EP_CLRI(hwep));
  786. return udc_protocol_cmd_r(udc, DAT_SEL_EP_CLRI(hwep));
  787. }
  788. /* Disables the endpoint in the USB protocol engine */
  789. static void udc_disable_hwep(struct lpc32xx_udc *udc, u32 hwep)
  790. {
  791. udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(hwep),
  792. DAT_WR_BYTE(EP_STAT_DA));
  793. }
  794. /* Stalls the endpoint - endpoint will return STALL */
  795. static void udc_stall_hwep(struct lpc32xx_udc *udc, u32 hwep)
  796. {
  797. udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(hwep),
  798. DAT_WR_BYTE(EP_STAT_ST));
  799. }
  800. /* Clear stall or reset endpoint */
  801. static void udc_clrstall_hwep(struct lpc32xx_udc *udc, u32 hwep)
  802. {
  803. udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(hwep),
  804. DAT_WR_BYTE(0));
  805. }
  806. /* Select an endpoint for endpoint status, clear, validate */
  807. static void udc_select_hwep(struct lpc32xx_udc *udc, u32 hwep)
  808. {
  809. udc_protocol_cmd_w(udc, CMD_SEL_EP(hwep));
  810. }
  811. /*
  812. *
  813. * Endpoint buffer management functions
  814. *
  815. */
  816. /* Clear the current endpoint's buffer */
  817. static void udc_clr_buffer_hwep(struct lpc32xx_udc *udc, u32 hwep)
  818. {
  819. udc_select_hwep(udc, hwep);
  820. udc_protocol_cmd_w(udc, CMD_CLR_BUF);
  821. }
  822. /* Validate the current endpoint's buffer */
  823. static void udc_val_buffer_hwep(struct lpc32xx_udc *udc, u32 hwep)
  824. {
  825. udc_select_hwep(udc, hwep);
  826. udc_protocol_cmd_w(udc, CMD_VALID_BUF);
  827. }
  828. static inline u32 udc_clearep_getsts(struct lpc32xx_udc *udc, u32 hwep)
  829. {
  830. /* Clear EP interrupt */
  831. uda_clear_hwepint(udc, hwep);
  832. return udc_selep_clrint(udc, hwep);
  833. }
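/* The status byte returned here uses the "Endpoint Select Register" bit
 * definitions above (EP_SEL_F, EP_SEL_ST, EP_SEL_STP, EP_SEL_PO, ...) */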
  834. /*
  835. *
  836. * USB EP DMA support
  837. *
  838. */
  839. /* Allocate a DMA Descriptor */
  840. static struct lpc32xx_usbd_dd_gad *udc_dd_alloc(struct lpc32xx_udc *udc)
  841. {
  842. dma_addr_t dma;
  843. struct lpc32xx_usbd_dd_gad *dd;
  844. dd = (struct lpc32xx_usbd_dd_gad *) dma_pool_alloc(
  845. udc->dd_cache, (GFP_KERNEL | GFP_DMA), &dma);
  846. if (dd)
  847. dd->this_dma = dma;
  848. return dd;
  849. }
  850. /* Free a DMA Descriptor */
  851. static void udc_dd_free(struct lpc32xx_udc *udc, struct lpc32xx_usbd_dd_gad *dd)
  852. {
  853. dma_pool_free(udc->dd_cache, dd, dd->this_dma);
  854. }
  855. /*
  856. *
  857. * USB setup and shutdown functions
  858. *
  859. */
  860. /* Enables or disables most of the USB system clocks when low power mode is
  861. * needed. Clocks are typically started on a connection event, and disabled
  862. * when a cable is disconnected */
  863. static void udc_clk_set(struct lpc32xx_udc *udc, int enable)
  864. {
  865. if (enable != 0) {
  866. if (udc->clocked)
  867. return;
  868. udc->clocked = 1;
  869. /* 48MHz PLL up */
  870. clk_enable(udc->usb_pll_clk);
  871. /* Enable the USB device clock */
  872. writel(readl(USB_CTRL) | USB_DEV_NEED_CLK_EN,
  873. USB_CTRL);
  874. clk_enable(udc->usb_otg_clk);
  875. } else {
  876. if (!udc->clocked)
  877. return;
  878. udc->clocked = 0;
  879. /* Never disable the USB_HCLK during normal operation */
  880. /* 48MHz PLL down */
  881. clk_disable(udc->usb_pll_clk);
  882. /* Disable the USB device clock */
  883. writel(readl(USB_CTRL) & ~USB_DEV_NEED_CLK_EN,
  884. USB_CTRL);
  885. clk_disable(udc->usb_otg_clk);
  886. }
  887. }
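/* Only the 48MHz PLL, the USB_DEV_NEED_CLK_EN gate and the OTG clock are
 * toggled here; the USB slave HCLK (usb_slv_clk) stays enabled during
 * normal operation */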
  888. /* Set/reset USB device address */
  889. static void udc_set_address(struct lpc32xx_udc *udc, u32 addr)
  890. {
  891. /* Address will be latched at the end of the status phase, or
  892. latched immediately if function is called twice */
  893. udc_protocol_cmd_data_w(udc, CMD_SET_ADDR,
  894. DAT_WR_BYTE(DEV_EN | addr));
  895. }
  896. /* Set up an IN request for DMA transfer - this consists of determining the
  897. * list of DMA addresses for the transfer, allocating DMA Descriptors,
  898. * installing the DD into the UDCA, and then enabling the DMA for that EP */
  899. static int udc_ep_in_req_dma(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
  900. {
  901. struct lpc32xx_request *req;
  902. u32 hwep = ep->hwep_num;
  903. ep->req_pending = 1;
  904. /* There will always be a request waiting here */
  905. req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
  906. /* Place the DD Descriptor into the UDCA */
  907. udc->udca_v_base[hwep] = req->dd_desc_ptr->this_dma;
  908. /* Enable DMA and interrupt for the HW EP */
  909. udc_ep_dma_enable(udc, hwep);
  910. /* Clear ZLP if last packet is not of MAXP size */
  911. if (req->req.length % ep->ep.maxpacket)
  912. req->send_zlp = 0;
  913. return 0;
  914. }
  915. /* Set up an OUT request for DMA transfer - this consists of determining the
  916. * list of DMA addresses for the transfer, allocating DMA Descriptors,
  917. * installing the DD into the UDCA, and then enabling the DMA for that EP */
  918. static int udc_ep_out_req_dma(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
  919. {
  920. struct lpc32xx_request *req;
  921. u32 hwep = ep->hwep_num;
  922. ep->req_pending = 1;
  923. /* There will always be a request waiting here */
  924. req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
  925. /* Place the DD Descriptor into the UDCA */
  926. udc->udca_v_base[hwep] = req->dd_desc_ptr->this_dma;
  927. /* Enable DMA and interrupt for the HW EP */
  928. udc_ep_dma_enable(udc, hwep);
  929. return 0;
  930. }
  931. static void udc_disable(struct lpc32xx_udc *udc)
  932. {
  933. u32 i;
  934. /* Disable device */
  935. udc_protocol_cmd_data_w(udc, CMD_CFG_DEV, DAT_WR_BYTE(0));
  936. udc_protocol_cmd_data_w(udc, CMD_SET_DEV_STAT, DAT_WR_BYTE(0));
  937. /* Disable all device interrupts (including EP0) */
  938. uda_disable_devint(udc, 0x3FF);
  939. /* Disable and reset all endpoint interrupts */
  940. for (i = 0; i < 32; i++) {
  941. uda_disable_hwepint(udc, i);
  942. uda_clear_hwepint(udc, i);
  943. udc_disable_hwep(udc, i);
  944. udc_unrealize_hwep(udc, i);
  945. udc->udca_v_base[i] = 0;
  946. /* Disable and clear all interrupts and DMA */
  947. udc_ep_dma_disable(udc, i);
  948. writel((1 << i), USBD_EOTINTCLR(udc->udp_baseaddr));
  949. writel((1 << i), USBD_NDDRTINTCLR(udc->udp_baseaddr));
  950. writel((1 << i), USBD_SYSERRTINTCLR(udc->udp_baseaddr));
  951. writel((1 << i), USBD_DMARCLR(udc->udp_baseaddr));
  952. }
  953. /* Disable DMA interrupts */
  954. writel(0, USBD_DMAINTEN(udc->udp_baseaddr));
  955. writel(0, USBD_UDCAH(udc->udp_baseaddr));
  956. }
  957. static void udc_enable(struct lpc32xx_udc *udc)
  958. {
  959. u32 i;
  960. struct lpc32xx_ep *ep = &udc->ep[0];
  961. /* Start with known state */
  962. udc_disable(udc);
  963. /* Enable device */
  964. udc_protocol_cmd_data_w(udc, CMD_SET_DEV_STAT, DAT_WR_BYTE(DEV_CON));
  965. /* EP interrupts on high priority, FRAME interrupt on low priority */
  966. writel(USBD_EP_FAST, USBD_DEVINTPRI(udc->udp_baseaddr));
  967. writel(0xFFFF, USBD_EPINTPRI(udc->udp_baseaddr));
  968. /* Clear any pending device interrupts */
  969. writel(0x3FF, USBD_DEVINTCLR(udc->udp_baseaddr));
  970. /* Setup UDCA - not yet used (DMA) */
  971. writel(udc->udca_p_base, USBD_UDCAH(udc->udp_baseaddr));
  972. /* Only enable EP0 in and out for now, EP0 only works in FIFO mode */
  973. for (i = 0; i <= 1; i++) {
  974. udc_realize_hwep(udc, i, ep->ep.maxpacket);
  975. uda_enable_hwepint(udc, i);
  976. udc_select_hwep(udc, i);
  977. udc_clrstall_hwep(udc, i);
  978. udc_clr_buffer_hwep(udc, i);
  979. }
  980. /* Device interrupt setup */
  981. uda_clear_devint(udc, (USBD_ERR_INT | USBD_DEV_STAT | USBD_EP_SLOW |
  982. USBD_EP_FAST));
  983. uda_enable_devint(udc, (USBD_ERR_INT | USBD_DEV_STAT | USBD_EP_SLOW |
  984. USBD_EP_FAST));
  985. /* Set device address to 0 - called twice to force a latch in the USB
  986. engine without the need of a setup packet status closure */
  987. udc_set_address(udc, 0);
  988. udc_set_address(udc, 0);
  989. /* Enable master DMA interrupts */
  990. writel((USBD_SYS_ERR_INT | USBD_EOT_INT),
  991. USBD_DMAINTEN(udc->udp_baseaddr));
  992. udc->dev_status = 0;
  993. }
  994. /*
  995. *
  996. * USB device board specific events handled via callbacks
  997. *
  998. */
  999. /* Connection change event - notify board function of change */
  1000. static void uda_power_event(struct lpc32xx_udc *udc, u32 conn)
  1001. {
  1002. /* Just notify of a connection change event (optional) */
  1003. if (udc->board->conn_chgb != NULL)
  1004. udc->board->conn_chgb(conn);
  1005. }
  1006. /* Suspend/resume event - notify board function of change */
  1007. static void uda_resm_susp_event(struct lpc32xx_udc *udc, u32 conn)
  1008. {
  1009. /* Just notify of a Suspend/resume change event (optional) */
  1010. if (udc->board->susp_chgb != NULL)
  1011. udc->board->susp_chgb(conn);
  1012. if (conn)
  1013. udc->suspended = 0;
  1014. else
  1015. udc->suspended = 1;
  1016. }
  1017. /* Remote wakeup enable/disable - notify board function of change */
  1018. static void uda_remwkp_cgh(struct lpc32xx_udc *udc)
  1019. {
  1020. if (udc->board->rmwk_chgb != NULL)
  1021. udc->board->rmwk_chgb(udc->dev_status &
  1022. (1 << USB_DEVICE_REMOTE_WAKEUP));
  1023. }
  1024. /* Reads data from FIFO, adjusts for alignment and data size */
  1025. static void udc_pop_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes)
  1026. {
  1027. int n, i, bl;
  1028. u16 *p16;
  1029. u32 *p32, tmp, cbytes;
  1030. /* Use optimal data transfer method based on source address and size */
  1031. switch (((u32) data) & 0x3) {
  1032. case 0: /* 32-bit aligned */
  1033. p32 = (u32 *) data;
  1034. cbytes = (bytes & ~0x3);
  1035. /* Copy 32-bit aligned data first */
  1036. for (n = 0; n < cbytes; n += 4)
  1037. *p32++ = readl(USBD_RXDATA(udc->udp_baseaddr));
  1038. /* Handle any remaining bytes */
  1039. bl = bytes - cbytes;
  1040. if (bl) {
  1041. tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
  1042. for (n = 0; n < bl; n++)
  1043. data[cbytes + n] = ((tmp >> (n * 8)) & 0xFF);
  1044. }
  1045. break;
  1046. case 1: /* 8-bit aligned */
  1047. case 3:
  1048. /* Each byte has to be handled independently */
  1049. for (n = 0; n < bytes; n += 4) {
  1050. tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
  1051. bl = bytes - n;
  1052. if (bl > 4)
  1053. bl = 4;
  1054. for (i = 0; i < bl; i++)
  1055. data[n + i] = (u8) ((tmp >> (i * 8)) & 0xFF);
  1056. }
  1057. break;
  1058. case 2: /* 16-bit aligned */
  1059. p16 = (u16 *) data;
  1060. cbytes = (bytes & ~0x3);
  1061. /* Copy 32-bit sized objects first with 16-bit alignment */
  1062. for (n = 0; n < cbytes; n += 4) {
  1063. tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
  1064. *p16++ = (u16)(tmp & 0xFFFF);
  1065. *p16++ = (u16)((tmp >> 16) & 0xFFFF);
  1066. }
  1067. /* Handle any remaining bytes */
  1068. bl = bytes - cbytes;
  1069. if (bl) {
  1070. tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
  1071. for (n = 0; n < bl; n++)
  1072. data[cbytes + n] = ((tmp >> (n * 8)) & 0xFF);
  1073. }
  1074. break;
  1075. }
  1076. }
  1077. /* Read data from the FIFO for an endpoint. This function is for endpoints (such
  1078. * as EP0) that don't use DMA. This function should only be called if a packet
  1079. * is known to be ready to read for the endpoint. Note that the endpoint must
  1080. * be selected in the protocol engine prior to this call. */
  1081. static u32 udc_read_hwep(struct lpc32xx_udc *udc, u32 hwep, u32 *data,
  1082. u32 bytes)
  1083. {
  1084. u32 tmpv;
  1085. int to = 1000;
  1086. u32 tmp, hwrep = ((hwep & 0x1E) << 1) | CTRL_RD_EN;
  1087. /* Setup read of endpoint */
  1088. writel(hwrep, USBD_CTRL(udc->udp_baseaddr));
  1089. /* Wait until packet is ready */
  1090. while ((((tmpv = readl(USBD_RXPLEN(udc->udp_baseaddr))) &
  1091. PKT_RDY) == 0) && (to > 0))
  1092. to--;
  1093. if (!to)
  1094. dev_dbg(udc->dev, "No packet ready on FIFO EP read\n");
  1095. /* Mask out count */
  1096. tmp = tmpv & PKT_LNGTH_MASK;
  1097. if (bytes < tmp)
  1098. tmp = bytes;
  1099. if ((tmp > 0) && (data != NULL))
  1100. udc_pop_fifo(udc, (u8 *) data, tmp);
  1101. writel(((hwep & 0x1E) << 1), USBD_CTRL(udc->udp_baseaddr));
  1102. /* Clear the buffer */
  1103. udc_clr_buffer_hwep(udc, hwep);
  1104. return tmp;
  1105. }
  1106. /* Stuffs data into the FIFO, adjusts for alignment and data size */
  1107. static void udc_stuff_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes)
  1108. {
  1109. int n, i, bl;
  1110. u16 *p16;
  1111. u32 *p32, tmp, cbytes;
  1112. /* Use optimal data transfer method based on source address and size */
  1113. switch (((u32) data) & 0x3) {
  1114. case 0: /* 32-bit aligned */
  1115. p32 = (u32 *) data;
  1116. cbytes = (bytes & ~0x3);
  1117. /* Copy 32-bit aligned data first */
  1118. for (n = 0; n < cbytes; n += 4)
  1119. writel(*p32++, USBD_TXDATA(udc->udp_baseaddr));
  1120. /* Handle any remaining bytes */
  1121. bl = bytes - cbytes;
  1122. if (bl) {
  1123. tmp = 0;
  1124. for (n = 0; n < bl; n++)
  1125. tmp |= data[cbytes + n] << (n * 8);
  1126. writel(tmp, USBD_TXDATA(udc->udp_baseaddr));
  1127. }
  1128. break;
  1129. case 1: /* 8-bit aligned */
  1130. case 3:
  1131. /* Each byte has to be handled independently */
  1132. for (n = 0; n < bytes; n += 4) {
  1133. bl = bytes - n;
  1134. if (bl > 4)
  1135. bl = 4;
  1136. tmp = 0;
  1137. for (i = 0; i < bl; i++)
  1138. tmp |= data[n + i] << (i * 8);
  1139. writel(tmp, USBD_TXDATA(udc->udp_baseaddr));
  1140. }
  1141. break;
  1142. case 2: /* 16-bit aligned */
  1143. p16 = (u16 *) data;
  1144. cbytes = (bytes & ~0x3);
1145. /* Copy 32-bit sized chunks first using 16-bit accesses */
  1146. for (n = 0; n < cbytes; n += 4) {
  1147. tmp = *p16++ & 0xFFFF;
  1148. tmp |= (*p16++ & 0xFFFF) << 16;
  1149. writel(tmp, USBD_TXDATA(udc->udp_baseaddr));
  1150. }
  1151. /* Handle any remaining bytes */
  1152. bl = bytes - cbytes;
  1153. if (bl) {
  1154. tmp = 0;
  1155. for (n = 0; n < bl; n++)
  1156. tmp |= data[cbytes + n] << (n * 8);
  1157. writel(tmp, USBD_TXDATA(udc->udp_baseaddr));
  1158. }
  1159. break;
  1160. }
  1161. }
  1162. /* Write data to the FIFO for an endpoint. This function is for endpoints (such
  1163. * as EP0) that don't use DMA. Note that the endpoint must be selected in the
  1164. * protocol engine prior to this call. */
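/* A zero-length write, udc_write_hwep(udc, EP_IN, NULL, 0), is how a ZLP
 * is sent on EP0 (see udc_ep0_send_zlp() and udc_send_in_zlp()); the dummy
 * TXDATA write below is still needed to arm the transfer in that case. */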
  1165. static void udc_write_hwep(struct lpc32xx_udc *udc, u32 hwep, u32 *data,
  1166. u32 bytes)
  1167. {
  1168. u32 hwwep = ((hwep & 0x1E) << 1) | CTRL_WR_EN;
  1169. if ((bytes > 0) && (data == NULL))
  1170. return;
  1171. /* Setup write of endpoint */
  1172. writel(hwwep, USBD_CTRL(udc->udp_baseaddr));
  1173. writel(bytes, USBD_TXPLEN(udc->udp_baseaddr));
  1174. /* Need at least 1 byte to trigger TX */
  1175. if (bytes == 0)
  1176. writel(0, USBD_TXDATA(udc->udp_baseaddr));
  1177. else
  1178. udc_stuff_fifo(udc, (u8 *) data, bytes);
  1179. writel(((hwep & 0x1E) << 1), USBD_CTRL(udc->udp_baseaddr));
  1180. udc_val_buffer_hwep(udc, hwep);
  1181. }
  1182. /* USB device reset - resets USB to a default state with just EP0
1183. * enabled */
  1184. static void uda_usb_reset(struct lpc32xx_udc *udc)
  1185. {
  1186. u32 i = 0;
  1187. /* Re-init device controller and EP0 */
  1188. udc_enable(udc);
  1189. udc->gadget.speed = USB_SPEED_FULL;
  1190. for (i = 1; i < NUM_ENDPOINTS; i++) {
  1191. struct lpc32xx_ep *ep = &udc->ep[i];
  1192. ep->req_pending = 0;
  1193. }
  1194. }
  1195. /* Send a ZLP on EP0 */
  1196. static void udc_ep0_send_zlp(struct lpc32xx_udc *udc)
  1197. {
  1198. udc_write_hwep(udc, EP_IN, NULL, 0);
  1199. }
  1200. /* Get current frame number */
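/* The protocol engine returns the frame number as two consecutive byte
 * reads of DAT_RD_FRAME, low byte first, which are stitched back together
 * below (the USB frame counter is 11 bits wide). */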
  1201. static u16 udc_get_current_frame(struct lpc32xx_udc *udc)
  1202. {
  1203. u16 flo, fhi;
  1204. udc_protocol_cmd_w(udc, CMD_RD_FRAME);
  1205. flo = (u16) udc_protocol_cmd_r(udc, DAT_RD_FRAME);
  1206. fhi = (u16) udc_protocol_cmd_r(udc, DAT_RD_FRAME);
  1207. return (fhi << 8) | flo;
  1208. }
  1209. /* Set the device as configured - enables all endpoints */
  1210. static inline void udc_set_device_configured(struct lpc32xx_udc *udc)
  1211. {
  1212. udc_protocol_cmd_data_w(udc, CMD_CFG_DEV, DAT_WR_BYTE(CONF_DVICE));
  1213. }
  1214. /* Set the device as unconfigured - disables all endpoints */
  1215. static inline void udc_set_device_unconfigured(struct lpc32xx_udc *udc)
  1216. {
  1217. udc_protocol_cmd_data_w(udc, CMD_CFG_DEV, DAT_WR_BYTE(0));
  1218. }
  1219. /* reinit == restore initial software state */
  1220. static void udc_reinit(struct lpc32xx_udc *udc)
  1221. {
  1222. u32 i;
  1223. INIT_LIST_HEAD(&udc->gadget.ep_list);
  1224. INIT_LIST_HEAD(&udc->gadget.ep0->ep_list);
  1225. for (i = 0; i < NUM_ENDPOINTS; i++) {
  1226. struct lpc32xx_ep *ep = &udc->ep[i];
  1227. if (i != 0)
  1228. list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
  1229. ep->desc = NULL;
  1230. ep->ep.maxpacket = ep->maxpacket;
  1231. INIT_LIST_HEAD(&ep->queue);
  1232. ep->req_pending = 0;
  1233. }
  1234. udc->ep0state = WAIT_FOR_SETUP;
  1235. }
  1236. /* Must be called with lock */
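/* done() removes a finished request from the endpoint queue, unmaps or
 * syncs its DMA buffer and frees the DMA descriptor for DMA-capable
 * endpoints, then drops the lock around the gadget's ->complete() callback
 * before re-acquiring it. */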
  1237. static void done(struct lpc32xx_ep *ep, struct lpc32xx_request *req, int status)
  1238. {
  1239. struct lpc32xx_udc *udc = ep->udc;
  1240. list_del_init(&req->queue);
  1241. if (req->req.status == -EINPROGRESS)
  1242. req->req.status = status;
  1243. else
  1244. status = req->req.status;
  1245. if (ep->lep) {
  1246. enum dma_data_direction direction;
  1247. if (ep->is_in)
  1248. direction = DMA_TO_DEVICE;
  1249. else
  1250. direction = DMA_FROM_DEVICE;
  1251. if (req->mapped) {
  1252. dma_unmap_single(ep->udc->gadget.dev.parent,
  1253. req->req.dma, req->req.length,
  1254. direction);
  1255. req->req.dma = 0;
  1256. req->mapped = 0;
  1257. } else
  1258. dma_sync_single_for_cpu(ep->udc->gadget.dev.parent,
  1259. req->req.dma, req->req.length,
  1260. direction);
  1261. /* Free DDs */
  1262. udc_dd_free(udc, req->dd_desc_ptr);
  1263. }
  1264. if (status && status != -ESHUTDOWN)
  1265. ep_dbg(ep, "%s done %p, status %d\n", ep->ep.name, req, status);
  1266. ep->req_pending = 0;
  1267. spin_unlock(&udc->lock);
  1268. req->req.complete(&ep->ep, &req->req);
  1269. spin_lock(&udc->lock);
  1270. }
  1271. /* Must be called with lock */
  1272. static void nuke(struct lpc32xx_ep *ep, int status)
  1273. {
  1274. struct lpc32xx_request *req;
  1275. while (!list_empty(&ep->queue)) {
  1276. req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
  1277. done(ep, req, status);
  1278. }
  1279. if (ep->desc && status == -ESHUTDOWN) {
  1280. uda_disable_hwepint(ep->udc, ep->hwep_num);
  1281. udc_disable_hwep(ep->udc, ep->hwep_num);
  1282. }
  1283. }
  1284. /* IN endpoint 0 transfer */
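/* Returns 1 when the queued request has been completed and dequeued
 * (including the zero-length case), or 0 when EP0 should remain in the
 * DATA_IN state for further packets. */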
  1285. static int udc_ep0_in_req(struct lpc32xx_udc *udc)
  1286. {
  1287. struct lpc32xx_request *req;
  1288. struct lpc32xx_ep *ep0 = &udc->ep[0];
  1289. u32 tsend, ts = 0;
  1290. if (list_empty(&ep0->queue))
  1291. /* Nothing to send */
  1292. return 0;
  1293. else
  1294. req = list_entry(ep0->queue.next, struct lpc32xx_request,
  1295. queue);
  1296. tsend = ts = req->req.length - req->req.actual;
  1297. if (ts == 0) {
  1298. /* Send a ZLP */
  1299. udc_ep0_send_zlp(udc);
  1300. done(ep0, req, 0);
  1301. return 1;
  1302. } else if (ts > ep0->ep.maxpacket)
  1303. ts = ep0->ep.maxpacket; /* Just send what we can */
  1304. /* Write data to the EP0 FIFO and start transfer */
  1305. udc_write_hwep(udc, EP_IN, (req->req.buf + req->req.actual), ts);
  1306. /* Increment data pointer */
  1307. req->req.actual += ts;
  1308. if (tsend >= ep0->ep.maxpacket)
  1309. return 0; /* Stay in data transfer state */
  1310. /* Transfer request is complete */
  1311. udc->ep0state = WAIT_FOR_SETUP;
  1312. done(ep0, req, 0);
  1313. return 1;
  1314. }
  1315. /* OUT endpoint 0 transfer */
  1316. static int udc_ep0_out_req(struct lpc32xx_udc *udc)
  1317. {
  1318. struct lpc32xx_request *req;
  1319. struct lpc32xx_ep *ep0 = &udc->ep[0];
  1320. u32 tr, bufferspace;
  1321. if (list_empty(&ep0->queue))
  1322. return 0;
  1323. else
  1324. req = list_entry(ep0->queue.next, struct lpc32xx_request,
  1325. queue);
  1326. if (req) {
  1327. if (req->req.length == 0) {
  1328. /* Just dequeue request */
  1329. done(ep0, req, 0);
  1330. udc->ep0state = WAIT_FOR_SETUP;
  1331. return 1;
  1332. }
  1333. /* Get data from FIFO */
  1334. bufferspace = req->req.length - req->req.actual;
  1335. if (bufferspace > ep0->ep.maxpacket)
  1336. bufferspace = ep0->ep.maxpacket;
  1337. /* Copy data to buffer */
  1338. prefetchw(req->req.buf + req->req.actual);
  1339. tr = udc_read_hwep(udc, EP_OUT, req->req.buf + req->req.actual,
  1340. bufferspace);
1341. req->req.actual += tr;
  1342. if (tr < ep0->ep.maxpacket) {
  1343. /* This is the last packet */
  1344. done(ep0, req, 0);
  1345. udc->ep0state = WAIT_FOR_SETUP;
  1346. return 1;
  1347. }
  1348. }
  1349. return 0;
  1350. }
  1351. /* Must be called with lock */
  1352. static void stop_activity(struct lpc32xx_udc *udc)
  1353. {
  1354. struct usb_gadget_driver *driver = udc->driver;
  1355. int i;
  1356. if (udc->gadget.speed == USB_SPEED_UNKNOWN)
  1357. driver = NULL;
  1358. udc->gadget.speed = USB_SPEED_UNKNOWN;
  1359. udc->suspended = 0;
  1360. for (i = 0; i < NUM_ENDPOINTS; i++) {
  1361. struct lpc32xx_ep *ep = &udc->ep[i];
  1362. nuke(ep, -ESHUTDOWN);
  1363. }
  1364. if (driver) {
  1365. spin_unlock(&udc->lock);
  1366. driver->disconnect(&udc->gadget);
  1367. spin_lock(&udc->lock);
  1368. }
  1369. isp1301_pullup_enable(udc, 0, 0);
  1370. udc_disable(udc);
  1371. udc_reinit(udc);
  1372. }
  1373. /*
  1374. * Activate or kill host pullup
  1375. * Can be called with or without lock
  1376. */
  1377. static void pullup(struct lpc32xx_udc *udc, int is_on)
  1378. {
  1379. if (!udc->clocked)
  1380. return;
  1381. if (!udc->enabled || !udc->vbus)
  1382. is_on = 0;
  1383. if (is_on != udc->pullup)
  1384. isp1301_pullup_enable(udc, is_on, 0);
  1385. }
  1386. /* Must be called without lock */
  1387. static int lpc32xx_ep_disable(struct usb_ep *_ep)
  1388. {
  1389. struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep);
  1390. struct lpc32xx_udc *udc = ep->udc;
  1391. unsigned long flags;
  1392. if ((ep->hwep_num_base == 0) || (ep->hwep_num == 0))
  1393. return -EINVAL;
  1394. spin_lock_irqsave(&udc->lock, flags);
  1395. nuke(ep, -ESHUTDOWN);
  1396. /* restore the endpoint's pristine config */
  1397. ep->desc = NULL;
  1398. /* Clear all DMA statuses for this EP */
  1399. udc_ep_dma_disable(udc, ep->hwep_num);
  1400. writel(1 << ep->hwep_num, USBD_EOTINTCLR(udc->udp_baseaddr));
  1401. writel(1 << ep->hwep_num, USBD_NDDRTINTCLR(udc->udp_baseaddr));
  1402. writel(1 << ep->hwep_num, USBD_SYSERRTINTCLR(udc->udp_baseaddr));
  1403. writel(1 << ep->hwep_num, USBD_DMARCLR(udc->udp_baseaddr));
  1404. /* Remove the DD pointer in the UDCA */
  1405. udc->udca_v_base[ep->hwep_num] = 0;
  1406. /* Disable and reset endpoint and interrupt */
  1407. uda_clear_hwepint(udc, ep->hwep_num);
  1408. udc_unrealize_hwep(udc, ep->hwep_num);
  1409. ep->hwep_num = 0;
  1410. spin_unlock_irqrestore(&udc->lock, flags);
  1411. atomic_dec(&udc->enabled_ep_cnt);
  1412. wake_up(&udc->ep_disable_wait_queue);
  1413. return 0;
  1414. }
  1415. /* Must be called without lock */
  1416. static int lpc32xx_ep_enable(struct usb_ep *_ep,
  1417. const struct usb_endpoint_descriptor *desc)
  1418. {
  1419. struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep);
  1420. struct lpc32xx_udc *udc = ep->udc;
  1421. u16 maxpacket;
  1422. u32 tmp;
  1423. unsigned long flags;
  1424. /* Verify EP data */
  1425. if ((!_ep) || (!ep) || (!desc) || (ep->desc) ||
  1426. (desc->bDescriptorType != USB_DT_ENDPOINT)) {
  1427. dev_dbg(udc->dev, "bad ep or descriptor\n");
  1428. return -EINVAL;
  1429. }
  1430. maxpacket = usb_endpoint_maxp(desc);
  1431. if ((maxpacket == 0) || (maxpacket > ep->maxpacket)) {
  1432. dev_dbg(udc->dev, "bad ep descriptor's packet size\n");
  1433. return -EINVAL;
  1434. }
  1435. /* Don't touch EP0 */
  1436. if (ep->hwep_num_base == 0) {
  1437. dev_dbg(udc->dev, "Can't re-enable EP0!!!\n");
  1438. return -EINVAL;
  1439. }
  1440. /* Is driver ready? */
  1441. if ((!udc->driver) || (udc->gadget.speed == USB_SPEED_UNKNOWN)) {
  1442. dev_dbg(udc->dev, "bogus device state\n");
  1443. return -ESHUTDOWN;
  1444. }
  1445. tmp = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
  1446. switch (tmp) {
  1447. case USB_ENDPOINT_XFER_CONTROL:
  1448. return -EINVAL;
  1449. case USB_ENDPOINT_XFER_INT:
  1450. if (maxpacket > ep->maxpacket) {
  1451. dev_dbg(udc->dev,
  1452. "Bad INT endpoint maxpacket %d\n", maxpacket);
  1453. return -EINVAL;
  1454. }
  1455. break;
  1456. case USB_ENDPOINT_XFER_BULK:
  1457. switch (maxpacket) {
  1458. case 8:
  1459. case 16:
  1460. case 32:
  1461. case 64:
  1462. break;
  1463. default:
  1464. dev_dbg(udc->dev,
  1465. "Bad BULK endpoint maxpacket %d\n", maxpacket);
  1466. return -EINVAL;
  1467. }
  1468. break;
  1469. case USB_ENDPOINT_XFER_ISOC:
  1470. break;
  1471. }
  1472. spin_lock_irqsave(&udc->lock, flags);
  1473. /* Initialize endpoint to match the selected descriptor */
  1474. ep->is_in = (desc->bEndpointAddress & USB_DIR_IN) != 0;
  1475. ep->desc = desc;
  1476. ep->ep.maxpacket = maxpacket;
  1477. /* Map hardware endpoint from base and direction */
  1478. if (ep->is_in)
  1479. /* IN endpoints are offset 1 from the OUT endpoint */
  1480. ep->hwep_num = ep->hwep_num_base + EP_IN;
  1481. else
  1482. ep->hwep_num = ep->hwep_num_base;
  1483. ep_dbg(ep, "EP enabled: %s, HW:%d, MP:%d IN:%d\n", ep->ep.name,
  1484. ep->hwep_num, maxpacket, (ep->is_in == 1));
  1485. /* Realize the endpoint, interrupt is enabled later when
  1486. * buffers are queued, IN EPs will NAK until buffers are ready */
  1487. udc_realize_hwep(udc, ep->hwep_num, ep->ep.maxpacket);
  1488. udc_clr_buffer_hwep(udc, ep->hwep_num);
  1489. uda_disable_hwepint(udc, ep->hwep_num);
  1490. udc_clrstall_hwep(udc, ep->hwep_num);
  1491. /* Clear all DMA statuses for this EP */
  1492. udc_ep_dma_disable(udc, ep->hwep_num);
  1493. writel(1 << ep->hwep_num, USBD_EOTINTCLR(udc->udp_baseaddr));
  1494. writel(1 << ep->hwep_num, USBD_NDDRTINTCLR(udc->udp_baseaddr));
  1495. writel(1 << ep->hwep_num, USBD_SYSERRTINTCLR(udc->udp_baseaddr));
  1496. writel(1 << ep->hwep_num, USBD_DMARCLR(udc->udp_baseaddr));
  1497. spin_unlock_irqrestore(&udc->lock, flags);
  1498. atomic_inc(&udc->enabled_ep_cnt);
  1499. return 0;
  1500. }
  1501. /*
1502. * Allocate a USB request object
  1503. * Can be called with or without lock
  1504. */
  1505. static struct usb_request *lpc32xx_ep_alloc_request(struct usb_ep *_ep,
  1506. gfp_t gfp_flags)
  1507. {
  1508. struct lpc32xx_request *req;
  1509. req = kzalloc(sizeof(struct lpc32xx_request), gfp_flags);
  1510. if (!req)
  1511. return NULL;
  1512. INIT_LIST_HEAD(&req->queue);
  1513. return &req->req;
  1514. }
  1515. /*
1516. * Free a previously allocated USB request object
  1517. * Can be called with or without lock
  1518. */
  1519. static void lpc32xx_ep_free_request(struct usb_ep *_ep,
  1520. struct usb_request *_req)
  1521. {
  1522. struct lpc32xx_request *req;
  1523. req = container_of(_req, struct lpc32xx_request, req);
  1524. BUG_ON(!list_empty(&req->queue));
  1525. kfree(req);
  1526. }
  1527. /* Must be called without lock */
  1528. static int lpc32xx_ep_queue(struct usb_ep *_ep,
  1529. struct usb_request *_req, gfp_t gfp_flags)
  1530. {
  1531. struct lpc32xx_request *req;
  1532. struct lpc32xx_ep *ep;
  1533. struct lpc32xx_udc *udc;
  1534. unsigned long flags;
  1535. int status = 0;
  1536. req = container_of(_req, struct lpc32xx_request, req);
  1537. ep = container_of(_ep, struct lpc32xx_ep, ep);
  1538. if (!_req || !_req->complete || !_req->buf ||
  1539. !list_empty(&req->queue))
  1540. return -EINVAL;
  1541. udc = ep->udc;
  1542. if (!_ep || (!ep->desc && ep->hwep_num_base != 0)) {
  1543. dev_dbg(udc->dev, "invalid ep\n");
  1544. return -EINVAL;
  1545. }
  1546. if ((!udc) || (!udc->driver) ||
  1547. (udc->gadget.speed == USB_SPEED_UNKNOWN)) {
  1548. dev_dbg(udc->dev, "invalid device\n");
  1549. return -EINVAL;
  1550. }
  1551. if (ep->lep) {
  1552. enum dma_data_direction direction;
  1553. struct lpc32xx_usbd_dd_gad *dd;
  1554. /* Map DMA pointer */
  1555. if (ep->is_in)
  1556. direction = DMA_TO_DEVICE;
  1557. else
  1558. direction = DMA_FROM_DEVICE;
  1559. if (req->req.dma == 0) {
  1560. req->req.dma = dma_map_single(
  1561. ep->udc->gadget.dev.parent,
  1562. req->req.buf, req->req.length, direction);
  1563. req->mapped = 1;
  1564. } else {
  1565. dma_sync_single_for_device(
  1566. ep->udc->gadget.dev.parent, req->req.dma,
  1567. req->req.length, direction);
  1568. req->mapped = 0;
  1569. }
  1570. /* For the request, build a list of DDs */
  1571. dd = udc_dd_alloc(udc);
  1572. if (!dd) {
  1573. /* Error allocating DD */
  1574. return -ENOMEM;
  1575. }
  1576. req->dd_desc_ptr = dd;
  1577. /* Setup the DMA descriptor */
  1578. dd->dd_next_phy = dd->dd_next_v = 0;
  1579. dd->dd_buffer_addr = req->req.dma;
  1580. dd->dd_status = 0;
  1581. /* Special handling for ISO EPs */
  1582. if (ep->eptype == EP_ISO_TYPE) {
  1583. dd->dd_setup = DD_SETUP_ISO_EP |
  1584. DD_SETUP_PACKETLEN(0) |
  1585. DD_SETUP_DMALENBYTES(1);
  1586. dd->dd_iso_ps_mem_addr = dd->this_dma + 24;
  1587. if (ep->is_in)
  1588. dd->iso_status[0] = req->req.length;
  1589. else
  1590. dd->iso_status[0] = 0;
  1591. } else
  1592. dd->dd_setup = DD_SETUP_PACKETLEN(ep->ep.maxpacket) |
  1593. DD_SETUP_DMALENBYTES(req->req.length);
  1594. }
  1595. ep_dbg(ep, "%s queue req %p len %d buf %p (in=%d) z=%d\n", _ep->name,
  1596. _req, _req->length, _req->buf, ep->is_in, _req->zero);
  1597. spin_lock_irqsave(&udc->lock, flags);
  1598. _req->status = -EINPROGRESS;
  1599. _req->actual = 0;
  1600. req->send_zlp = _req->zero;
  1601. /* Kickstart empty queues */
  1602. if (list_empty(&ep->queue)) {
  1603. list_add_tail(&req->queue, &ep->queue);
  1604. if (ep->hwep_num_base == 0) {
  1605. /* Handle expected data direction */
  1606. if (ep->is_in) {
  1607. /* IN packet to host */
  1608. udc->ep0state = DATA_IN;
  1609. status = udc_ep0_in_req(udc);
  1610. } else {
  1611. /* OUT packet from host */
  1612. udc->ep0state = DATA_OUT;
  1613. status = udc_ep0_out_req(udc);
  1614. }
  1615. } else if (ep->is_in) {
  1616. /* IN packet to host and kick off transfer */
  1617. if (!ep->req_pending)
  1618. udc_ep_in_req_dma(udc, ep);
  1619. } else
  1620. /* OUT packet from host and kick off list */
  1621. if (!ep->req_pending)
  1622. udc_ep_out_req_dma(udc, ep);
  1623. } else
  1624. list_add_tail(&req->queue, &ep->queue);
  1625. spin_unlock_irqrestore(&udc->lock, flags);
  1626. return (status < 0) ? status : 0;
  1627. }
  1628. /* Must be called without lock */
  1629. static int lpc32xx_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
  1630. {
  1631. struct lpc32xx_ep *ep;
  1632. struct lpc32xx_request *req;
  1633. unsigned long flags;
  1634. ep = container_of(_ep, struct lpc32xx_ep, ep);
  1635. if (!_ep || ep->hwep_num_base == 0)
  1636. return -EINVAL;
  1637. spin_lock_irqsave(&ep->udc->lock, flags);
  1638. /* make sure it's actually queued on this endpoint */
  1639. list_for_each_entry(req, &ep->queue, queue) {
  1640. if (&req->req == _req)
  1641. break;
  1642. }
  1643. if (&req->req != _req) {
  1644. spin_unlock_irqrestore(&ep->udc->lock, flags);
  1645. return -EINVAL;
  1646. }
  1647. done(ep, req, -ECONNRESET);
  1648. spin_unlock_irqrestore(&ep->udc->lock, flags);
  1649. return 0;
  1650. }
  1651. /* Must be called without lock */
  1652. static int lpc32xx_ep_set_halt(struct usb_ep *_ep, int value)
  1653. {
  1654. struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep);
  1655. struct lpc32xx_udc *udc = ep->udc;
  1656. unsigned long flags;
  1657. if ((!ep) || (ep->desc == NULL) || (ep->hwep_num <= 1))
  1658. return -EINVAL;
  1659. /* Don't halt an IN EP */
  1660. if (ep->is_in)
  1661. return -EAGAIN;
  1662. spin_lock_irqsave(&udc->lock, flags);
  1663. if (value == 1) {
  1664. /* stall */
  1665. udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(ep->hwep_num),
  1666. DAT_WR_BYTE(EP_STAT_ST));
  1667. } else {
  1668. /* End stall */
  1669. ep->wedge = 0;
  1670. udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(ep->hwep_num),
  1671. DAT_WR_BYTE(0));
  1672. }
  1673. spin_unlock_irqrestore(&udc->lock, flags);
  1674. return 0;
  1675. }
1676. /* Set the halt feature; clear-halt requests are ignored */
  1677. static int lpc32xx_ep_set_wedge(struct usb_ep *_ep)
  1678. {
  1679. struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep);
  1680. if (!_ep || !ep->udc)
  1681. return -EINVAL;
  1682. ep->wedge = 1;
  1683. return usb_ep_set_halt(_ep);
  1684. }
  1685. static const struct usb_ep_ops lpc32xx_ep_ops = {
  1686. .enable = lpc32xx_ep_enable,
  1687. .disable = lpc32xx_ep_disable,
  1688. .alloc_request = lpc32xx_ep_alloc_request,
  1689. .free_request = lpc32xx_ep_free_request,
  1690. .queue = lpc32xx_ep_queue,
  1691. .dequeue = lpc32xx_ep_dequeue,
  1692. .set_halt = lpc32xx_ep_set_halt,
  1693. .set_wedge = lpc32xx_ep_set_wedge,
  1694. };
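/* Gadget drivers reach these ops only through the generic usb_ep_*()
 * wrappers (usb_ep_enable(), usb_ep_queue(), usb_ep_dequeue(),
 * usb_ep_set_halt(), ...) rather than calling them directly. */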
  1695. /* Send a ZLP on a non-0 IN EP */
  1696. void udc_send_in_zlp(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
  1697. {
  1698. /* Clear EP status */
  1699. udc_clearep_getsts(udc, ep->hwep_num);
  1700. /* Send ZLP via FIFO mechanism */
  1701. udc_write_hwep(udc, ep->hwep_num, NULL, 0);
  1702. }
  1703. /*
  1704. * Handle EP completion for ZLP
  1705. * This function will only be called when a delayed ZLP needs to be sent out
  1706. * after a DMA transfer has filled both buffers.
  1707. */
  1708. void udc_handle_eps(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
  1709. {
  1710. u32 epstatus;
  1711. struct lpc32xx_request *req;
  1712. if (ep->hwep_num <= 0)
  1713. return;
  1714. uda_clear_hwepint(udc, ep->hwep_num);
  1715. /* If this interrupt isn't enabled, return now */
  1716. if (!(udc->enabled_hwepints & (1 << ep->hwep_num)))
  1717. return;
  1718. /* Get endpoint status */
  1719. epstatus = udc_clearep_getsts(udc, ep->hwep_num);
  1720. /*
  1721. * This should never happen, but protect against writing to the
  1722. * buffer when full.
  1723. */
  1724. if (epstatus & EP_SEL_F)
  1725. return;
  1726. if (ep->is_in) {
  1727. udc_send_in_zlp(udc, ep);
  1728. uda_disable_hwepint(udc, ep->hwep_num);
  1729. } else
  1730. return;
  1731. /* If there isn't a request waiting, something went wrong */
  1732. req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
  1733. if (req) {
  1734. done(ep, req, 0);
  1735. /* Start another request if ready */
  1736. if (!list_empty(&ep->queue)) {
  1737. if (ep->is_in)
  1738. udc_ep_in_req_dma(udc, ep);
  1739. else
  1740. udc_ep_out_req_dma(udc, ep);
  1741. } else
  1742. ep->req_pending = 0;
  1743. }
  1744. }
  1745. /* DMA end of transfer completion */
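/* Runs from the DMA interrupt once a descriptor has retired: it checks the
 * DD status, accounts the transferred length, sends (or defers) a ZLP when
 * requested, completes the request and restarts DMA if more requests are
 * queued on the endpoint. */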
  1746. static void udc_handle_dma_ep(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
  1747. {
  1748. u32 status, epstatus;
  1749. struct lpc32xx_request *req;
  1750. struct lpc32xx_usbd_dd_gad *dd;
  1751. #ifdef CONFIG_USB_GADGET_DEBUG_FILES
  1752. ep->totalints++;
  1753. #endif
  1754. req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
  1755. if (!req) {
  1756. ep_err(ep, "DMA interrupt on no req!\n");
  1757. return;
  1758. }
  1759. dd = req->dd_desc_ptr;
  1760. /* DMA descriptor should always be retired for this call */
  1761. if (!(dd->dd_status & DD_STATUS_DD_RETIRED))
  1762. ep_warn(ep, "DMA descriptor did not retire\n");
  1763. /* Disable DMA */
  1764. udc_ep_dma_disable(udc, ep->hwep_num);
  1765. writel((1 << ep->hwep_num), USBD_EOTINTCLR(udc->udp_baseaddr));
  1766. writel((1 << ep->hwep_num), USBD_NDDRTINTCLR(udc->udp_baseaddr));
  1767. /* System error? */
  1768. if (readl(USBD_SYSERRTINTST(udc->udp_baseaddr)) &
  1769. (1 << ep->hwep_num)) {
  1770. writel((1 << ep->hwep_num),
  1771. USBD_SYSERRTINTCLR(udc->udp_baseaddr));
  1772. ep_err(ep, "AHB critical error!\n");
  1773. ep->req_pending = 0;
  1774. /* The error could have occurred on a packet of a multipacket
  1775. * transfer, so recovering the transfer is not possible. Close
  1776. * the request with an error */
  1777. done(ep, req, -ECONNABORTED);
  1778. return;
  1779. }
  1780. /* Handle the current DD's status */
  1781. status = dd->dd_status;
  1782. switch (status & DD_STATUS_STS_MASK) {
  1783. case DD_STATUS_STS_NS:
  1784. /* DD not serviced? This shouldn't happen! */
  1785. ep->req_pending = 0;
  1786. ep_err(ep, "DMA critical EP error: DD not serviced (0x%x)!\n",
  1787. status);
  1788. done(ep, req, -ECONNABORTED);
  1789. return;
  1790. case DD_STATUS_STS_BS:
  1791. /* Interrupt only fires on EOT - This shouldn't happen! */
  1792. ep->req_pending = 0;
  1793. ep_err(ep, "DMA critical EP error: EOT prior to service completion (0x%x)!\n",
  1794. status);
  1795. done(ep, req, -ECONNABORTED);
  1796. return;
  1797. case DD_STATUS_STS_NC:
  1798. case DD_STATUS_STS_DUR:
  1799. /* Really just a short packet, not an underrun */
  1800. /* This is a good status and what we expect */
  1801. break;
  1802. default:
  1803. /* Data overrun, system error, or unknown */
  1804. ep->req_pending = 0;
  1805. ep_err(ep, "DMA critical EP error: System error (0x%x)!\n",
  1806. status);
  1807. done(ep, req, -ECONNABORTED);
  1808. return;
  1809. }
  1810. /* ISO endpoints are handled differently */
  1811. if (ep->eptype == EP_ISO_TYPE) {
  1812. if (ep->is_in)
  1813. req->req.actual = req->req.length;
  1814. else
  1815. req->req.actual = dd->iso_status[0] & 0xFFFF;
  1816. } else
  1817. req->req.actual += DD_STATUS_CURDMACNT(status);
  1818. /* Send a ZLP if necessary. This will be done for non-int
1819. * packets whose size is an exact multiple of MAXP */
  1820. if (req->send_zlp) {
  1821. /*
  1822. * If at least 1 buffer is available, send the ZLP now.
  1823. * Otherwise, the ZLP send needs to be deferred until a
  1824. * buffer is available.
  1825. */
  1826. if (udc_clearep_getsts(udc, ep->hwep_num) & EP_SEL_F) {
  1827. udc_clearep_getsts(udc, ep->hwep_num);
  1828. uda_enable_hwepint(udc, ep->hwep_num);
  1829. epstatus = udc_clearep_getsts(udc, ep->hwep_num);
  1830. /* Let the EP interrupt handle the ZLP */
  1831. return;
  1832. } else
  1833. udc_send_in_zlp(udc, ep);
  1834. }
  1835. /* Transfer request is complete */
  1836. done(ep, req, 0);
  1837. /* Start another request if ready */
  1838. udc_clearep_getsts(udc, ep->hwep_num);
  1839. if (!list_empty((&ep->queue))) {
  1840. if (ep->is_in)
  1841. udc_ep_in_req_dma(udc, ep);
  1842. else
  1843. udc_ep_out_req_dma(udc, ep);
  1844. } else
  1845. ep->req_pending = 0;
  1846. }
  1847. /*
  1848. *
  1849. * Endpoint 0 functions
  1850. *
  1851. */
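/* EP0 is driven as a small state machine via udc->ep0state: WAIT_FOR_SETUP
 * until a SETUP packet arrives, then DATA_IN or DATA_OUT while the data
 * phase is serviced through the EP0 FIFO, and back to WAIT_FOR_SETUP when
 * the transfer completes or is stalled. */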
  1852. static void udc_handle_dev(struct lpc32xx_udc *udc)
  1853. {
  1854. u32 tmp;
  1855. udc_protocol_cmd_w(udc, CMD_GET_DEV_STAT);
  1856. tmp = udc_protocol_cmd_r(udc, DAT_GET_DEV_STAT);
  1857. if (tmp & DEV_RST)
  1858. uda_usb_reset(udc);
  1859. else if (tmp & DEV_CON_CH)
  1860. uda_power_event(udc, (tmp & DEV_CON));
  1861. else if (tmp & DEV_SUS_CH) {
  1862. if (tmp & DEV_SUS) {
  1863. if (udc->vbus == 0)
  1864. stop_activity(udc);
  1865. else if ((udc->gadget.speed != USB_SPEED_UNKNOWN) &&
  1866. udc->driver) {
  1867. /* Power down transceiver */
  1868. udc->poweron = 0;
  1869. schedule_work(&udc->pullup_job);
  1870. uda_resm_susp_event(udc, 1);
  1871. }
  1872. } else if ((udc->gadget.speed != USB_SPEED_UNKNOWN) &&
  1873. udc->driver && udc->vbus) {
  1874. uda_resm_susp_event(udc, 0);
  1875. /* Power up transceiver */
  1876. udc->poweron = 1;
  1877. schedule_work(&udc->pullup_job);
  1878. }
  1879. }
  1880. }
  1881. static int udc_get_status(struct lpc32xx_udc *udc, u16 reqtype, u16 wIndex)
  1882. {
  1883. struct lpc32xx_ep *ep;
  1884. u32 ep0buff = 0, tmp;
  1885. switch (reqtype & USB_RECIP_MASK) {
  1886. case USB_RECIP_INTERFACE:
  1887. break; /* Not supported */
  1888. case USB_RECIP_DEVICE:
  1889. ep0buff = (udc->selfpowered << USB_DEVICE_SELF_POWERED);
  1890. if (udc->dev_status & (1 << USB_DEVICE_REMOTE_WAKEUP))
  1891. ep0buff |= (1 << USB_DEVICE_REMOTE_WAKEUP);
  1892. break;
  1893. case USB_RECIP_ENDPOINT:
  1894. tmp = wIndex & USB_ENDPOINT_NUMBER_MASK;
  1895. ep = &udc->ep[tmp];
  1896. if ((tmp == 0) || (tmp >= NUM_ENDPOINTS) || (tmp && !ep->desc))
  1897. return -EOPNOTSUPP;
  1898. if (wIndex & USB_DIR_IN) {
  1899. if (!ep->is_in)
  1900. return -EOPNOTSUPP; /* Something's wrong */
  1901. } else if (ep->is_in)
  1902. return -EOPNOTSUPP; /* Not an IN endpoint */
  1903. /* Get status of the endpoint */
  1904. udc_protocol_cmd_w(udc, CMD_SEL_EP(ep->hwep_num));
  1905. tmp = udc_protocol_cmd_r(udc, DAT_SEL_EP(ep->hwep_num));
  1906. if (tmp & EP_SEL_ST)
  1907. ep0buff = (1 << USB_ENDPOINT_HALT);
  1908. else
  1909. ep0buff = 0;
  1910. break;
  1911. default:
  1912. break;
  1913. }
  1914. /* Return data */
  1915. udc_write_hwep(udc, EP_IN, &ep0buff, 2);
  1916. return 0;
  1917. }
  1918. static void udc_handle_ep0_setup(struct lpc32xx_udc *udc)
  1919. {
  1920. struct lpc32xx_ep *ep, *ep0 = &udc->ep[0];
  1921. struct usb_ctrlrequest ctrlpkt;
  1922. int i, bytes;
  1923. u16 wIndex, wValue, wLength, reqtype, req, tmp;
  1924. /* Nuke previous transfers */
  1925. nuke(ep0, -EPROTO);
  1926. /* Get setup packet */
  1927. bytes = udc_read_hwep(udc, EP_OUT, (u32 *) &ctrlpkt, 8);
  1928. if (bytes != 8) {
  1929. ep_warn(ep0, "Incorrectly sized setup packet (s/b 8, is %d)!\n",
  1930. bytes);
  1931. return;
  1932. }
  1933. /* Native endianness */
  1934. wIndex = le16_to_cpu(ctrlpkt.wIndex);
  1935. wValue = le16_to_cpu(ctrlpkt.wValue);
  1936. wLength = le16_to_cpu(ctrlpkt.wLength);
1937. reqtype = ctrlpkt.bRequestType;
  1938. /* Set direction of EP0 */
  1939. if (likely(reqtype & USB_DIR_IN))
  1940. ep0->is_in = 1;
  1941. else
  1942. ep0->is_in = 0;
  1943. /* Handle SETUP packet */
1944. req = ctrlpkt.bRequest;
  1945. switch (req) {
  1946. case USB_REQ_CLEAR_FEATURE:
  1947. case USB_REQ_SET_FEATURE:
  1948. switch (reqtype) {
  1949. case (USB_TYPE_STANDARD | USB_RECIP_DEVICE):
  1950. if (wValue != USB_DEVICE_REMOTE_WAKEUP)
  1951. goto stall; /* Nothing else handled */
  1952. /* Tell board about event */
  1953. if (req == USB_REQ_CLEAR_FEATURE)
  1954. udc->dev_status &=
  1955. ~(1 << USB_DEVICE_REMOTE_WAKEUP);
  1956. else
  1957. udc->dev_status |=
  1958. (1 << USB_DEVICE_REMOTE_WAKEUP);
  1959. uda_remwkp_cgh(udc);
  1960. goto zlp_send;
  1961. case (USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
  1962. tmp = wIndex & USB_ENDPOINT_NUMBER_MASK;
  1963. if ((wValue != USB_ENDPOINT_HALT) ||
  1964. (tmp >= NUM_ENDPOINTS))
  1965. break;
  1966. /* Find hardware endpoint from logical endpoint */
  1967. ep = &udc->ep[tmp];
  1968. tmp = ep->hwep_num;
  1969. if (tmp == 0)
  1970. break;
  1971. if (req == USB_REQ_SET_FEATURE)
  1972. udc_stall_hwep(udc, tmp);
  1973. else if (!ep->wedge)
  1974. udc_clrstall_hwep(udc, tmp);
  1975. goto zlp_send;
  1976. default:
  1977. break;
  1978. }
  1979. case USB_REQ_SET_ADDRESS:
  1980. if (reqtype == (USB_TYPE_STANDARD | USB_RECIP_DEVICE)) {
  1981. udc_set_address(udc, wValue);
  1982. goto zlp_send;
  1983. }
  1984. break;
  1985. case USB_REQ_GET_STATUS:
  1986. udc_get_status(udc, reqtype, wIndex);
  1987. return;
  1988. default:
  1989. break; /* Let GadgetFS handle the descriptor instead */
  1990. }
  1991. if (likely(udc->driver)) {
  1992. /* device-2-host (IN) or no data setup command, process
  1993. * immediately */
  1994. spin_unlock(&udc->lock);
  1995. i = udc->driver->setup(&udc->gadget, &ctrlpkt);
  1996. spin_lock(&udc->lock);
  1997. if (req == USB_REQ_SET_CONFIGURATION) {
  1998. /* Configuration is set after endpoints are realized */
  1999. if (wValue) {
  2000. /* Set configuration */
  2001. udc_set_device_configured(udc);
  2002. udc_protocol_cmd_data_w(udc, CMD_SET_MODE,
  2003. DAT_WR_BYTE(AP_CLK |
  2004. INAK_BI | INAK_II));
  2005. } else {
  2006. /* Clear configuration */
  2007. udc_set_device_unconfigured(udc);
  2008. /* Disable NAK interrupts */
  2009. udc_protocol_cmd_data_w(udc, CMD_SET_MODE,
  2010. DAT_WR_BYTE(AP_CLK));
  2011. }
  2012. }
  2013. if (i < 0) {
  2014. /* setup processing failed, force stall */
  2015. dev_err(udc->dev,
  2016. "req %02x.%02x protocol STALL; stat %d\n",
  2017. reqtype, req, i);
  2018. udc->ep0state = WAIT_FOR_SETUP;
  2019. goto stall;
  2020. }
  2021. }
  2022. if (!ep0->is_in)
  2023. udc_ep0_send_zlp(udc); /* ZLP IN packet on data phase */
  2024. return;
  2025. stall:
  2026. udc_stall_hwep(udc, EP_IN);
  2027. return;
  2028. zlp_send:
  2029. udc_ep0_send_zlp(udc);
  2030. return;
  2031. }
  2032. /* IN endpoint 0 transfer */
  2033. static void udc_handle_ep0_in(struct lpc32xx_udc *udc)
  2034. {
  2035. struct lpc32xx_ep *ep0 = &udc->ep[0];
  2036. u32 epstatus;
  2037. /* Clear EP interrupt */
  2038. epstatus = udc_clearep_getsts(udc, EP_IN);
  2039. #ifdef CONFIG_USB_GADGET_DEBUG_FILES
  2040. ep0->totalints++;
  2041. #endif
  2042. /* Stalled? Clear stall and reset buffers */
  2043. if (epstatus & EP_SEL_ST) {
  2044. udc_clrstall_hwep(udc, EP_IN);
  2045. nuke(ep0, -ECONNABORTED);
  2046. udc->ep0state = WAIT_FOR_SETUP;
  2047. return;
  2048. }
  2049. /* Is a buffer available? */
  2050. if (!(epstatus & EP_SEL_F)) {
  2051. /* Handle based on current state */
  2052. if (udc->ep0state == DATA_IN)
  2053. udc_ep0_in_req(udc);
  2054. else {
2055. /* Unknown state for EP0 or end of DATA IN phase */
  2056. nuke(ep0, -ECONNABORTED);
  2057. udc->ep0state = WAIT_FOR_SETUP;
  2058. }
  2059. }
  2060. }
  2061. /* OUT endpoint 0 transfer */
  2062. static void udc_handle_ep0_out(struct lpc32xx_udc *udc)
  2063. {
  2064. struct lpc32xx_ep *ep0 = &udc->ep[0];
  2065. u32 epstatus;
  2066. /* Clear EP interrupt */
  2067. epstatus = udc_clearep_getsts(udc, EP_OUT);
  2068. #ifdef CONFIG_USB_GADGET_DEBUG_FILES
  2069. ep0->totalints++;
  2070. #endif
  2071. /* Stalled? */
  2072. if (epstatus & EP_SEL_ST) {
  2073. udc_clrstall_hwep(udc, EP_OUT);
  2074. nuke(ep0, -ECONNABORTED);
  2075. udc->ep0state = WAIT_FOR_SETUP;
  2076. return;
  2077. }
  2078. /* A NAK may occur if a packet couldn't be received yet */
  2079. if (epstatus & EP_SEL_EPN)
  2080. return;
  2081. /* Setup packet incoming? */
  2082. if (epstatus & EP_SEL_STP) {
  2083. nuke(ep0, 0);
  2084. udc->ep0state = WAIT_FOR_SETUP;
  2085. }
  2086. /* Data available? */
  2087. if (epstatus & EP_SEL_F)
  2088. /* Handle based on current state */
  2089. switch (udc->ep0state) {
  2090. case WAIT_FOR_SETUP:
  2091. udc_handle_ep0_setup(udc);
  2092. break;
  2093. case DATA_OUT:
  2094. udc_ep0_out_req(udc);
  2095. break;
  2096. default:
  2097. /* Unknown state for EP0 */
  2098. nuke(ep0, -ECONNABORTED);
  2099. udc->ep0state = WAIT_FOR_SETUP;
  2100. }
  2101. }
  2102. /* Must be called without lock */
  2103. static int lpc32xx_get_frame(struct usb_gadget *gadget)
  2104. {
  2105. int frame;
  2106. unsigned long flags;
  2107. struct lpc32xx_udc *udc = to_udc(gadget);
  2108. if (!udc->clocked)
  2109. return -EINVAL;
  2110. spin_lock_irqsave(&udc->lock, flags);
  2111. frame = (int) udc_get_current_frame(udc);
  2112. spin_unlock_irqrestore(&udc->lock, flags);
  2113. return frame;
  2114. }
  2115. static int lpc32xx_wakeup(struct usb_gadget *gadget)
  2116. {
  2117. return -ENOTSUPP;
  2118. }
  2119. static int lpc32xx_set_selfpowered(struct usb_gadget *gadget, int is_on)
  2120. {
  2121. struct lpc32xx_udc *udc = to_udc(gadget);
  2122. /* Always self-powered */
  2123. udc->selfpowered = (is_on != 0);
  2124. return 0;
  2125. }
  2126. /*
  2127. * vbus is here! turn everything on that's ready
  2128. * Must be called without lock
  2129. */
  2130. static int lpc32xx_vbus_session(struct usb_gadget *gadget, int is_active)
  2131. {
  2132. unsigned long flags;
  2133. struct lpc32xx_udc *udc = to_udc(gadget);
  2134. spin_lock_irqsave(&udc->lock, flags);
  2135. /* Doesn't need lock */
  2136. if (udc->driver) {
  2137. udc_clk_set(udc, 1);
  2138. udc_enable(udc);
  2139. pullup(udc, is_active);
  2140. } else {
  2141. stop_activity(udc);
  2142. pullup(udc, 0);
  2143. spin_unlock_irqrestore(&udc->lock, flags);
  2144. /*
  2145. * Wait for all the endpoints to disable,
  2146. * before disabling clocks. Don't wait if
  2147. * endpoints are not enabled.
  2148. */
  2149. if (atomic_read(&udc->enabled_ep_cnt))
  2150. wait_event_interruptible(udc->ep_disable_wait_queue,
  2151. (atomic_read(&udc->enabled_ep_cnt) == 0));
  2152. spin_lock_irqsave(&udc->lock, flags);
  2153. udc_clk_set(udc, 0);
  2154. }
  2155. spin_unlock_irqrestore(&udc->lock, flags);
  2156. return 0;
  2157. }
  2158. /* Can be called with or without lock */
  2159. static int lpc32xx_pullup(struct usb_gadget *gadget, int is_on)
  2160. {
  2161. struct lpc32xx_udc *udc = to_udc(gadget);
  2162. /* Doesn't need lock */
  2163. pullup(udc, is_on);
  2164. return 0;
  2165. }
  2166. static int lpc32xx_start(struct usb_gadget_driver *driver,
  2167. int (*bind)(struct usb_gadget *));
  2168. static int lpc32xx_stop(struct usb_gadget_driver *driver);
  2169. static const struct usb_gadget_ops lpc32xx_udc_ops = {
  2170. .get_frame = lpc32xx_get_frame,
  2171. .wakeup = lpc32xx_wakeup,
  2172. .set_selfpowered = lpc32xx_set_selfpowered,
  2173. .vbus_session = lpc32xx_vbus_session,
  2174. .pullup = lpc32xx_pullup,
  2175. .start = lpc32xx_start,
  2176. .stop = lpc32xx_stop,
  2177. };
  2178. static void nop_release(struct device *dev)
  2179. {
  2180. /* nothing to free */
  2181. }
  2182. static struct lpc32xx_udc controller = {
  2183. .gadget = {
  2184. .ops = &lpc32xx_udc_ops,
  2185. .ep0 = &controller.ep[0].ep,
  2186. .name = driver_name,
  2187. .dev = {
  2188. .init_name = "gadget",
  2189. .release = nop_release,
  2190. }
  2191. },
  2192. .ep[0] = {
  2193. .ep = {
  2194. .name = "ep0",
  2195. .ops = &lpc32xx_ep_ops,
  2196. },
  2197. .udc = &controller,
  2198. .maxpacket = 64,
  2199. .hwep_num_base = 0,
  2200. .hwep_num = 0, /* Can be 0 or 1, has special handling */
  2201. .lep = 0,
  2202. .eptype = EP_CTL_TYPE,
  2203. },
  2204. .ep[1] = {
  2205. .ep = {
  2206. .name = "ep1-int",
  2207. .ops = &lpc32xx_ep_ops,
  2208. },
  2209. .udc = &controller,
  2210. .maxpacket = 64,
  2211. .hwep_num_base = 2,
  2212. .hwep_num = 0, /* 2 or 3, will be set later */
  2213. .lep = 1,
  2214. .eptype = EP_INT_TYPE,
  2215. },
  2216. .ep[2] = {
  2217. .ep = {
  2218. .name = "ep2-bulk",
  2219. .ops = &lpc32xx_ep_ops,
  2220. },
  2221. .udc = &controller,
  2222. .maxpacket = 64,
  2223. .hwep_num_base = 4,
  2224. .hwep_num = 0, /* 4 or 5, will be set later */
  2225. .lep = 2,
  2226. .eptype = EP_BLK_TYPE,
  2227. },
  2228. .ep[3] = {
  2229. .ep = {
  2230. .name = "ep3-iso",
  2231. .ops = &lpc32xx_ep_ops,
  2232. },
  2233. .udc = &controller,
  2234. .maxpacket = 1023,
  2235. .hwep_num_base = 6,
  2236. .hwep_num = 0, /* 6 or 7, will be set later */
  2237. .lep = 3,
  2238. .eptype = EP_ISO_TYPE,
  2239. },
  2240. .ep[4] = {
  2241. .ep = {
  2242. .name = "ep4-int",
  2243. .ops = &lpc32xx_ep_ops,
  2244. },
  2245. .udc = &controller,
  2246. .maxpacket = 64,
  2247. .hwep_num_base = 8,
  2248. .hwep_num = 0, /* 8 or 9, will be set later */
  2249. .lep = 4,
  2250. .eptype = EP_INT_TYPE,
  2251. },
  2252. .ep[5] = {
  2253. .ep = {
  2254. .name = "ep5-bulk",
  2255. .ops = &lpc32xx_ep_ops,
  2256. },
  2257. .udc = &controller,
  2258. .maxpacket = 64,
  2259. .hwep_num_base = 10,
  2260. .hwep_num = 0, /* 10 or 11, will be set later */
  2261. .lep = 5,
  2262. .eptype = EP_BLK_TYPE,
  2263. },
  2264. .ep[6] = {
  2265. .ep = {
  2266. .name = "ep6-iso",
  2267. .ops = &lpc32xx_ep_ops,
  2268. },
  2269. .udc = &controller,
  2270. .maxpacket = 1023,
  2271. .hwep_num_base = 12,
  2272. .hwep_num = 0, /* 12 or 13, will be set later */
  2273. .lep = 6,
  2274. .eptype = EP_ISO_TYPE,
  2275. },
  2276. .ep[7] = {
  2277. .ep = {
  2278. .name = "ep7-int",
  2279. .ops = &lpc32xx_ep_ops,
  2280. },
  2281. .udc = &controller,
  2282. .maxpacket = 64,
  2283. .hwep_num_base = 14,
  2284. .hwep_num = 0,
  2285. .lep = 7,
  2286. .eptype = EP_INT_TYPE,
  2287. },
  2288. .ep[8] = {
  2289. .ep = {
  2290. .name = "ep8-bulk",
  2291. .ops = &lpc32xx_ep_ops,
  2292. },
  2293. .udc = &controller,
  2294. .maxpacket = 64,
  2295. .hwep_num_base = 16,
  2296. .hwep_num = 0,
  2297. .lep = 8,
  2298. .eptype = EP_BLK_TYPE,
  2299. },
  2300. .ep[9] = {
  2301. .ep = {
  2302. .name = "ep9-iso",
  2303. .ops = &lpc32xx_ep_ops,
  2304. },
  2305. .udc = &controller,
  2306. .maxpacket = 1023,
  2307. .hwep_num_base = 18,
  2308. .hwep_num = 0,
  2309. .lep = 9,
  2310. .eptype = EP_ISO_TYPE,
  2311. },
  2312. .ep[10] = {
  2313. .ep = {
  2314. .name = "ep10-int",
  2315. .ops = &lpc32xx_ep_ops,
  2316. },
  2317. .udc = &controller,
  2318. .maxpacket = 64,
  2319. .hwep_num_base = 20,
  2320. .hwep_num = 0,
  2321. .lep = 10,
  2322. .eptype = EP_INT_TYPE,
  2323. },
  2324. .ep[11] = {
  2325. .ep = {
  2326. .name = "ep11-bulk",
  2327. .ops = &lpc32xx_ep_ops,
  2328. },
  2329. .udc = &controller,
  2330. .maxpacket = 64,
  2331. .hwep_num_base = 22,
  2332. .hwep_num = 0,
  2333. .lep = 11,
  2334. .eptype = EP_BLK_TYPE,
  2335. },
  2336. .ep[12] = {
  2337. .ep = {
  2338. .name = "ep12-iso",
  2339. .ops = &lpc32xx_ep_ops,
  2340. },
  2341. .udc = &controller,
  2342. .maxpacket = 1023,
  2343. .hwep_num_base = 24,
  2344. .hwep_num = 0,
  2345. .lep = 12,
  2346. .eptype = EP_ISO_TYPE,
  2347. },
  2348. .ep[13] = {
  2349. .ep = {
  2350. .name = "ep13-int",
  2351. .ops = &lpc32xx_ep_ops,
  2352. },
  2353. .udc = &controller,
  2354. .maxpacket = 64,
  2355. .hwep_num_base = 26,
  2356. .hwep_num = 0,
  2357. .lep = 13,
  2358. .eptype = EP_INT_TYPE,
  2359. },
  2360. .ep[14] = {
  2361. .ep = {
  2362. .name = "ep14-bulk",
  2363. .ops = &lpc32xx_ep_ops,
  2364. },
  2365. .udc = &controller,
  2366. .maxpacket = 64,
  2367. .hwep_num_base = 28,
  2368. .hwep_num = 0,
  2369. .lep = 14,
  2370. .eptype = EP_BLK_TYPE,
  2371. },
  2372. .ep[15] = {
  2373. .ep = {
  2374. .name = "ep15-bulk",
  2375. .ops = &lpc32xx_ep_ops,
  2376. },
  2377. .udc = &controller,
  2378. .maxpacket = 1023,
  2379. .hwep_num_base = 30,
  2380. .hwep_num = 0,
  2381. .lep = 15,
  2382. .eptype = EP_BLK_TYPE,
  2383. },
  2384. };
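/* Each logical endpoint in the table above owns a pair of hardware
 * endpoints: hwep_num_base is the OUT endpoint and hwep_num_base + 1 the
 * IN endpoint; lpc32xx_ep_enable() fills in hwep_num once the direction
 * requested by the descriptor is known. */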
  2385. /* ISO and status interrupts */
  2386. static irqreturn_t lpc32xx_usb_lp_irq(int irq, void *_udc)
  2387. {
  2388. u32 tmp, devstat;
  2389. struct lpc32xx_udc *udc = _udc;
  2390. spin_lock(&udc->lock);
  2391. /* Read the device status register */
  2392. devstat = readl(USBD_DEVINTST(udc->udp_baseaddr));
  2393. devstat &= ~USBD_EP_FAST;
  2394. writel(devstat, USBD_DEVINTCLR(udc->udp_baseaddr));
  2395. devstat = devstat & udc->enabled_devints;
  2396. /* Device specific handling needed? */
  2397. if (devstat & USBD_DEV_STAT)
  2398. udc_handle_dev(udc);
  2399. /* Start of frame? (devstat & FRAME_INT):
  2400. * The frame interrupt isn't really needed for ISO support,
  2401. * as the driver will queue the necessary packets */
  2402. /* Error? */
  2403. if (devstat & ERR_INT) {
  2404. /* All types of errors, from cable removal during transfer to
2405. * misc protocol and bit errors. These are mostly informational,
2406. * as the USB hardware will work around them. If these errors
2407. * happen a lot, something is wrong. */
  2408. udc_protocol_cmd_w(udc, CMD_RD_ERR_STAT);
  2409. tmp = udc_protocol_cmd_r(udc, DAT_RD_ERR_STAT);
  2410. dev_dbg(udc->dev, "Device error (0x%x)!\n", tmp);
  2411. }
  2412. spin_unlock(&udc->lock);
  2413. return IRQ_HANDLED;
  2414. }
  2415. /* EP interrupts */
  2416. static irqreturn_t lpc32xx_usb_hp_irq(int irq, void *_udc)
  2417. {
  2418. u32 tmp;
  2419. struct lpc32xx_udc *udc = _udc;
  2420. spin_lock(&udc->lock);
  2421. /* Read the device status register */
  2422. writel(USBD_EP_FAST, USBD_DEVINTCLR(udc->udp_baseaddr));
  2423. /* Endpoints */
  2424. tmp = readl(USBD_EPINTST(udc->udp_baseaddr));
  2425. /* Special handling for EP0 */
  2426. if (tmp & (EP_MASK_SEL(0, EP_OUT) | EP_MASK_SEL(0, EP_IN))) {
  2427. /* Handle EP0 IN */
  2428. if (tmp & (EP_MASK_SEL(0, EP_IN)))
  2429. udc_handle_ep0_in(udc);
  2430. /* Handle EP0 OUT */
  2431. if (tmp & (EP_MASK_SEL(0, EP_OUT)))
  2432. udc_handle_ep0_out(udc);
  2433. }
  2434. /* All other EPs */
  2435. if (tmp & ~(EP_MASK_SEL(0, EP_OUT) | EP_MASK_SEL(0, EP_IN))) {
  2436. int i;
  2437. /* Handle other EP interrupts */
  2438. for (i = 1; i < NUM_ENDPOINTS; i++) {
  2439. if (tmp & (1 << udc->ep[i].hwep_num))
  2440. udc_handle_eps(udc, &udc->ep[i]);
  2441. }
  2442. }
  2443. spin_unlock(&udc->lock);
  2444. return IRQ_HANDLED;
  2445. }
  2446. static irqreturn_t lpc32xx_usb_devdma_irq(int irq, void *_udc)
  2447. {
  2448. struct lpc32xx_udc *udc = _udc;
  2449. int i;
  2450. u32 tmp;
  2451. spin_lock(&udc->lock);
  2452. /* Handle EP DMA EOT interrupts */
  2453. tmp = readl(USBD_EOTINTST(udc->udp_baseaddr)) |
  2454. (readl(USBD_EPDMAST(udc->udp_baseaddr)) &
  2455. readl(USBD_NDDRTINTST(udc->udp_baseaddr))) |
  2456. readl(USBD_SYSERRTINTST(udc->udp_baseaddr));
  2457. for (i = 1; i < NUM_ENDPOINTS; i++) {
  2458. if (tmp & (1 << udc->ep[i].hwep_num))
  2459. udc_handle_dma_ep(udc, &udc->ep[i]);
  2460. }
  2461. spin_unlock(&udc->lock);
  2462. return IRQ_HANDLED;
  2463. }
  2464. /*
  2465. *
  2466. * VBUS detection, pullup handler, and Gadget cable state notification
  2467. *
  2468. */
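/* Flow: the ISP1301 transceiver interrupt (IRQ_USB_ATX) is masked in the
 * hard IRQ handler and vbus_work is scheduled; the work item reads the
 * transceiver's OTG status over I2C, updates udc->vbus, calls
 * lpc32xx_vbus_session() on any change, and finally re-enables the IRQ. */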
  2469. static void vbus_work(struct work_struct *work)
  2470. {
  2471. u8 value;
  2472. struct lpc32xx_udc *udc = container_of(work, struct lpc32xx_udc,
  2473. vbus_job);
  2474. if (udc->enabled != 0) {
  2475. /* Discharge VBUS real quick */
  2476. i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
  2477. ISP1301_I2C_OTG_CONTROL_1, OTG1_VBUS_DISCHRG);
2478. /* Give VBUS some time (100 ms) to discharge */
  2479. msleep(100);
  2480. /* Disable VBUS discharge resistor */
  2481. i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
  2482. ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR,
  2483. OTG1_VBUS_DISCHRG);
  2484. /* Clear interrupt */
  2485. i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
  2486. ISP1301_I2C_INTERRUPT_LATCH |
  2487. ISP1301_I2C_REG_CLEAR_ADDR, ~0);
  2488. /* Get the VBUS status from the transceiver */
  2489. value = i2c_smbus_read_byte_data(udc->isp1301_i2c_client,
  2490. ISP1301_I2C_OTG_CONTROL_2);
  2491. /* VBUS on or off? */
  2492. if (value & OTG_B_SESS_VLD)
  2493. udc->vbus = 1;
  2494. else
  2495. udc->vbus = 0;
  2496. /* VBUS changed? */
  2497. if (udc->last_vbus != udc->vbus) {
  2498. udc->last_vbus = udc->vbus;
  2499. lpc32xx_vbus_session(&udc->gadget, udc->vbus);
  2500. }
  2501. }
  2502. /* Re-enable after completion */
  2503. enable_irq(udc->udp_irq[IRQ_USB_ATX]);
  2504. }
  2505. static irqreturn_t lpc32xx_usb_vbus_irq(int irq, void *_udc)
  2506. {
  2507. struct lpc32xx_udc *udc = _udc;
  2508. /* Defer handling of VBUS IRQ to work queue */
  2509. disable_irq_nosync(udc->udp_irq[IRQ_USB_ATX]);
  2510. schedule_work(&udc->vbus_job);
  2511. return IRQ_HANDLED;
  2512. }
  2513. static int lpc32xx_start(struct usb_gadget_driver *driver,
  2514. int (*bind)(struct usb_gadget *))
  2515. {
  2516. struct lpc32xx_udc *udc = &controller;
  2517. int retval, i;
  2518. if (!driver || driver->max_speed < USB_SPEED_FULL ||
  2519. !bind || !driver->setup) {
  2520. dev_err(udc->dev, "bad parameter.\n");
  2521. return -EINVAL;
  2522. }
  2523. if (udc->driver) {
  2524. dev_err(udc->dev, "UDC already has a gadget driver\n");
  2525. return -EBUSY;
  2526. }
  2527. udc->driver = driver;
  2528. udc->gadget.dev.driver = &driver->driver;
  2529. udc->gadget.dev.of_node = udc->dev->of_node;
  2530. udc->enabled = 1;
  2531. udc->selfpowered = 1;
  2532. udc->vbus = 0;
  2533. retval = bind(&udc->gadget);
  2534. if (retval) {
  2535. dev_err(udc->dev, "bind() returned %d\n", retval);
  2536. udc->enabled = 0;
  2537. udc->selfpowered = 0;
  2538. udc->driver = NULL;
  2539. udc->gadget.dev.driver = NULL;
  2540. return retval;
  2541. }
  2542. dev_dbg(udc->dev, "bound to %s\n", driver->driver.name);
  2543. /* Force VBUS process once to check for cable insertion */
  2544. udc->last_vbus = udc->vbus = 0;
  2545. schedule_work(&udc->vbus_job);
  2546. /* Do not re-enable ATX IRQ (3) */
  2547. for (i = IRQ_USB_LP; i < IRQ_USB_ATX; i++)
  2548. enable_irq(udc->udp_irq[i]);
  2549. return 0;
  2550. }
  2551. static int lpc32xx_stop(struct usb_gadget_driver *driver)
  2552. {
  2553. int i;
  2554. struct lpc32xx_udc *udc = &controller;
  2555. if (!driver || driver != udc->driver || !driver->unbind)
  2556. return -EINVAL;
  2557. /* Disable USB pullup */
  2558. isp1301_pullup_enable(udc, 0, 1);
  2559. for (i = IRQ_USB_LP; i <= IRQ_USB_ATX; i++)
  2560. disable_irq(udc->udp_irq[i]);
  2561. if (udc->clocked) {
  2562. spin_lock(&udc->lock);
  2563. stop_activity(udc);
  2564. spin_unlock(&udc->lock);
  2565. /*
  2566. * Wait for all the endpoints to disable,
  2567. * before disabling clocks. Don't wait if
  2568. * endpoints are not enabled.
  2569. */
  2570. if (atomic_read(&udc->enabled_ep_cnt))
  2571. wait_event_interruptible(udc->ep_disable_wait_queue,
  2572. (atomic_read(&udc->enabled_ep_cnt) == 0));
  2573. spin_lock(&udc->lock);
  2574. udc_clk_set(udc, 0);
  2575. spin_unlock(&udc->lock);
  2576. }
  2577. udc->enabled = 0;
  2578. pullup(udc, 0);
  2579. driver->unbind(&udc->gadget);
  2580. udc->gadget.dev.driver = NULL;
  2581. udc->driver = NULL;
  2582. dev_dbg(udc->dev, "unbound from %s\n", driver->driver.name);
  2583. return 0;
  2584. }
  2585. static void lpc32xx_udc_shutdown(struct platform_device *dev)
  2586. {
  2587. /* Force disconnect on reboot */
  2588. struct lpc32xx_udc *udc = &controller;
  2589. pullup(udc, 0);
  2590. }
  2591. /*
  2592. * Callbacks to be overridden by options passed via OF (TODO)
  2593. */
  2594. static void lpc32xx_usbd_conn_chg(int conn)
  2595. {
  2596. /* Do nothing, it might be nice to enable an LED
  2597. * based on conn state being !0 */
  2598. }
  2599. static void lpc32xx_usbd_susp_chg(int susp)
  2600. {
  2601. /* Device suspend if susp != 0 */
  2602. }
  2603. static void lpc32xx_rmwkup_chg(int remote_wakup_enable)
  2604. {
  2605. /* Enable or disable USB remote wakeup */
  2606. }
  2607. struct lpc32xx_usbd_cfg lpc32xx_usbddata = {
  2608. .vbus_drv_pol = 0,
  2609. .conn_chgb = &lpc32xx_usbd_conn_chg,
  2610. .susp_chgb = &lpc32xx_usbd_susp_chg,
  2611. .rmwk_chgb = &lpc32xx_rmwkup_chg,
  2612. };
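/* These no-op implementations are the default board hooks; probe points
 * udc->board at this structure, and the uda_*_event()/uda_remwkp_cgh()
 * helpers invoke conn_chgb, susp_chgb and rmwk_chgb to notify the board of
 * connect, suspend and remote-wakeup changes. */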
  2613. static u64 lpc32xx_usbd_dmamask = ~(u32) 0x7F;
  2614. static int __init lpc32xx_udc_probe(struct platform_device *pdev)
  2615. {
  2616. struct device *dev = &pdev->dev;
  2617. struct lpc32xx_udc *udc = &controller;
  2618. int retval, i;
  2619. struct resource *res;
  2620. dma_addr_t dma_handle;
  2621. struct device_node *isp1301_node;
  2622. /* init software state */
  2623. udc->gadget.dev.parent = dev;
  2624. udc->pdev = pdev;
  2625. udc->dev = &pdev->dev;
  2626. udc->enabled = 0;
  2627. if (pdev->dev.of_node) {
  2628. isp1301_node = of_parse_phandle(pdev->dev.of_node,
  2629. "transceiver", 0);
  2630. } else {
  2631. isp1301_node = NULL;
  2632. }
  2633. udc->isp1301_i2c_client = isp1301_get_client(isp1301_node);
  2634. if (!udc->isp1301_i2c_client)
  2635. return -EPROBE_DEFER;
  2636. dev_info(udc->dev, "ISP1301 I2C device at address 0x%x\n",
  2637. udc->isp1301_i2c_client->addr);
  2638. pdev->dev.dma_mask = &lpc32xx_usbd_dmamask;
  2639. pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
  2640. udc->board = &lpc32xx_usbddata;
  2641. /*
  2642. * Resources are mapped as follows:
  2643. * IORESOURCE_MEM, base address and size of USB space
  2644. * IORESOURCE_IRQ, USB device low priority interrupt number
  2645. * IORESOURCE_IRQ, USB device high priority interrupt number
  2646. * IORESOURCE_IRQ, USB device interrupt number
  2647. * IORESOURCE_IRQ, USB transceiver interrupt number
  2648. */
  2649. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  2650. if (!res)
  2651. return -ENXIO;
  2652. spin_lock_init(&udc->lock);
  2653. /* Get IRQs */
  2654. for (i = 0; i < 4; i++) {
  2655. udc->udp_irq[i] = platform_get_irq(pdev, i);
  2656. if (udc->udp_irq[i] < 0) {
  2657. dev_err(udc->dev,
  2658. "irq resource %d not available!\n", i);
  2659. return udc->udp_irq[i];
  2660. }
  2661. }
  2662. udc->io_p_start = res->start;
  2663. udc->io_p_size = resource_size(res);
  2664. if (!request_mem_region(udc->io_p_start, udc->io_p_size, driver_name)) {
  2665. dev_err(udc->dev, "someone's using UDC memory\n");
  2666. return -EBUSY;
  2667. }
  2668. udc->udp_baseaddr = ioremap(udc->io_p_start, udc->io_p_size);
  2669. if (!udc->udp_baseaddr) {
  2670. retval = -ENOMEM;
  2671. dev_err(udc->dev, "IO map failure\n");
  2672. goto io_map_fail;
  2673. }
  2674. /* Enable AHB slave USB clock, needed for further USB clock control */
  2675. writel(USB_SLAVE_HCLK_EN | (1 << 19), USB_CTRL);
  2676. /* Get required clocks */
  2677. udc->usb_pll_clk = clk_get(&pdev->dev, "ck_pll5");
  2678. if (IS_ERR(udc->usb_pll_clk)) {
  2679. dev_err(udc->dev, "failed to acquire USB PLL\n");
  2680. retval = PTR_ERR(udc->usb_pll_clk);
  2681. goto pll_get_fail;
  2682. }
  2683. udc->usb_slv_clk = clk_get(&pdev->dev, "ck_usbd");
  2684. if (IS_ERR(udc->usb_slv_clk)) {
  2685. dev_err(udc->dev, "failed to acquire USB device clock\n");
  2686. retval = PTR_ERR(udc->usb_slv_clk);
  2687. goto usb_clk_get_fail;
  2688. }
  2689. udc->usb_otg_clk = clk_get(&pdev->dev, "ck_usb_otg");
  2690. if (IS_ERR(udc->usb_otg_clk)) {
  2691. dev_err(udc->dev, "failed to acquire USB otg clock\n");
2692. retval = PTR_ERR(udc->usb_otg_clk);
  2693. goto usb_otg_clk_get_fail;
  2694. }
  2695. /* Setup PLL clock to 48MHz */
  2696. retval = clk_enable(udc->usb_pll_clk);
  2697. if (retval < 0) {
  2698. dev_err(udc->dev, "failed to start USB PLL\n");
  2699. goto pll_enable_fail;
  2700. }
  2701. retval = clk_set_rate(udc->usb_pll_clk, 48000);
  2702. if (retval < 0) {
  2703. dev_err(udc->dev, "failed to set USB clock rate\n");
  2704. goto pll_set_fail;
  2705. }
  2706. writel(readl(USB_CTRL) | USB_DEV_NEED_CLK_EN, USB_CTRL);
  2707. /* Enable USB device clock */
  2708. retval = clk_enable(udc->usb_slv_clk);
  2709. if (retval < 0) {
  2710. dev_err(udc->dev, "failed to start USB device clock\n");
  2711. goto usb_clk_enable_fail;
  2712. }
  2713. /* Enable USB OTG clock */
  2714. retval = clk_enable(udc->usb_otg_clk);
  2715. if (retval < 0) {
  2716. dev_err(udc->dev, "failed to start USB otg clock\n");
  2717. goto usb_otg_clk_enable_fail;
  2718. }
  2719. /* Setup deferred workqueue data */
  2720. udc->poweron = udc->pullup = 0;
  2721. INIT_WORK(&udc->pullup_job, pullup_work);
  2722. INIT_WORK(&udc->vbus_job, vbus_work);
  2723. #ifdef CONFIG_PM
  2724. INIT_WORK(&udc->power_job, power_work);
  2725. #endif
  2726. /* All clocks are now on */
  2727. udc->clocked = 1;
  2728. isp1301_udc_configure(udc);
  2729. /* Allocate memory for the UDCA */
  2730. udc->udca_v_base = dma_alloc_coherent(&pdev->dev, UDCA_BUFF_SIZE,
  2731. &dma_handle,
  2732. (GFP_KERNEL | GFP_DMA));
  2733. if (!udc->udca_v_base) {
  2734. dev_err(udc->dev, "error getting UDCA region\n");
  2735. retval = -ENOMEM;
  2736. goto i2c_fail;
  2737. }
  2738. udc->udca_p_base = dma_handle;
  2739. dev_dbg(udc->dev, "DMA buffer(0x%x bytes), P:0x%08x, V:0x%p\n",
  2740. UDCA_BUFF_SIZE, udc->udca_p_base, udc->udca_v_base);
  2741. /* Setup the DD DMA memory pool */
  2742. udc->dd_cache = dma_pool_create("udc_dd", udc->dev,
  2743. sizeof(struct lpc32xx_usbd_dd_gad),
  2744. sizeof(u32), 0);
  2745. if (!udc->dd_cache) {
  2746. dev_err(udc->dev, "error getting DD DMA region\n");
  2747. retval = -ENOMEM;
  2748. goto dma_alloc_fail;
  2749. }
  2750. /* Clear USB peripheral and initialize gadget endpoints */
  2751. udc_disable(udc);
  2752. udc_reinit(udc);
  2753. retval = device_register(&udc->gadget.dev);
  2754. if (retval < 0) {
  2755. dev_err(udc->dev, "Device registration failure\n");
  2756. goto dev_register_fail;
  2757. }
	/* Request IRQs - low and high priority USB device IRQs are routed to
	 * the same handler, while the DMA interrupt is routed elsewhere */
	retval = request_irq(udc->udp_irq[IRQ_USB_LP], lpc32xx_usb_lp_irq,
			     0, "udc_lp", udc);
	if (retval < 0) {
		dev_err(udc->dev, "LP request irq %d failed\n",
			udc->udp_irq[IRQ_USB_LP]);
		goto irq_lp_fail;
	}
	retval = request_irq(udc->udp_irq[IRQ_USB_HP], lpc32xx_usb_hp_irq,
			     0, "udc_hp", udc);
	if (retval < 0) {
		dev_err(udc->dev, "HP request irq %d failed\n",
			udc->udp_irq[IRQ_USB_HP]);
		goto irq_hp_fail;
	}
	retval = request_irq(udc->udp_irq[IRQ_USB_DEVDMA],
			     lpc32xx_usb_devdma_irq, 0, "udc_dma", udc);
	if (retval < 0) {
		dev_err(udc->dev, "DEV request irq %d failed\n",
			udc->udp_irq[IRQ_USB_DEVDMA]);
		goto irq_dev_fail;
	}

	/* The transceiver interrupt is used for VBUS detection and will
	   kick off the VBUS handler function */
	retval = request_irq(udc->udp_irq[IRQ_USB_ATX], lpc32xx_usb_vbus_irq,
			     0, "udc_otg", udc);
	if (retval < 0) {
		dev_err(udc->dev, "VBUS request irq %d failed\n",
			udc->udp_irq[IRQ_USB_ATX]);
		goto irq_xcvr_fail;
	}
  2790. /* Initialize wait queue */
  2791. init_waitqueue_head(&udc->ep_disable_wait_queue);
  2792. atomic_set(&udc->enabled_ep_cnt, 0);
  2793. /* Keep all IRQs disabled until GadgetFS starts up */
  2794. for (i = IRQ_USB_LP; i <= IRQ_USB_ATX; i++)
  2795. disable_irq(udc->udp_irq[i]);
  2796. retval = usb_add_gadget_udc(dev, &udc->gadget);
  2797. if (retval < 0)
  2798. goto add_gadget_fail;
  2799. dev_set_drvdata(dev, udc);
  2800. device_init_wakeup(dev, 1);
  2801. create_debug_file(udc);
  2802. /* Disable clocks for now */
  2803. udc_clk_set(udc, 0);
  2804. dev_info(udc->dev, "%s version %s\n", driver_name, DRIVER_VERSION);
  2805. return 0;
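	/* Error path: undo the initialization steps above in reverse order */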
add_gadget_fail:
	free_irq(udc->udp_irq[IRQ_USB_ATX], udc);
irq_xcvr_fail:
	free_irq(udc->udp_irq[IRQ_USB_DEVDMA], udc);
irq_dev_fail:
	free_irq(udc->udp_irq[IRQ_USB_HP], udc);
irq_hp_fail:
	free_irq(udc->udp_irq[IRQ_USB_LP], udc);
irq_lp_fail:
	device_unregister(&udc->gadget.dev);
dev_register_fail:
	dma_pool_destroy(udc->dd_cache);
dma_alloc_fail:
	dma_free_coherent(&pdev->dev, UDCA_BUFF_SIZE,
			  udc->udca_v_base, udc->udca_p_base);
i2c_fail:
	clk_disable(udc->usb_otg_clk);
usb_otg_clk_enable_fail:
	clk_disable(udc->usb_slv_clk);
usb_clk_enable_fail:
pll_set_fail:
	clk_disable(udc->usb_pll_clk);
pll_enable_fail:
	clk_put(udc->usb_otg_clk);
usb_otg_clk_get_fail:
	clk_put(udc->usb_slv_clk);
usb_clk_get_fail:
	clk_put(udc->usb_pll_clk);
pll_get_fail:
	iounmap(udc->udp_baseaddr);
io_map_fail:
	release_mem_region(udc->io_p_start, udc->io_p_size);
	dev_err(udc->dev, "%s probe failed, %d\n", driver_name, retval);

	return retval;
}
static int __devexit lpc32xx_udc_remove(struct platform_device *pdev)
{
	struct lpc32xx_udc *udc = platform_get_drvdata(pdev);

	usb_del_gadget_udc(&udc->gadget);
	if (udc->driver)
		return -EBUSY;
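	/* Re-enable the UDC clocks so the peripheral can be shut down cleanly */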
	udc_clk_set(udc, 1);
	udc_disable(udc);
	pullup(udc, 0);

	free_irq(udc->udp_irq[IRQ_USB_ATX], udc);

	device_init_wakeup(&pdev->dev, 0);
	remove_debug_file(udc);

	dma_pool_destroy(udc->dd_cache);
	dma_free_coherent(&pdev->dev, UDCA_BUFF_SIZE,
			  udc->udca_v_base, udc->udca_p_base);
	free_irq(udc->udp_irq[IRQ_USB_DEVDMA], udc);
	free_irq(udc->udp_irq[IRQ_USB_HP], udc);
	free_irq(udc->udp_irq[IRQ_USB_LP], udc);

	device_unregister(&udc->gadget.dev);

	clk_disable(udc->usb_otg_clk);
	clk_put(udc->usb_otg_clk);
	clk_disable(udc->usb_slv_clk);
	clk_put(udc->usb_slv_clk);
	clk_disable(udc->usb_pll_clk);
	clk_put(udc->usb_pll_clk);
	iounmap(udc->udp_baseaddr);
	release_mem_region(udc->io_p_start, udc->io_p_size);

	return 0;
}
#ifdef CONFIG_PM
static int lpc32xx_udc_suspend(struct platform_device *pdev, pm_message_t mesg)
{
	struct lpc32xx_udc *udc = platform_get_drvdata(pdev);

	if (udc->clocked) {
		/* Power down ISP */
		udc->poweron = 0;
		isp1301_set_powerstate(udc, 0);

		/* Disable clocking */
		udc_clk_set(udc, 0);

		/* Keep clock flag on, so we know to re-enable clocks
		   on resume */
		udc->clocked = 1;

		/* Kill global USB clock */
		clk_disable(udc->usb_slv_clk);
	}

	return 0;
}

static int lpc32xx_udc_resume(struct platform_device *pdev)
{
	struct lpc32xx_udc *udc = platform_get_drvdata(pdev);

	if (udc->clocked) {
		/* Enable global USB clock */
		clk_enable(udc->usb_slv_clk);

		/* Enable clocking */
		udc_clk_set(udc, 1);

		/* ISP back to normal power mode */
		udc->poweron = 1;
		isp1301_set_powerstate(udc, 1);
	}

	return 0;
}
#else
#define lpc32xx_udc_suspend	NULL
#define lpc32xx_udc_resume	NULL
#endif
#ifdef CONFIG_OF
static struct of_device_id lpc32xx_udc_of_match[] = {
	{ .compatible = "nxp,lpc3220-udc", },
	{ },
};
MODULE_DEVICE_TABLE(of, lpc32xx_udc_of_match);
#endif
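/* No .probe callback here: the probe routine is passed to
 * platform_driver_probe() in udc_init_module() below instead.
 */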
static struct platform_driver lpc32xx_udc_driver = {
	.remove		= __devexit_p(lpc32xx_udc_remove),
	.shutdown	= lpc32xx_udc_shutdown,
	.suspend	= lpc32xx_udc_suspend,
	.resume		= lpc32xx_udc_resume,
	.driver		= {
		.name	= (char *) driver_name,
		.owner	= THIS_MODULE,
		.of_match_table = of_match_ptr(lpc32xx_udc_of_match),
	},
};
static int __init udc_init_module(void)
{
	return platform_driver_probe(&lpc32xx_udc_driver, lpc32xx_udc_probe);
}
module_init(udc_init_module);

static void __exit udc_exit_module(void)
{
	platform_driver_unregister(&lpc32xx_udc_driver);
}
module_exit(udc_exit_module);

MODULE_DESCRIPTION("LPC32XX udc driver");
MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:lpc32xx_udc");