udc.c 44 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832
  1. /*
  2. * udc.c - ChipIdea UDC driver
  3. *
  4. * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
  5. *
  6. * Author: David Lopo
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License version 2 as
  10. * published by the Free Software Foundation.
  11. */
  12. #include <linux/delay.h>
  13. #include <linux/device.h>
  14. #include <linux/dmapool.h>
  15. #include <linux/dma-mapping.h>
  16. #include <linux/init.h>
  17. #include <linux/platform_device.h>
  18. #include <linux/module.h>
  19. #include <linux/interrupt.h>
  20. #include <linux/io.h>
  21. #include <linux/irq.h>
  22. #include <linux/kernel.h>
  23. #include <linux/slab.h>
  24. #include <linux/pm_runtime.h>
  25. #include <linux/usb/ch9.h>
  26. #include <linux/usb/gadget.h>
  27. #include <linux/usb/otg.h>
  28. #include <linux/usb/chipidea.h>
  29. #include "ci.h"
  30. #include "udc.h"
  31. #include "bits.h"
  32. #include "debug.h"
/* control endpoint description */
static const struct usb_endpoint_descriptor
ctrl_endpt_out_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	/* EP0 OUT: address 0, control type, fixed max payload */
	.bEndpointAddress = USB_DIR_OUT,
	.bmAttributes = USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize = cpu_to_le16(CTRL_PAYLOAD_MAX),
};
/* EP0 IN counterpart of ctrl_endpt_out_desc */
static const struct usb_endpoint_descriptor
ctrl_endpt_in_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize = cpu_to_le16(CTRL_PAYLOAD_MAX),
};
  50. /**
  51. * hw_ep_bit: calculates the bit number
  52. * @num: endpoint number
  53. * @dir: endpoint direction
  54. *
  55. * This function returns bit number
  56. */
  57. static inline int hw_ep_bit(int num, int dir)
  58. {
  59. return num + (dir ? 16 : 0);
  60. }
  61. static inline int ep_to_bit(struct ci13xxx *udc, int n)
  62. {
  63. int fill = 16 - udc->hw_ep_max / 2;
  64. if (n >= udc->hw_ep_max / 2)
  65. n += fill;
  66. return n;
  67. }
  68. /**
  69. * hw_device_state: enables/disables interrupts & starts/stops device (execute
  70. * without interruption)
  71. * @dma: 0 => disable, !0 => enable and set dma engine
  72. *
  73. * This function returns an error code
  74. */
  75. static int hw_device_state(struct ci13xxx *udc, u32 dma)
  76. {
  77. if (dma) {
  78. hw_write(udc, OP_ENDPTLISTADDR, ~0, dma);
  79. /* interrupt, error, port change, reset, sleep/suspend */
  80. hw_write(udc, OP_USBINTR, ~0,
  81. USBi_UI|USBi_UEI|USBi_PCI|USBi_URI|USBi_SLI);
  82. hw_write(udc, OP_USBCMD, USBCMD_RS, USBCMD_RS);
  83. } else {
  84. hw_write(udc, OP_USBCMD, USBCMD_RS, 0);
  85. hw_write(udc, OP_USBINTR, ~0, 0);
  86. }
  87. return 0;
  88. }
/**
 * hw_ep_flush: flush endpoint fifo (execute without interruption)
 * @udc: UDC device
 * @num: endpoint number
 * @dir: endpoint direction
 *
 * This function returns an error code (always 0)
 */
static int hw_ep_flush(struct ci13xxx *udc, int num, int dir)
{
	int n = hw_ep_bit(num, dir);
	do {
		/* flush any pending transfer */
		hw_write(udc, OP_ENDPTFLUSH, BIT(n), BIT(n));
		/* wait for the controller to consume the flush request */
		while (hw_read(udc, OP_ENDPTFLUSH, BIT(n)))
			cpu_relax();
	/* re-flush while ENDPTSTAT still shows the endpoint as primed */
	} while (hw_read(udc, OP_ENDPTSTAT, BIT(n)));
	return 0;
}
  107. /**
  108. * hw_ep_disable: disables endpoint (execute without interruption)
  109. * @num: endpoint number
  110. * @dir: endpoint direction
  111. *
  112. * This function returns an error code
  113. */
  114. static int hw_ep_disable(struct ci13xxx *udc, int num, int dir)
  115. {
  116. hw_ep_flush(udc, num, dir);
  117. hw_write(udc, OP_ENDPTCTRL + num,
  118. dir ? ENDPTCTRL_TXE : ENDPTCTRL_RXE, 0);
  119. return 0;
  120. }
  121. /**
  122. * hw_ep_enable: enables endpoint (execute without interruption)
  123. * @num: endpoint number
  124. * @dir: endpoint direction
  125. * @type: endpoint type
  126. *
  127. * This function returns an error code
  128. */
  129. static int hw_ep_enable(struct ci13xxx *udc, int num, int dir, int type)
  130. {
  131. u32 mask, data;
  132. if (dir) {
  133. mask = ENDPTCTRL_TXT; /* type */
  134. data = type << ffs_nr(mask);
  135. mask |= ENDPTCTRL_TXS; /* unstall */
  136. mask |= ENDPTCTRL_TXR; /* reset data toggle */
  137. data |= ENDPTCTRL_TXR;
  138. mask |= ENDPTCTRL_TXE; /* enable */
  139. data |= ENDPTCTRL_TXE;
  140. } else {
  141. mask = ENDPTCTRL_RXT; /* type */
  142. data = type << ffs_nr(mask);
  143. mask |= ENDPTCTRL_RXS; /* unstall */
  144. mask |= ENDPTCTRL_RXR; /* reset data toggle */
  145. data |= ENDPTCTRL_RXR;
  146. mask |= ENDPTCTRL_RXE; /* enable */
  147. data |= ENDPTCTRL_RXE;
  148. }
  149. hw_write(udc, OP_ENDPTCTRL + num, mask, data);
  150. return 0;
  151. }
  152. /**
  153. * hw_ep_get_halt: return endpoint halt status
  154. * @num: endpoint number
  155. * @dir: endpoint direction
  156. *
  157. * This function returns 1 if endpoint halted
  158. */
  159. static int hw_ep_get_halt(struct ci13xxx *udc, int num, int dir)
  160. {
  161. u32 mask = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
  162. return hw_read(udc, OP_ENDPTCTRL + num, mask) ? 1 : 0;
  163. }
  164. /**
  165. * hw_test_and_clear_setup_status: test & clear setup status (execute without
  166. * interruption)
  167. * @n: endpoint number
  168. *
  169. * This function returns setup status
  170. */
  171. static int hw_test_and_clear_setup_status(struct ci13xxx *udc, int n)
  172. {
  173. n = ep_to_bit(udc, n);
  174. return hw_test_and_clear(udc, OP_ENDPTSETUPSTAT, BIT(n));
  175. }
/**
 * hw_ep_prime: primes endpoint (execute without interruption)
 * @udc: UDC device
 * @num: endpoint number
 * @dir: endpoint direction
 * @is_ctrl: true if control endpoint
 *
 * This function returns an error code; -EAGAIN means a setup packet
 * raced with the prime and the caller should retry
 */
static int hw_ep_prime(struct ci13xxx *udc, int num, int dir, int is_ctrl)
{
	int n = hw_ep_bit(num, dir);
	/* a pending setup token invalidates priming the control OUT ep */
	if (is_ctrl && dir == RX && hw_read(udc, OP_ENDPTSETUPSTAT, BIT(num)))
		return -EAGAIN;
	hw_write(udc, OP_ENDPTPRIME, BIT(n), BIT(n));
	/* busy-wait until the controller consumes the prime request */
	while (hw_read(udc, OP_ENDPTPRIME, BIT(n)))
		cpu_relax();
	/* re-check: a setup packet may have arrived while we primed */
	if (is_ctrl && dir == RX && hw_read(udc, OP_ENDPTSETUPSTAT, BIT(num)))
		return -EAGAIN;
	/* status should be tested according to the manual but it doesn't work */
	return 0;
}
  197. /**
  198. * hw_ep_set_halt: configures ep halt & resets data toggle after clear (execute
  199. * without interruption)
  200. * @num: endpoint number
  201. * @dir: endpoint direction
  202. * @value: true => stall, false => unstall
  203. *
  204. * This function returns an error code
  205. */
  206. static int hw_ep_set_halt(struct ci13xxx *udc, int num, int dir, int value)
  207. {
  208. if (value != 0 && value != 1)
  209. return -EINVAL;
  210. do {
  211. enum ci13xxx_regs reg = OP_ENDPTCTRL + num;
  212. u32 mask_xs = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
  213. u32 mask_xr = dir ? ENDPTCTRL_TXR : ENDPTCTRL_RXR;
  214. /* data toggle - reserved for EP0 but it's in ESS */
  215. hw_write(udc, reg, mask_xs|mask_xr,
  216. value ? mask_xs : mask_xr);
  217. } while (value != hw_ep_get_halt(udc, num, dir));
  218. return 0;
  219. }
  220. /**
  221. * hw_is_port_high_speed: test if port is high speed
  222. *
  223. * This function returns true if high speed port
  224. */
  225. static int hw_port_is_high_speed(struct ci13xxx *udc)
  226. {
  227. return udc->hw_bank.lpm ? hw_read(udc, OP_DEVLC, DEVLC_PSPD) :
  228. hw_read(udc, OP_PORTSC, PORTSC_HSP);
  229. }
  230. /**
  231. * hw_read_intr_enable: returns interrupt enable register
  232. *
  233. * This function returns register data
  234. */
  235. static u32 hw_read_intr_enable(struct ci13xxx *udc)
  236. {
  237. return hw_read(udc, OP_USBINTR, ~0);
  238. }
  239. /**
  240. * hw_read_intr_status: returns interrupt status register
  241. *
  242. * This function returns register data
  243. */
  244. static u32 hw_read_intr_status(struct ci13xxx *udc)
  245. {
  246. return hw_read(udc, OP_USBSTS, ~0);
  247. }
  248. /**
  249. * hw_test_and_clear_complete: test & clear complete status (execute without
  250. * interruption)
  251. * @n: endpoint number
  252. *
  253. * This function returns complete status
  254. */
  255. static int hw_test_and_clear_complete(struct ci13xxx *udc, int n)
  256. {
  257. n = ep_to_bit(udc, n);
  258. return hw_test_and_clear(udc, OP_ENDPTCOMPLETE, BIT(n));
  259. }
  260. /**
  261. * hw_test_and_clear_intr_active: test & clear active interrupts (execute
  262. * without interruption)
  263. *
  264. * This function returns active interrutps
  265. */
  266. static u32 hw_test_and_clear_intr_active(struct ci13xxx *udc)
  267. {
  268. u32 reg = hw_read_intr_status(udc) & hw_read_intr_enable(udc);
  269. hw_write(udc, OP_USBSTS, ~0, reg);
  270. return reg;
  271. }
  272. /**
  273. * hw_test_and_clear_setup_guard: test & clear setup guard (execute without
  274. * interruption)
  275. *
  276. * This function returns guard value
  277. */
  278. static int hw_test_and_clear_setup_guard(struct ci13xxx *udc)
  279. {
  280. return hw_test_and_write(udc, OP_USBCMD, USBCMD_SUTW, 0);
  281. }
  282. /**
  283. * hw_test_and_set_setup_guard: test & set setup guard (execute without
  284. * interruption)
  285. *
  286. * This function returns guard value
  287. */
  288. static int hw_test_and_set_setup_guard(struct ci13xxx *udc)
  289. {
  290. return hw_test_and_write(udc, OP_USBCMD, USBCMD_SUTW, USBCMD_SUTW);
  291. }
  292. /**
  293. * hw_usb_set_address: configures USB address (execute without interruption)
  294. * @value: new USB address
  295. *
  296. * This function explicitly sets the address, without the "USBADRA" (advance)
  297. * feature, which is not supported by older versions of the controller.
  298. */
  299. static void hw_usb_set_address(struct ci13xxx *udc, u8 value)
  300. {
  301. hw_write(udc, OP_DEVICEADDR, DEVICEADDR_USBADR,
  302. value << ffs_nr(DEVICEADDR_USBADR));
  303. }
/**
 * hw_usb_reset: restart device after a bus reset (execute without
 * interruption)
 * @udc: UDC device
 *
 * This function returns an error code (always 0)
 */
static int hw_usb_reset(struct ci13xxx *udc)
{
	/* after a bus reset the device must answer on address 0 again */
	hw_usb_set_address(udc, 0);
	/* ESS flushes only at end?!? */
	hw_write(udc, OP_ENDPTFLUSH, ~0, ~0);
	/* clear setup token semaphores */
	hw_write(udc, OP_ENDPTSETUPSTAT, 0, 0);
	/* clear complete status */
	hw_write(udc, OP_ENDPTCOMPLETE, 0, 0);
	/* wait until all bits cleared */
	while (hw_read(udc, OP_ENDPTPRIME, ~0))
		udelay(10); /* not RTOS friendly */
	/* reset all endpoints ? */
	/* reset internal status and wait for further instructions
	   no need to verify the port reset status (ESS does it) */
	return 0;
}
  327. /******************************************************************************
  328. * UTIL block
  329. *****************************************************************************/
  330. /**
  331. * _usb_addr: calculates endpoint address from direction & number
  332. * @ep: endpoint
  333. */
  334. static inline u8 _usb_addr(struct ci13xxx_ep *ep)
  335. {
  336. return ((ep->dir == TX) ? USB_ENDPOINT_DIR_MASK : 0) | ep->num;
  337. }
/**
 * _hardware_enqueue: configures a request at hardware level
 * @mEp: endpoint
 * @mReq: request
 *
 * Maps the request buffer for DMA, builds the transfer descriptor
 * (plus an extra zero-length TD when a ZLP is needed), links it into
 * the endpoint's TD chain and primes the endpoint.
 *
 * This function returns an error code
 */
static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
{
	struct ci13xxx *udc = mEp->udc;
	unsigned i;
	int ret = 0;
	unsigned length = mReq->req.length;
	/* don't queue twice */
	if (mReq->req.status == -EALREADY)
		return -EALREADY;
	/* -EALREADY marks the request as owned by the hardware */
	mReq->req.status = -EALREADY;
	if (length && mReq->req.dma == DMA_ADDR_INVALID) {
		mReq->req.dma = \
			dma_map_single(mEp->device, mReq->req.buf,
				       length, mEp->dir ? DMA_TO_DEVICE :
				       DMA_FROM_DEVICE);
		if (mReq->req.dma == 0)
			return -ENOMEM;
		/* remember that *we* mapped it, so dequeue unmaps it */
		mReq->map = 1;
	}
	/* a max-packet-multiple transfer with req.zero needs a trailing ZLP TD */
	if (mReq->req.zero && length && (length % mEp->ep.maxpacket == 0)) {
		mReq->zptr = dma_pool_alloc(mEp->td_pool, GFP_ATOMIC,
					    &mReq->zdma);
		if (mReq->zptr == NULL) {
			/* undo the mapping done above before bailing out */
			if (mReq->map) {
				dma_unmap_single(mEp->device, mReq->req.dma,
						 length, mEp->dir ? DMA_TO_DEVICE :
						 DMA_FROM_DEVICE);
				mReq->req.dma = DMA_ADDR_INVALID;
				mReq->map = 0;
			}
			return -ENOMEM;
		}
		memset(mReq->zptr, 0, sizeof(*mReq->zptr));
		mReq->zptr->next = TD_TERMINATE;
		mReq->zptr->token = TD_STATUS_ACTIVE;
		if (!mReq->req.no_interrupt)
			mReq->zptr->token |= TD_IOC;
	}
	/*
	 * TD configuration
	 * TODO - handle requests which spawns into several TDs
	 */
	memset(mReq->ptr, 0, sizeof(*mReq->ptr));
	mReq->ptr->token = length << ffs_nr(TD_TOTAL_BYTES);
	mReq->ptr->token &= TD_TOTAL_BYTES;
	mReq->ptr->token |= TD_STATUS_ACTIVE;
	/* chain the ZLP TD behind the data TD, or terminate the list */
	if (mReq->zptr) {
		mReq->ptr->next = mReq->zdma;
	} else {
		mReq->ptr->next = TD_TERMINATE;
		if (!mReq->req.no_interrupt)
			mReq->ptr->token |= TD_IOC;
	}
	/* fill the 5 page pointers covering the (page-spanning) buffer */
	mReq->ptr->page[0] = mReq->req.dma;
	for (i = 1; i < 5; i++)
		mReq->ptr->page[i] =
			(mReq->req.dma + i * CI13XXX_PAGE_SIZE) & ~TD_RESERVED_MASK;
	if (!list_empty(&mEp->qh.queue)) {
		struct ci13xxx_req *mReqPrev;
		int n = hw_ep_bit(mEp->num, mEp->dir);
		int tmp_stat;
		/* append to the last queued TD of this endpoint */
		mReqPrev = list_entry(mEp->qh.queue.prev,
				      struct ci13xxx_req, queue);
		if (mReqPrev->zptr)
			mReqPrev->zptr->next = mReq->dma & TD_ADDR_MASK;
		else
			mReqPrev->ptr->next = mReq->dma & TD_ADDR_MASK;
		/* the link must be visible to the controller before we check */
		wmb();
		if (hw_read(udc, OP_ENDPTPRIME, BIT(n)))
			goto done;
		/* ATDTW tripwire: sample ENDPTSTAT atomically w.r.t. hardware */
		do {
			hw_write(udc, OP_USBCMD, USBCMD_ATDTW, USBCMD_ATDTW);
			tmp_stat = hw_read(udc, OP_ENDPTSTAT, BIT(n));
		} while (!hw_read(udc, OP_USBCMD, USBCMD_ATDTW));
		hw_write(udc, OP_USBCMD, USBCMD_ATDTW, 0);
		/* endpoint still active: the hardware picked up the new TD */
		if (tmp_stat)
			goto done;
	}
	/* QH configuration */
	mEp->qh.ptr->td.next = mReq->dma; /* TERMINATE = 0 */
	mEp->qh.ptr->td.token &= ~TD_STATUS; /* clear status */
	mEp->qh.ptr->cap |= QH_ZLT;
	wmb(); /* synchronize before ep prime */
	ret = hw_ep_prime(udc, mEp->num, mEp->dir,
			  mEp->type == USB_ENDPOINT_XFER_CONTROL);
done:
	return ret;
}
  433. /**
  434. * _hardware_dequeue: handles a request at hardware level
  435. * @gadget: gadget
  436. * @mEp: endpoint
  437. *
  438. * This function returns an error code
  439. */
  440. static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
  441. {
  442. if (mReq->req.status != -EALREADY)
  443. return -EINVAL;
  444. if ((TD_STATUS_ACTIVE & mReq->ptr->token) != 0)
  445. return -EBUSY;
  446. if (mReq->zptr) {
  447. if ((TD_STATUS_ACTIVE & mReq->zptr->token) != 0)
  448. return -EBUSY;
  449. dma_pool_free(mEp->td_pool, mReq->zptr, mReq->zdma);
  450. mReq->zptr = NULL;
  451. }
  452. mReq->req.status = 0;
  453. if (mReq->map) {
  454. dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length,
  455. mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
  456. mReq->req.dma = DMA_ADDR_INVALID;
  457. mReq->map = 0;
  458. }
  459. mReq->req.status = mReq->ptr->token & TD_STATUS;
  460. if ((TD_STATUS_HALTED & mReq->req.status) != 0)
  461. mReq->req.status = -1;
  462. else if ((TD_STATUS_DT_ERR & mReq->req.status) != 0)
  463. mReq->req.status = -1;
  464. else if ((TD_STATUS_TR_ERR & mReq->req.status) != 0)
  465. mReq->req.status = -1;
  466. mReq->req.actual = mReq->ptr->token & TD_TOTAL_BYTES;
  467. mReq->req.actual >>= ffs_nr(TD_TOTAL_BYTES);
  468. mReq->req.actual = mReq->req.length - mReq->req.actual;
  469. mReq->req.actual = mReq->req.status ? 0 : mReq->req.actual;
  470. return mReq->req.actual;
  471. }
/**
 * _ep_nuke: dequeues all endpoint requests
 * @mEp: endpoint
 *
 * This function returns an error code
 * Caller must hold lock
 */
static int _ep_nuke(struct ci13xxx_ep *mEp)
__releases(mEp->lock)
__acquires(mEp->lock)
{
	if (mEp == NULL)
		return -EINVAL;
	hw_ep_flush(mEp->udc, mEp->num, mEp->dir);
	while (!list_empty(&mEp->qh.queue)) {
		/* pop oldest request */
		struct ci13xxx_req *mReq = \
			list_entry(mEp->qh.queue.next,
				   struct ci13xxx_req, queue);
		list_del_init(&mReq->queue);
		mReq->req.status = -ESHUTDOWN;
		/* drop the lock around the gadget's completion callback,
		   which may itself queue/dequeue requests */
		if (mReq->req.complete != NULL) {
			spin_unlock(mEp->lock);
			mReq->req.complete(&mEp->ep, &mReq->req);
			spin_lock(mEp->lock);
		}
	}
	return 0;
}
/**
 * _gadget_stop_activity: stops all USB activity, flushes & disables all endpts
 * @gadget: gadget
 *
 * This function returns an error code (always 0)
 */
static int _gadget_stop_activity(struct usb_gadget *gadget)
{
	struct usb_ep *ep;
	struct ci13xxx *udc = container_of(gadget, struct ci13xxx, gadget);
	unsigned long flags;
	spin_lock_irqsave(&udc->lock, flags);
	udc->gadget.speed = USB_SPEED_UNKNOWN;
	udc->remote_wakeup = 0;
	udc->suspended = 0;
	spin_unlock_irqrestore(&udc->lock, flags);
	/* flush all endpoints */
	gadget_for_each_ep(ep, gadget) {
		usb_ep_fifo_flush(ep);
	}
	/* ep0 in/out flushed explicitly - presumably not covered by the
	   gadget ep list iteration above; TODO confirm */
	usb_ep_fifo_flush(&udc->ep0out->ep);
	usb_ep_fifo_flush(&udc->ep0in->ep);
	/* notify the bound function driver before tearing endpoints down */
	if (udc->driver)
		udc->driver->disconnect(gadget);
	/* make sure to disable all endpoints */
	gadget_for_each_ep(ep, gadget) {
		usb_ep_disable(ep);
	}
	/* release the pre-allocated status-stage request, if any */
	if (udc->status != NULL) {
		usb_ep_free_request(&udc->ep0in->ep, udc->status);
		udc->status = NULL;
	}
	return 0;
}
  535. /******************************************************************************
  536. * ISR block
  537. *****************************************************************************/
/**
 * isr_reset_handler: USB reset interrupt handler
 * @udc: UDC device
 *
 * This function resets USB engine after a bus reset occurred
 */
static void isr_reset_handler(struct ci13xxx *udc)
__releases(udc->lock)
__acquires(udc->lock)
{
	int retval;
	dbg_event(0xFF, "BUS RST", 0);
	/* callbacks invoked below take the lock themselves; drop it here */
	spin_unlock(&udc->lock);
	retval = _gadget_stop_activity(&udc->gadget);
	if (retval)
		goto done;
	retval = hw_usb_reset(udc);
	if (retval)
		goto done;
	/* pre-allocate the request used for setup status stages */
	udc->status = usb_ep_alloc_request(&udc->ep0in->ep, GFP_ATOMIC);
	if (udc->status == NULL)
		retval = -ENOMEM;
done:
	spin_lock(&udc->lock);
	if (retval)
		dev_err(udc->dev, "error: %i\n", retval);
}
  565. /**
  566. * isr_get_status_complete: get_status request complete function
  567. * @ep: endpoint
  568. * @req: request handled
  569. *
  570. * Caller must release lock
  571. */
  572. static void isr_get_status_complete(struct usb_ep *ep, struct usb_request *req)
  573. {
  574. if (ep == NULL || req == NULL)
  575. return;
  576. kfree(req->buf);
  577. usb_ep_free_request(ep, req);
  578. }
/**
 * isr_get_status_response: get_status request response
 * @udc: udc struct
 * @setup: setup request packet
 *
 * Allocates a 2-byte request on ep0in, fills it with the device or
 * endpoint status and queues it. The buffer and request are released
 * by isr_get_status_complete().
 *
 * This function returns an error code
 */
static int isr_get_status_response(struct ci13xxx *udc,
				   struct usb_ctrlrequest *setup)
__releases(mEp->lock)
__acquires(mEp->lock)
{
	struct ci13xxx_ep *mEp = udc->ep0in;
	struct usb_request *req = NULL;
	gfp_t gfp_flags = GFP_ATOMIC;
	int dir, num, retval;
	if (mEp == NULL || setup == NULL)
		return -EINVAL;
	/* usb_ep_alloc_request takes the ep lock itself; drop ours */
	spin_unlock(mEp->lock);
	req = usb_ep_alloc_request(&mEp->ep, gfp_flags);
	spin_lock(mEp->lock);
	if (req == NULL)
		return -ENOMEM;
	req->complete = isr_get_status_complete;
	req->length = 2;
	req->buf = kzalloc(req->length, gfp_flags);
	if (req->buf == NULL) {
		retval = -ENOMEM;
		goto err_free_req;
	}
	if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
		/* Assume that device is bus powered for now. */
		*(u16 *)req->buf = udc->remote_wakeup << 1;
		retval = 0;
	} else if ((setup->bRequestType & USB_RECIP_MASK) \
		   == USB_RECIP_ENDPOINT) {
		/* endpoint status: bit 0 = halt state */
		dir = (le16_to_cpu(setup->wIndex) & USB_ENDPOINT_DIR_MASK) ?
			TX : RX;
		num = le16_to_cpu(setup->wIndex) & USB_ENDPOINT_NUMBER_MASK;
		*(u16 *)req->buf = hw_ep_get_halt(udc, num, dir);
	}
	/* else do nothing; reserved for future use */
	spin_unlock(mEp->lock);
	retval = usb_ep_queue(&mEp->ep, req, gfp_flags);
	spin_lock(mEp->lock);
	if (retval)
		goto err_free_buf;
	return 0;
err_free_buf:
	kfree(req->buf);
err_free_req:
	spin_unlock(mEp->lock);
	usb_ep_free_request(&mEp->ep, req);
	spin_lock(mEp->lock);
	return retval;
}
/**
 * isr_setup_status_complete: setup_status request complete function
 * @ep: endpoint
 * @req: request handled
 *
 * Caller must release lock. Put the port in test mode if test mode
 * feature is selected.
 */
static void
isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct ci13xxx *udc = req->context;
	unsigned long flags;
	/* SET_ADDRESS is applied only after its status stage completes;
	   NOTE(review): this write happens outside udc->lock - confirm
	   that is intentional */
	if (udc->setaddr) {
		hw_usb_set_address(udc, udc->address);
		udc->setaddr = false;
	}
	spin_lock_irqsave(&udc->lock, flags);
	if (udc->test_mode)
		hw_port_test_set(udc, udc->test_mode);
	spin_unlock_irqrestore(&udc->lock, flags);
}
/**
 * isr_setup_status_phase: queues the status phase of a setup transation
 * @udc: udc struct
 *
 * This function returns an error code
 */
static int isr_setup_status_phase(struct ci13xxx *udc)
__releases(mEp->lock)
__acquires(mEp->lock)
{
	int retval;
	struct ci13xxx_ep *mEp;
	/* the status stage runs opposite to the data stage direction:
	   IN data (TX) -> zero-length OUT status, and vice versa */
	mEp = (udc->ep0_dir == TX) ? udc->ep0out : udc->ep0in;
	udc->status->context = udc;
	udc->status->complete = isr_setup_status_complete;
	spin_unlock(mEp->lock);
	retval = usb_ep_queue(&mEp->ep, udc->status, GFP_ATOMIC);
	spin_lock(mEp->lock);
	return retval;
}
/**
 * isr_tr_complete_low: transaction complete low level handler
 * @mEp: endpoint
 *
 * Retires every completed request on the endpoint queue and runs the
 * gadget completion callbacks (with the lock dropped).
 *
 * This function returns an error code
 * Caller must hold lock
 */
static int isr_tr_complete_low(struct ci13xxx_ep *mEp)
__releases(mEp->lock)
__acquires(mEp->lock)
{
	struct ci13xxx_req *mReq, *mReqTemp;
	struct ci13xxx_ep *mEpTemp = mEp;
	int uninitialized_var(retval);
	if (list_empty(&mEp->qh.queue))
		return -EINVAL;
	list_for_each_entry_safe(mReq, mReqTemp, &mEp->qh.queue,
			queue) {
		/* stop at the first request still owned by the hardware */
		retval = _hardware_dequeue(mEp, mReq);
		if (retval < 0)
			break;
		list_del_init(&mReq->queue);
		dbg_done(_usb_addr(mEp), mReq->ptr->token, retval);
		if (mReq->req.complete != NULL) {
			spin_unlock(mEp->lock);
			/* control transfers with data complete on ep0in */
			if ((mEp->type == USB_ENDPOINT_XFER_CONTROL) &&
					mReq->req.length)
				mEpTemp = mEp->udc->ep0in;
			mReq->req.complete(&mEpTemp->ep, &mReq->req);
			spin_lock(mEp->lock);
		}
	}
	/* -EBUSY just means the next TD is still active - not an error */
	if (retval == -EBUSY)
		retval = 0;
	if (retval < 0)
		dbg_event(_usb_addr(mEp), "DONE", retval);
	return retval;
}
  715. /**
  716. * isr_tr_complete_handler: transaction complete interrupt handler
  717. * @udc: UDC descriptor
  718. *
  719. * This function handles traffic events
  720. */
static void isr_tr_complete_handler(struct ci13xxx *udc)
__releases(udc->lock)
__acquires(udc->lock)
{
	unsigned i;
	u8 tmode = 0;

	/*
	 * Scan every hardware endpoint for completed transactions and, on
	 * ep0, for freshly received SETUP packets.  Entered with udc->lock
	 * held; the lock is dropped temporarily around every callback into
	 * the gadget driver / endpoint ops (see spin_unlock pairs below).
	 */
	for (i = 0; i < udc->hw_ep_max; i++) {
		struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
		int type, num, dir, err = -EINVAL;
		struct usb_ctrlrequest req;

		if (mEp->ep.desc == NULL)
			continue;   /* not configured */

		if (hw_test_and_clear_complete(udc, i)) {
			err = isr_tr_complete_low(mEp);
			if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
				if (err > 0)   /* needs status phase */
					err = isr_setup_status_phase(udc);
				if (err < 0) {
					/* stall ep0 so the host sees the
					 * failed control transfer */
					dbg_event(_usb_addr(mEp),
						  "ERROR", err);
					spin_unlock(&udc->lock);
					if (usb_ep_set_halt(&mEp->ep))
						dev_err(udc->dev,
							"error: ep_set_halt\n");
					spin_lock(&udc->lock);
				}
			}
		}

		/* from here on only SETUP traffic on ep0 is of interest */
		if (mEp->type != USB_ENDPOINT_XFER_CONTROL ||
		    !hw_test_and_clear_setup_status(udc, i))
			continue;

		if (i != 0) {
			dev_warn(udc->dev, "ctrl traffic at endpoint %d\n", i);
			continue;
		}

		/*
		 * Flush data and handshake transactions of previous
		 * setup packet.
		 */
		_ep_nuke(udc->ep0out);
		_ep_nuke(udc->ep0in);

		/* read_setup_packet: the setup-guard dance retries the copy
		 * until no new SETUP arrived while we were reading, so the
		 * snapshot in 'req' is always consistent */
		do {
			hw_test_and_set_setup_guard(udc);
			memcpy(&req, &mEp->qh.ptr->setup, sizeof(req));
		} while (!hw_test_and_clear_setup_guard(udc));

		type = req.bRequestType;

		/* remember the data-stage direction for ep_queue() */
		udc->ep0_dir = (type & USB_DIR_IN) ? TX : RX;

		dbg_setup(_usb_addr(mEp), &req);

		/* handle the standard requests the controller cares about
		 * itself; everything else is delegated to the gadget */
		switch (req.bRequest) {
		case USB_REQ_CLEAR_FEATURE:
			if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
			    le16_to_cpu(req.wValue) ==
			    USB_ENDPOINT_HALT) {
				if (req.wLength != 0)
					break;
				/* map the USB endpoint address onto the
				 * driver's ep array: IN endpoints live in
				 * the upper half */
				num = le16_to_cpu(req.wIndex);
				dir = num & USB_ENDPOINT_DIR_MASK;
				num &= USB_ENDPOINT_NUMBER_MASK;
				if (dir) /* TX */
					num += udc->hw_ep_max/2;
				/* a wedged endpoint ignores CLEAR_HALT */
				if (!udc->ci13xxx_ep[num].wedge) {
					spin_unlock(&udc->lock);
					err = usb_ep_clear_halt(
						&udc->ci13xxx_ep[num].ep);
					spin_lock(&udc->lock);
					if (err)
						break;
				}
				err = isr_setup_status_phase(udc);
			} else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE) &&
				   le16_to_cpu(req.wValue) ==
				   USB_DEVICE_REMOTE_WAKEUP) {
				if (req.wLength != 0)
					break;
				udc->remote_wakeup = 0;
				err = isr_setup_status_phase(udc);
			} else {
				goto delegate;
			}
			break;
		case USB_REQ_GET_STATUS:
			if (type != (USB_DIR_IN|USB_RECIP_DEVICE) &&
			    type != (USB_DIR_IN|USB_RECIP_ENDPOINT) &&
			    type != (USB_DIR_IN|USB_RECIP_INTERFACE))
				goto delegate;
			if (le16_to_cpu(req.wLength) != 2 ||
			    le16_to_cpu(req.wValue) != 0)
				break;
			err = isr_get_status_response(udc, &req);
			break;
		case USB_REQ_SET_ADDRESS:
			if (type != (USB_DIR_OUT|USB_RECIP_DEVICE))
				goto delegate;
			if (le16_to_cpu(req.wLength) != 0 ||
			    le16_to_cpu(req.wIndex) != 0)
				break;
			/* address takes effect after the status phase;
			 * record it and flag it for later programming */
			udc->address = (u8)le16_to_cpu(req.wValue);
			udc->setaddr = true;
			err = isr_setup_status_phase(udc);
			break;
		case USB_REQ_SET_FEATURE:
			if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
			    le16_to_cpu(req.wValue) ==
			    USB_ENDPOINT_HALT) {
				if (req.wLength != 0)
					break;
				num = le16_to_cpu(req.wIndex);
				dir = num & USB_ENDPOINT_DIR_MASK;
				num &= USB_ENDPOINT_NUMBER_MASK;
				if (dir) /* TX */
					num += udc->hw_ep_max/2;

				spin_unlock(&udc->lock);
				err = usb_ep_set_halt(&udc->ci13xxx_ep[num].ep);
				spin_lock(&udc->lock);
				if (!err)
					isr_setup_status_phase(udc);
			} else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE)) {
				if (req.wLength != 0)
					break;
				switch (le16_to_cpu(req.wValue)) {
				case USB_DEVICE_REMOTE_WAKEUP:
					udc->remote_wakeup = 1;
					err = isr_setup_status_phase(udc);
					break;
				case USB_DEVICE_TEST_MODE:
					/* test selector is the high byte of
					 * wIndex per USB 2.0 ch. 9.4.9 */
					tmode = le16_to_cpu(req.wIndex) >> 8;
					switch (tmode) {
					case TEST_J:
					case TEST_K:
					case TEST_SE0_NAK:
					case TEST_PACKET:
					case TEST_FORCE_EN:
						udc->test_mode = tmode;
						err = isr_setup_status_phase(
								udc);
						break;
					default:
						break;
					}
					/* NOTE(review): no break here — a
					 * handled TEST_MODE request falls
					 * into the default: label below and
					 * is ALSO delegated to the gadget
					 * driver; confirm this fallthrough
					 * is intentional. */
				default:
					goto delegate;
				}
			} else {
				goto delegate;
			}
			break;
		default:
delegate:
			/* no data phase: status will be an IN transaction */
			if (req.wLength == 0)   /* no data phase */
				udc->ep0_dir = TX;

			spin_unlock(&udc->lock);
			err = udc->driver->setup(&udc->gadget, &req);
			spin_lock(&udc->lock);
			break;
		}

		if (err < 0) {
			/* protocol stall on any failure above */
			dbg_event(_usb_addr(mEp), "ERROR", err);
			spin_unlock(&udc->lock);
			if (usb_ep_set_halt(&mEp->ep))
				dev_err(udc->dev, "error: ep_set_halt\n");
			spin_lock(&udc->lock);
		}
	}
}
  886. /******************************************************************************
  887. * ENDPT block
  888. *****************************************************************************/
  889. /**
  890. * ep_enable: configure endpoint, making it usable
  891. *
  892. * Check usb_ep_enable() at "usb_gadget.h" for details
  893. */
  894. static int ep_enable(struct usb_ep *ep,
  895. const struct usb_endpoint_descriptor *desc)
  896. {
  897. struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
  898. int retval = 0;
  899. unsigned long flags;
  900. if (ep == NULL || desc == NULL)
  901. return -EINVAL;
  902. spin_lock_irqsave(mEp->lock, flags);
  903. /* only internal SW should enable ctrl endpts */
  904. mEp->ep.desc = desc;
  905. if (!list_empty(&mEp->qh.queue))
  906. dev_warn(mEp->udc->dev, "enabling a non-empty endpoint!\n");
  907. mEp->dir = usb_endpoint_dir_in(desc) ? TX : RX;
  908. mEp->num = usb_endpoint_num(desc);
  909. mEp->type = usb_endpoint_type(desc);
  910. mEp->ep.maxpacket = usb_endpoint_maxp(desc);
  911. dbg_event(_usb_addr(mEp), "ENABLE", 0);
  912. mEp->qh.ptr->cap = 0;
  913. if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
  914. mEp->qh.ptr->cap |= QH_IOS;
  915. else if (mEp->type == USB_ENDPOINT_XFER_ISOC)
  916. mEp->qh.ptr->cap &= ~QH_MULT;
  917. else
  918. mEp->qh.ptr->cap &= ~QH_ZLT;
  919. mEp->qh.ptr->cap |=
  920. (mEp->ep.maxpacket << ffs_nr(QH_MAX_PKT)) & QH_MAX_PKT;
  921. mEp->qh.ptr->td.next |= TD_TERMINATE; /* needed? */
  922. /*
  923. * Enable endpoints in the HW other than ep0 as ep0
  924. * is always enabled
  925. */
  926. if (mEp->num)
  927. retval |= hw_ep_enable(mEp->udc, mEp->num, mEp->dir, mEp->type);
  928. spin_unlock_irqrestore(mEp->lock, flags);
  929. return retval;
  930. }
  931. /**
  932. * ep_disable: endpoint is no longer usable
  933. *
  934. * Check usb_ep_disable() at "usb_gadget.h" for details
  935. */
  936. static int ep_disable(struct usb_ep *ep)
  937. {
  938. struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
  939. int direction, retval = 0;
  940. unsigned long flags;
  941. if (ep == NULL)
  942. return -EINVAL;
  943. else if (mEp->ep.desc == NULL)
  944. return -EBUSY;
  945. spin_lock_irqsave(mEp->lock, flags);
  946. /* only internal SW should disable ctrl endpts */
  947. direction = mEp->dir;
  948. do {
  949. dbg_event(_usb_addr(mEp), "DISABLE", 0);
  950. retval |= _ep_nuke(mEp);
  951. retval |= hw_ep_disable(mEp->udc, mEp->num, mEp->dir);
  952. if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
  953. mEp->dir = (mEp->dir == TX) ? RX : TX;
  954. } while (mEp->dir != direction);
  955. mEp->ep.desc = NULL;
  956. spin_unlock_irqrestore(mEp->lock, flags);
  957. return retval;
  958. }
  959. /**
  960. * ep_alloc_request: allocate a request object to use with this endpoint
  961. *
  962. * Check usb_ep_alloc_request() at "usb_gadget.h" for details
  963. */
  964. static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
  965. {
  966. struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
  967. struct ci13xxx_req *mReq = NULL;
  968. if (ep == NULL)
  969. return NULL;
  970. mReq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags);
  971. if (mReq != NULL) {
  972. INIT_LIST_HEAD(&mReq->queue);
  973. mReq->req.dma = DMA_ADDR_INVALID;
  974. mReq->ptr = dma_pool_alloc(mEp->td_pool, gfp_flags,
  975. &mReq->dma);
  976. if (mReq->ptr == NULL) {
  977. kfree(mReq);
  978. mReq = NULL;
  979. }
  980. }
  981. dbg_event(_usb_addr(mEp), "ALLOC", mReq == NULL);
  982. return (mReq == NULL) ? NULL : &mReq->req;
  983. }
  984. /**
  985. * ep_free_request: frees a request object
  986. *
  987. * Check usb_ep_free_request() at "usb_gadget.h" for details
  988. */
  989. static void ep_free_request(struct usb_ep *ep, struct usb_request *req)
  990. {
  991. struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
  992. struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
  993. unsigned long flags;
  994. if (ep == NULL || req == NULL) {
  995. return;
  996. } else if (!list_empty(&mReq->queue)) {
  997. dev_err(mEp->udc->dev, "freeing queued request\n");
  998. return;
  999. }
  1000. spin_lock_irqsave(mEp->lock, flags);
  1001. if (mReq->ptr)
  1002. dma_pool_free(mEp->td_pool, mReq->ptr, mReq->dma);
  1003. kfree(mReq);
  1004. dbg_event(_usb_addr(mEp), "FREE", 0);
  1005. spin_unlock_irqrestore(mEp->lock, flags);
  1006. }
  1007. /**
  1008. * ep_queue: queues (submits) an I/O request to an endpoint
  1009. *
  1010. * Check usb_ep_queue()* at usb_gadget.h" for details
  1011. */
static int ep_queue(struct usb_ep *ep, struct usb_request *req,
		    gfp_t __maybe_unused gfp_flags)
{
	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
	struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
	struct ci13xxx *udc = mEp->udc;
	int retval = 0;
	unsigned long flags;

	if (ep == NULL || req == NULL || mEp->ep.desc == NULL)
		return -EINVAL;

	spin_lock_irqsave(mEp->lock, flags);

	if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
		/* a control data stage is redirected to the ep0 endpoint
		 * matching the direction latched when the SETUP packet was
		 * decoded (udc->ep0_dir) */
		if (req->length)
			mEp = (udc->ep0_dir == RX) ?
				udc->ep0out : udc->ep0in;
		if (!list_empty(&mEp->qh.queue)) {
			/* a new control transfer supersedes anything still
			 * pending on ep0 */
			_ep_nuke(mEp);
			retval = -EOVERFLOW;
			dev_warn(mEp->udc->dev, "endpoint ctrl %X nuked\n",
				 _usb_addr(mEp));
		}
	}

	/* first nuke then test link, e.g. previous status has not sent */
	if (!list_empty(&mReq->queue)) {
		retval = -EBUSY;
		dev_err(mEp->udc->dev, "request already in queue\n");
		goto done;
	}

	if (req->length > 4 * CI13XXX_PAGE_SIZE) {
		/* oversized requests are truncated and the caller informed
		 * via -EMSGSIZE; presumably bounded by the 4-page transfer
		 * descriptor limit — TODO confirm in _hardware_enqueue */
		req->length = 4 * CI13XXX_PAGE_SIZE;
		retval = -EMSGSIZE;
		dev_warn(mEp->udc->dev, "request length truncated\n");
	}

	dbg_queue(_usb_addr(mEp), req, retval);

	/* push request */
	mReq->req.status = -EINPROGRESS;
	mReq->req.actual = 0;

	retval = _hardware_enqueue(mEp, mReq);

	if (retval == -EALREADY) {
		/* -EALREADY from _hardware_enqueue is treated as success */
		dbg_event(_usb_addr(mEp), "QUEUE", retval);
		retval = 0;
	}
	if (!retval)
		list_add_tail(&mReq->queue, &mEp->qh.queue);

done:
	spin_unlock_irqrestore(mEp->lock, flags);
	return retval;
}
  1060. /**
  1061. * ep_dequeue: dequeues (cancels, unlinks) an I/O request from an endpoint
  1062. *
  1063. * Check usb_ep_dequeue() at "usb_gadget.h" for details
  1064. */
static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
	struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
	unsigned long flags;

	/* only a queued request whose status is -EALREADY (presumably set
	 * while owned by the hardware — confirm in _hardware_enqueue) on a
	 * configured, non-empty endpoint may be cancelled */
	if (ep == NULL || req == NULL || mReq->req.status != -EALREADY ||
	    mEp->ep.desc == NULL || list_empty(&mReq->queue) ||
	    list_empty(&mEp->qh.queue))
		return -EINVAL;

	spin_lock_irqsave(mEp->lock, flags);

	dbg_event(_usb_addr(mEp), "DEQUEUE", 0);

	/* stop the hardware before unlinking the request */
	hw_ep_flush(mEp->udc, mEp->num, mEp->dir);

	/* pop request */
	list_del_init(&mReq->queue);
	if (mReq->map) {
		/* undo the DMA mapping made when the request was queued */
		dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length,
				 mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		mReq->req.dma = DMA_ADDR_INVALID;
		mReq->map = 0;
	}
	req->status = -ECONNRESET;

	if (mReq->req.complete != NULL) {
		/* the completion callback runs without the spinlock held */
		spin_unlock(mEp->lock);
		mReq->req.complete(&mEp->ep, &mReq->req);
		spin_lock(mEp->lock);
	}

	spin_unlock_irqrestore(mEp->lock, flags);
	return 0;
}
  1094. /**
  1095. * ep_set_halt: sets the endpoint halt feature
  1096. *
  1097. * Check usb_ep_set_halt() at "usb_gadget.h" for details
  1098. */
static int ep_set_halt(struct usb_ep *ep, int value)
{
	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
	int direction, retval = 0;
	unsigned long flags;

	if (ep == NULL || mEp->ep.desc == NULL)
		return -EINVAL;

	spin_lock_irqsave(mEp->lock, flags);

#ifndef STALL_IN
	/* g_file_storage MS compliant but g_zero fails chapter 9 compliance */
	if (value && mEp->type == USB_ENDPOINT_XFER_BULK && mEp->dir == TX &&
	    !list_empty(&mEp->qh.queue)) {
		/* refuse to stall a bulk-IN endpoint that still has queued
		 * requests; the caller may retry once they have drained */
		spin_unlock_irqrestore(mEp->lock, flags);
		return -EAGAIN;
	}
#endif

	direction = mEp->dir;
	do {
		dbg_event(_usb_addr(mEp), "HALT", value);
		retval |= hw_ep_set_halt(mEp->udc, mEp->num, mEp->dir, value);

		/* clearing the halt also clears a wedge condition */
		if (!value)
			mEp->wedge = 0;

		/* a control endpoint owns both directions: flip and repeat
		 * until we are back where we started */
		if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
			mEp->dir = (mEp->dir == TX) ? RX : TX;
	} while (mEp->dir != direction);

	spin_unlock_irqrestore(mEp->lock, flags);
	return retval;
}
  1127. /**
  1128. * ep_set_wedge: sets the halt feature and ignores clear requests
  1129. *
  1130. * Check usb_ep_set_wedge() at "usb_gadget.h" for details
  1131. */
  1132. static int ep_set_wedge(struct usb_ep *ep)
  1133. {
  1134. struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
  1135. unsigned long flags;
  1136. if (ep == NULL || mEp->ep.desc == NULL)
  1137. return -EINVAL;
  1138. spin_lock_irqsave(mEp->lock, flags);
  1139. dbg_event(_usb_addr(mEp), "WEDGE", 0);
  1140. mEp->wedge = 1;
  1141. spin_unlock_irqrestore(mEp->lock, flags);
  1142. return usb_ep_set_halt(ep);
  1143. }
  1144. /**
  1145. * ep_fifo_flush: flushes contents of a fifo
  1146. *
  1147. * Check usb_ep_fifo_flush() at "usb_gadget.h" for details
  1148. */
  1149. static void ep_fifo_flush(struct usb_ep *ep)
  1150. {
  1151. struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
  1152. unsigned long flags;
  1153. if (ep == NULL) {
  1154. dev_err(mEp->udc->dev, "%02X: -EINVAL\n", _usb_addr(mEp));
  1155. return;
  1156. }
  1157. spin_lock_irqsave(mEp->lock, flags);
  1158. dbg_event(_usb_addr(mEp), "FFLUSH", 0);
  1159. hw_ep_flush(mEp->udc, mEp->num, mEp->dir);
  1160. spin_unlock_irqrestore(mEp->lock, flags);
  1161. }
  1162. /**
  1163. * Endpoint-specific part of the API to the USB controller hardware
  1164. * Check "usb_gadget.h" for details
  1165. */
/* per-endpoint operations handed to the gadget core via each ep.ops */
static const struct usb_ep_ops usb_ep_ops = {
	.enable	       = ep_enable,
	.disable       = ep_disable,
	.alloc_request = ep_alloc_request,
	.free_request  = ep_free_request,
	.queue	       = ep_queue,
	.dequeue       = ep_dequeue,
	.set_halt      = ep_set_halt,
	.set_wedge     = ep_set_wedge,
	.fifo_flush    = ep_fifo_flush,
};
  1177. /******************************************************************************
  1178. * GADGET block
  1179. *****************************************************************************/
  1180. static int ci13xxx_vbus_session(struct usb_gadget *_gadget, int is_active)
  1181. {
  1182. struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
  1183. unsigned long flags;
  1184. int gadget_ready = 0;
  1185. if (!(udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS))
  1186. return -EOPNOTSUPP;
  1187. spin_lock_irqsave(&udc->lock, flags);
  1188. udc->vbus_active = is_active;
  1189. if (udc->driver)
  1190. gadget_ready = 1;
  1191. spin_unlock_irqrestore(&udc->lock, flags);
  1192. if (gadget_ready) {
  1193. if (is_active) {
  1194. pm_runtime_get_sync(&_gadget->dev);
  1195. hw_device_reset(udc, USBMODE_CM_DC);
  1196. hw_device_state(udc, udc->ep0out->qh.dma);
  1197. } else {
  1198. hw_device_state(udc, 0);
  1199. if (udc->udc_driver->notify_event)
  1200. udc->udc_driver->notify_event(udc,
  1201. CI13XXX_CONTROLLER_STOPPED_EVENT);
  1202. _gadget_stop_activity(&udc->gadget);
  1203. pm_runtime_put_sync(&_gadget->dev);
  1204. }
  1205. }
  1206. return 0;
  1207. }
  1208. static int ci13xxx_wakeup(struct usb_gadget *_gadget)
  1209. {
  1210. struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
  1211. unsigned long flags;
  1212. int ret = 0;
  1213. spin_lock_irqsave(&udc->lock, flags);
  1214. if (!udc->remote_wakeup) {
  1215. ret = -EOPNOTSUPP;
  1216. goto out;
  1217. }
  1218. if (!hw_read(udc, OP_PORTSC, PORTSC_SUSP)) {
  1219. ret = -EINVAL;
  1220. goto out;
  1221. }
  1222. hw_write(udc, OP_PORTSC, PORTSC_FPR, PORTSC_FPR);
  1223. out:
  1224. spin_unlock_irqrestore(&udc->lock, flags);
  1225. return ret;
  1226. }
  1227. static int ci13xxx_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
  1228. {
  1229. struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
  1230. if (udc->transceiver)
  1231. return usb_phy_set_power(udc->transceiver, mA);
  1232. return -ENOTSUPP;
  1233. }
/* forward declarations: defined below, referenced by usb_gadget_ops */
static int ci13xxx_start(struct usb_gadget *gadget,
			 struct usb_gadget_driver *driver);
static int ci13xxx_stop(struct usb_gadget *gadget,
			struct usb_gadget_driver *driver);
  1238. /**
  1239. * Device operations part of the API to the USB controller hardware,
  1240. * which don't involve endpoints (or i/o)
  1241. * Check "usb_gadget.h" for details
  1242. */
static const struct usb_gadget_ops usb_gadget_ops = {
	.vbus_session	= ci13xxx_vbus_session,
	.wakeup		= ci13xxx_wakeup,
	.vbus_draw	= ci13xxx_vbus_draw,
	/* bind/unbind entry points invoked by the UDC core */
	.udc_start	= ci13xxx_start,
	.udc_stop	= ci13xxx_stop,
};
  1250. static int init_eps(struct ci13xxx *udc)
  1251. {
  1252. int retval = 0, i, j;
  1253. for (i = 0; i < udc->hw_ep_max/2; i++)
  1254. for (j = RX; j <= TX; j++) {
  1255. int k = i + j * udc->hw_ep_max/2;
  1256. struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[k];
  1257. scnprintf(mEp->name, sizeof(mEp->name), "ep%i%s", i,
  1258. (j == TX) ? "in" : "out");
  1259. mEp->udc = udc;
  1260. mEp->lock = &udc->lock;
  1261. mEp->device = &udc->gadget.dev;
  1262. mEp->td_pool = udc->td_pool;
  1263. mEp->ep.name = mEp->name;
  1264. mEp->ep.ops = &usb_ep_ops;
  1265. mEp->ep.maxpacket = CTRL_PAYLOAD_MAX;
  1266. INIT_LIST_HEAD(&mEp->qh.queue);
  1267. mEp->qh.ptr = dma_pool_alloc(udc->qh_pool, GFP_KERNEL,
  1268. &mEp->qh.dma);
  1269. if (mEp->qh.ptr == NULL)
  1270. retval = -ENOMEM;
  1271. else
  1272. memset(mEp->qh.ptr, 0, sizeof(*mEp->qh.ptr));
  1273. /*
  1274. * set up shorthands for ep0 out and in endpoints,
  1275. * don't add to gadget's ep_list
  1276. */
  1277. if (i == 0) {
  1278. if (j == RX)
  1279. udc->ep0out = mEp;
  1280. else
  1281. udc->ep0in = mEp;
  1282. continue;
  1283. }
  1284. list_add_tail(&mEp->ep.ep_list, &udc->gadget.ep_list);
  1285. }
  1286. return retval;
  1287. }
  1288. /**
  1289. * ci13xxx_start: register a gadget driver
  1290. * @gadget: our gadget
  1291. * @driver: the driver being registered
  1292. *
  1293. * Interrupts are enabled here.
  1294. */
  1295. static int ci13xxx_start(struct usb_gadget *gadget,
  1296. struct usb_gadget_driver *driver)
  1297. {
  1298. struct ci13xxx *udc = container_of(gadget, struct ci13xxx, gadget);
  1299. unsigned long flags;
  1300. int retval = -ENOMEM;
  1301. if (driver->disconnect == NULL)
  1302. return -EINVAL;
  1303. udc->ep0out->ep.desc = &ctrl_endpt_out_desc;
  1304. retval = usb_ep_enable(&udc->ep0out->ep);
  1305. if (retval)
  1306. return retval;
  1307. udc->ep0in->ep.desc = &ctrl_endpt_in_desc;
  1308. retval = usb_ep_enable(&udc->ep0in->ep);
  1309. if (retval)
  1310. return retval;
  1311. spin_lock_irqsave(&udc->lock, flags);
  1312. udc->driver = driver;
  1313. pm_runtime_get_sync(&udc->gadget.dev);
  1314. if (udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) {
  1315. if (udc->vbus_active) {
  1316. if (udc->udc_driver->flags & CI13XXX_REGS_SHARED)
  1317. hw_device_reset(udc, USBMODE_CM_DC);
  1318. } else {
  1319. pm_runtime_put_sync(&udc->gadget.dev);
  1320. goto done;
  1321. }
  1322. }
  1323. retval = hw_device_state(udc, udc->ep0out->qh.dma);
  1324. if (retval)
  1325. pm_runtime_put_sync(&udc->gadget.dev);
  1326. done:
  1327. spin_unlock_irqrestore(&udc->lock, flags);
  1328. return retval;
  1329. }
  1330. /**
  1331. * ci13xxx_stop: unregister a gadget driver
  1332. */
static int ci13xxx_stop(struct usb_gadget *gadget,
			struct usb_gadget_driver *driver)
{
	struct ci13xxx *udc = container_of(gadget, struct ci13xxx, gadget);
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);

	/* the controller is running either when pullup is not VBUS
	 * controlled, or when VBUS is currently present */
	if (!(udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) ||
	    udc->vbus_active) {
		hw_device_state(udc, 0);
		if (udc->udc_driver->notify_event)
			udc->udc_driver->notify_event(udc,
			CI13XXX_CONTROLLER_STOPPED_EVENT);
		udc->driver = NULL;
		/* drop the lock: _gadget_stop_activity completes pending
		 * requests and may call back into the gadget driver */
		spin_unlock_irqrestore(&udc->lock, flags);
		_gadget_stop_activity(&udc->gadget);
		spin_lock_irqsave(&udc->lock, flags);
		pm_runtime_put(&udc->gadget.dev);
	}

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
  1354. /******************************************************************************
  1355. * BUS block
  1356. *****************************************************************************/
  1357. /**
  1358. * udc_irq: udc interrupt handler
  1359. *
  1360. * This function returns IRQ_HANDLED if the IRQ has been handled
  1361. * It locks access to registers
  1362. */
static irqreturn_t udc_irq(struct ci13xxx *udc)
{
	irqreturn_t retval;
	u32 intr;

	if (udc == NULL)
		return IRQ_HANDLED;

	spin_lock(&udc->lock);

	if (udc->udc_driver->flags & CI13XXX_REGS_SHARED) {
		/* registers are shared with the host role: claim the IRQ
		 * only while the controller is in device mode */
		if (hw_read(udc, OP_USBMODE, USBMODE_CM) !=
		    USBMODE_CM_DC) {
			spin_unlock(&udc->lock);
			return IRQ_NONE;
		}
	}
	intr = hw_test_and_clear_intr_active(udc);
	dbg_interrupt(intr);

	if (intr) {
		/* order defines priority - do NOT change it */
		if (USBi_URI & intr)
			isr_reset_handler(udc);

		if (USBi_PCI & intr) {
			/* port change: latch the negotiated speed */
			udc->gadget.speed = hw_port_is_high_speed(udc) ?
				USB_SPEED_HIGH : USB_SPEED_FULL;
			if (udc->suspended && udc->driver->resume) {
				/* resume callback runs without the lock */
				spin_unlock(&udc->lock);
				udc->driver->resume(&udc->gadget);
				spin_lock(&udc->lock);
				/* NOTE(review): 'suspended' is cleared only
				 * when a ->resume callback exists — confirm
				 * drivers without one cannot remain stuck in
				 * the suspended state */
				udc->suspended = 0;
			}
		}

		if (USBi_UI & intr)
			isr_tr_complete_handler(udc);

		if (USBi_SLI & intr) {
			/* suspend: forward to the gadget driver once a
			 * connection speed has been established */
			if (udc->gadget.speed != USB_SPEED_UNKNOWN &&
			    udc->driver->suspend) {
				udc->suspended = 1;
				spin_unlock(&udc->lock);
				udc->driver->suspend(&udc->gadget);
				spin_lock(&udc->lock);
			}
		}
		retval = IRQ_HANDLED;
	} else {
		retval = IRQ_NONE;
	}
	spin_unlock(&udc->lock);

	return retval;
}
  1411. /**
  1412. * udc_release: driver release function
  1413. * @dev: device
  1414. *
  1415. * Currently does nothing
  1416. */
static void udc_release(struct device *dev)
{
	/* intentionally empty: gadget.dev is embedded in struct ci13xxx,
	 * so there is nothing separate to free here */
}
  1420. /**
  1421. * udc_start: initialize gadget role
  1422. * @udc: chipidea controller
  1423. */
  1424. static int udc_start(struct ci13xxx *udc)
  1425. {
  1426. struct device *dev = udc->dev;
  1427. int retval = 0;
  1428. if (!udc)
  1429. return -EINVAL;
  1430. spin_lock_init(&udc->lock);
  1431. udc->gadget.ops = &usb_gadget_ops;
  1432. udc->gadget.speed = USB_SPEED_UNKNOWN;
  1433. udc->gadget.max_speed = USB_SPEED_HIGH;
  1434. udc->gadget.is_otg = 0;
  1435. udc->gadget.name = udc->udc_driver->name;
  1436. INIT_LIST_HEAD(&udc->gadget.ep_list);
  1437. dev_set_name(&udc->gadget.dev, "gadget");
  1438. udc->gadget.dev.dma_mask = dev->dma_mask;
  1439. udc->gadget.dev.coherent_dma_mask = dev->coherent_dma_mask;
  1440. udc->gadget.dev.parent = dev;
  1441. udc->gadget.dev.release = udc_release;
  1442. /* alloc resources */
  1443. udc->qh_pool = dma_pool_create("ci13xxx_qh", dev,
  1444. sizeof(struct ci13xxx_qh),
  1445. 64, CI13XXX_PAGE_SIZE);
  1446. if (udc->qh_pool == NULL)
  1447. return -ENOMEM;
  1448. udc->td_pool = dma_pool_create("ci13xxx_td", dev,
  1449. sizeof(struct ci13xxx_td),
  1450. 64, CI13XXX_PAGE_SIZE);
  1451. if (udc->td_pool == NULL) {
  1452. retval = -ENOMEM;
  1453. goto free_qh_pool;
  1454. }
  1455. retval = init_eps(udc);
  1456. if (retval)
  1457. goto free_pools;
  1458. udc->gadget.ep0 = &udc->ep0in->ep;
  1459. udc->transceiver = usb_get_transceiver();
  1460. if (udc->udc_driver->flags & CI13XXX_REQUIRE_TRANSCEIVER) {
  1461. if (udc->transceiver == NULL) {
  1462. retval = -ENODEV;
  1463. goto free_pools;
  1464. }
  1465. }
  1466. if (!(udc->udc_driver->flags & CI13XXX_REGS_SHARED)) {
  1467. retval = hw_device_reset(udc, USBMODE_CM_DC);
  1468. if (retval)
  1469. goto put_transceiver;
  1470. }
  1471. retval = device_register(&udc->gadget.dev);
  1472. if (retval) {
  1473. put_device(&udc->gadget.dev);
  1474. goto put_transceiver;
  1475. }
  1476. retval = dbg_create_files(&udc->gadget.dev);
  1477. if (retval)
  1478. goto unreg_device;
  1479. if (udc->transceiver) {
  1480. retval = otg_set_peripheral(udc->transceiver->otg,
  1481. &udc->gadget);
  1482. if (retval)
  1483. goto remove_dbg;
  1484. }
  1485. retval = usb_add_gadget_udc(dev, &udc->gadget);
  1486. if (retval)
  1487. goto remove_trans;
  1488. pm_runtime_no_callbacks(&udc->gadget.dev);
  1489. pm_runtime_enable(&udc->gadget.dev);
  1490. return retval;
  1491. remove_trans:
  1492. if (udc->transceiver) {
  1493. otg_set_peripheral(udc->transceiver->otg, &udc->gadget);
  1494. usb_put_transceiver(udc->transceiver);
  1495. }
  1496. dev_err(dev, "error = %i\n", retval);
  1497. remove_dbg:
  1498. dbg_remove_files(&udc->gadget.dev);
  1499. unreg_device:
  1500. device_unregister(&udc->gadget.dev);
  1501. put_transceiver:
  1502. if (udc->transceiver)
  1503. usb_put_transceiver(udc->transceiver);
  1504. free_pools:
  1505. dma_pool_destroy(udc->td_pool);
  1506. free_qh_pool:
  1507. dma_pool_destroy(udc->qh_pool);
  1508. return retval;
  1509. }
  1510. /**
  1511. * udc_remove: parent remove must call this to remove UDC
  1512. *
  1513. * No interrupts active, the IRQ has been released
  1514. */
  1515. static void udc_stop(struct ci13xxx *udc)
  1516. {
  1517. int i;
  1518. if (udc == NULL)
  1519. return;
  1520. usb_del_gadget_udc(&udc->gadget);
  1521. for (i = 0; i < udc->hw_ep_max; i++) {
  1522. struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
  1523. dma_pool_free(udc->qh_pool, mEp->qh.ptr, mEp->qh.dma);
  1524. }
  1525. dma_pool_destroy(udc->td_pool);
  1526. dma_pool_destroy(udc->qh_pool);
  1527. if (udc->transceiver) {
  1528. otg_set_peripheral(udc->transceiver->otg, NULL);
  1529. usb_put_transceiver(udc->transceiver);
  1530. }
  1531. dbg_remove_files(&udc->gadget.dev);
  1532. device_unregister(&udc->gadget.dev);
  1533. /* my kobject is dynamic, I swear! */
  1534. memset(&udc->gadget, 0, sizeof(udc->gadget));
  1535. }
  1536. /**
  1537. * ci_hdrc_gadget_init - initialize device related bits
  1538. * ci: the controller
  1539. *
  1540. * This function enables the gadget role, if the device is "device capable".
  1541. */
  1542. int ci_hdrc_gadget_init(struct ci13xxx *ci)
  1543. {
  1544. struct ci_role_driver *rdrv;
  1545. if (!hw_read(ci, CAP_DCCPARAMS, DCCPARAMS_DC))
  1546. return -ENXIO;
  1547. rdrv = devm_kzalloc(ci->dev, sizeof(struct ci_role_driver), GFP_KERNEL);
  1548. if (!rdrv)
  1549. return -ENOMEM;
  1550. rdrv->start = udc_start;
  1551. rdrv->stop = udc_stop;
  1552. rdrv->irq = udc_irq;
  1553. rdrv->name = "gadget";
  1554. ci->roles[CI_ROLE_GADGET] = rdrv;
  1555. return 0;
  1556. }