udc.c 44 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835
  1. /*
  2. * udc.h - ChipIdea UDC driver
  3. *
  4. * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
  5. *
  6. * Author: David Lopo
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License version 2 as
  10. * published by the Free Software Foundation.
  11. */
  12. #include <linux/delay.h>
  13. #include <linux/device.h>
  14. #include <linux/dmapool.h>
  15. #include <linux/dma-mapping.h>
  16. #include <linux/init.h>
  17. #include <linux/platform_device.h>
  18. #include <linux/module.h>
  19. #include <linux/interrupt.h>
  20. #include <linux/io.h>
  21. #include <linux/irq.h>
  22. #include <linux/kernel.h>
  23. #include <linux/slab.h>
  24. #include <linux/pm_runtime.h>
  25. #include <linux/usb/ch9.h>
  26. #include <linux/usb/gadget.h>
  27. #include <linux/usb/otg.h>
  28. #include <linux/usb/chipidea.h>
  29. #include "ci.h"
  30. #include "udc.h"
  31. #include "bits.h"
  32. #include "debug.h"
/* control endpoint description */
/*
 * Hardware-fixed descriptor for the OUT half of endpoint 0.  Endpoint 0
 * is always a bidirectional control endpoint with a fixed maximum
 * payload of CTRL_PAYLOAD_MAX bytes.
 */
static const struct usb_endpoint_descriptor
ctrl_endpt_out_desc = {
	.bLength         = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,

	.bEndpointAddress = USB_DIR_OUT,
	.bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
};
/*
 * Hardware-fixed descriptor for the IN half of endpoint 0; mirrors
 * ctrl_endpt_out_desc except for the direction bit.
 */
static const struct usb_endpoint_descriptor
ctrl_endpt_in_desc = {
	.bLength         = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,

	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
};
  50. /**
  51. * hw_ep_bit: calculates the bit number
  52. * @num: endpoint number
  53. * @dir: endpoint direction
  54. *
  55. * This function returns bit number
  56. */
  57. static inline int hw_ep_bit(int num, int dir)
  58. {
  59. return num + (dir ? 16 : 0);
  60. }
  61. static inline int ep_to_bit(struct ci13xxx *udc, int n)
  62. {
  63. int fill = 16 - udc->hw_ep_max / 2;
  64. if (n >= udc->hw_ep_max / 2)
  65. n += fill;
  66. return n;
  67. }
  68. /**
  69. * hw_device_state: enables/disables interrupts & starts/stops device (execute
  70. * without interruption)
  71. * @dma: 0 => disable, !0 => enable and set dma engine
  72. *
  73. * This function returns an error code
  74. */
  75. static int hw_device_state(struct ci13xxx *udc, u32 dma)
  76. {
  77. if (dma) {
  78. hw_write(udc, OP_ENDPTLISTADDR, ~0, dma);
  79. /* interrupt, error, port change, reset, sleep/suspend */
  80. hw_write(udc, OP_USBINTR, ~0,
  81. USBi_UI|USBi_UEI|USBi_PCI|USBi_URI|USBi_SLI);
  82. hw_write(udc, OP_USBCMD, USBCMD_RS, USBCMD_RS);
  83. } else {
  84. hw_write(udc, OP_USBCMD, USBCMD_RS, 0);
  85. hw_write(udc, OP_USBINTR, ~0, 0);
  86. }
  87. return 0;
  88. }
/**
 * hw_ep_flush: flush endpoint fifo (execute without interruption)
 * @num: endpoint number
 * @dir: endpoint direction
 *
 * Busy-waits until both the flush request and the endpoint status bit
 * have cleared; the outer loop re-flushes because the controller may
 * re-prime the endpoint while a flush is in flight.
 *
 * This function returns an error code
 */
static int hw_ep_flush(struct ci13xxx *udc, int num, int dir)
{
	int n = hw_ep_bit(num, dir);

	do {
		/* flush any pending transfer */
		hw_write(udc, OP_ENDPTFLUSH, BIT(n), BIT(n));
		/* wait for the controller to acknowledge the flush */
		while (hw_read(udc, OP_ENDPTFLUSH, BIT(n)))
			cpu_relax();
	} while (hw_read(udc, OP_ENDPTSTAT, BIT(n)));

	return 0;
}
  107. /**
  108. * hw_ep_disable: disables endpoint (execute without interruption)
  109. * @num: endpoint number
  110. * @dir: endpoint direction
  111. *
  112. * This function returns an error code
  113. */
  114. static int hw_ep_disable(struct ci13xxx *udc, int num, int dir)
  115. {
  116. hw_ep_flush(udc, num, dir);
  117. hw_write(udc, OP_ENDPTCTRL + num,
  118. dir ? ENDPTCTRL_TXE : ENDPTCTRL_RXE, 0);
  119. return 0;
  120. }
  121. /**
  122. * hw_ep_enable: enables endpoint (execute without interruption)
  123. * @num: endpoint number
  124. * @dir: endpoint direction
  125. * @type: endpoint type
  126. *
  127. * This function returns an error code
  128. */
  129. static int hw_ep_enable(struct ci13xxx *udc, int num, int dir, int type)
  130. {
  131. u32 mask, data;
  132. if (dir) {
  133. mask = ENDPTCTRL_TXT; /* type */
  134. data = type << ffs_nr(mask);
  135. mask |= ENDPTCTRL_TXS; /* unstall */
  136. mask |= ENDPTCTRL_TXR; /* reset data toggle */
  137. data |= ENDPTCTRL_TXR;
  138. mask |= ENDPTCTRL_TXE; /* enable */
  139. data |= ENDPTCTRL_TXE;
  140. } else {
  141. mask = ENDPTCTRL_RXT; /* type */
  142. data = type << ffs_nr(mask);
  143. mask |= ENDPTCTRL_RXS; /* unstall */
  144. mask |= ENDPTCTRL_RXR; /* reset data toggle */
  145. data |= ENDPTCTRL_RXR;
  146. mask |= ENDPTCTRL_RXE; /* enable */
  147. data |= ENDPTCTRL_RXE;
  148. }
  149. hw_write(udc, OP_ENDPTCTRL + num, mask, data);
  150. return 0;
  151. }
  152. /**
  153. * hw_ep_get_halt: return endpoint halt status
  154. * @num: endpoint number
  155. * @dir: endpoint direction
  156. *
  157. * This function returns 1 if endpoint halted
  158. */
  159. static int hw_ep_get_halt(struct ci13xxx *udc, int num, int dir)
  160. {
  161. u32 mask = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
  162. return hw_read(udc, OP_ENDPTCTRL + num, mask) ? 1 : 0;
  163. }
  164. /**
  165. * hw_test_and_clear_setup_status: test & clear setup status (execute without
  166. * interruption)
  167. * @n: endpoint number
  168. *
  169. * This function returns setup status
  170. */
  171. static int hw_test_and_clear_setup_status(struct ci13xxx *udc, int n)
  172. {
  173. n = ep_to_bit(udc, n);
  174. return hw_test_and_clear(udc, OP_ENDPTSETUPSTAT, BIT(n));
  175. }
/**
 * hw_ep_prime: primes endpoint (execute without interruption)
 * @num: endpoint number
 * @dir: endpoint direction
 * @is_ctrl: true if control endpoint
 *
 * Returns -EAGAIN if a setup packet arrived before or during priming of
 * a control OUT endpoint, so the caller can restart the operation.
 *
 * This function returns an error code
 */
static int hw_ep_prime(struct ci13xxx *udc, int num, int dir, int is_ctrl)
{
	int n = hw_ep_bit(num, dir);

	/* a pending setup packet invalidates priming of ep0-OUT */
	if (is_ctrl && dir == RX && hw_read(udc, OP_ENDPTSETUPSTAT, BIT(num)))
		return -EAGAIN;

	hw_write(udc, OP_ENDPTPRIME, BIT(n), BIT(n));

	/* wait for the controller to consume the prime request */
	while (hw_read(udc, OP_ENDPTPRIME, BIT(n)))
		cpu_relax();

	/* re-check: a setup packet may have raced with the prime */
	if (is_ctrl && dir == RX && hw_read(udc, OP_ENDPTSETUPSTAT, BIT(num)))
		return -EAGAIN;

	/* status should be tested according to the manual but it doesn't work */
	return 0;
}
/**
 * hw_ep_set_halt: configures ep halt & resets data toggle after clear (execute
 * without interruption)
 * @num: endpoint number
 * @dir: endpoint direction
 * @value: true => stall, false => unstall
 *
 * Retries the register write until the halt state reads back as
 * requested, since the controller may clear the stall bit on its own.
 *
 * This function returns an error code
 */
static int hw_ep_set_halt(struct ci13xxx *udc, int num, int dir, int value)
{
	if (value != 0 && value != 1)
		return -EINVAL;

	do {
		enum ci13xxx_regs reg = OP_ENDPTCTRL + num;
		u32 mask_xs = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
		u32 mask_xr = dir ? ENDPTCTRL_TXR : ENDPTCTRL_RXR;

		/* data toggle - reserved for EP0 but it's in ESS */
		hw_write(udc, reg, mask_xs|mask_xr,
			value ? mask_xs : mask_xr);

	} while (value != hw_ep_get_halt(udc, num, dir));

	return 0;
}
  220. /**
  221. * hw_is_port_high_speed: test if port is high speed
  222. *
  223. * This function returns true if high speed port
  224. */
  225. static int hw_port_is_high_speed(struct ci13xxx *udc)
  226. {
  227. return udc->hw_bank.lpm ? hw_read(udc, OP_DEVLC, DEVLC_PSPD) :
  228. hw_read(udc, OP_PORTSC, PORTSC_HSP);
  229. }
  230. /**
  231. * hw_read_intr_enable: returns interrupt enable register
  232. *
  233. * This function returns register data
  234. */
  235. static u32 hw_read_intr_enable(struct ci13xxx *udc)
  236. {
  237. return hw_read(udc, OP_USBINTR, ~0);
  238. }
  239. /**
  240. * hw_read_intr_status: returns interrupt status register
  241. *
  242. * This function returns register data
  243. */
  244. static u32 hw_read_intr_status(struct ci13xxx *udc)
  245. {
  246. return hw_read(udc, OP_USBSTS, ~0);
  247. }
  248. /**
  249. * hw_test_and_clear_complete: test & clear complete status (execute without
  250. * interruption)
  251. * @n: endpoint number
  252. *
  253. * This function returns complete status
  254. */
  255. static int hw_test_and_clear_complete(struct ci13xxx *udc, int n)
  256. {
  257. n = ep_to_bit(udc, n);
  258. return hw_test_and_clear(udc, OP_ENDPTCOMPLETE, BIT(n));
  259. }
  260. /**
  261. * hw_test_and_clear_intr_active: test & clear active interrupts (execute
  262. * without interruption)
  263. *
  264. * This function returns active interrutps
  265. */
  266. static u32 hw_test_and_clear_intr_active(struct ci13xxx *udc)
  267. {
  268. u32 reg = hw_read_intr_status(udc) & hw_read_intr_enable(udc);
  269. hw_write(udc, OP_USBSTS, ~0, reg);
  270. return reg;
  271. }
  272. /**
  273. * hw_test_and_clear_setup_guard: test & clear setup guard (execute without
  274. * interruption)
  275. *
  276. * This function returns guard value
  277. */
  278. static int hw_test_and_clear_setup_guard(struct ci13xxx *udc)
  279. {
  280. return hw_test_and_write(udc, OP_USBCMD, USBCMD_SUTW, 0);
  281. }
  282. /**
  283. * hw_test_and_set_setup_guard: test & set setup guard (execute without
  284. * interruption)
  285. *
  286. * This function returns guard value
  287. */
  288. static int hw_test_and_set_setup_guard(struct ci13xxx *udc)
  289. {
  290. return hw_test_and_write(udc, OP_USBCMD, USBCMD_SUTW, USBCMD_SUTW);
  291. }
  292. /**
  293. * hw_usb_set_address: configures USB address (execute without interruption)
  294. * @value: new USB address
  295. *
  296. * This function explicitly sets the address, without the "USBADRA" (advance)
  297. * feature, which is not supported by older versions of the controller.
  298. */
  299. static void hw_usb_set_address(struct ci13xxx *udc, u8 value)
  300. {
  301. hw_write(udc, OP_DEVICEADDR, DEVICEADDR_USBADR,
  302. value << ffs_nr(DEVICEADDR_USBADR));
  303. }
/**
 * hw_usb_reset: restart device after a bus reset (execute without
 * interruption)
 *
 * This function returns an error code
 */
static int hw_usb_reset(struct ci13xxx *udc)
{
	hw_usb_set_address(udc, 0);

	/* ESS flushes only at end?!? */
	hw_write(udc, OP_ENDPTFLUSH, ~0, ~0);

	/*
	 * clear setup token semaphores
	 * NOTE(review): mask 0 looks like a no-op; presumably hw_write's
	 * read-modify-write writes the currently-set w1c bits back, which
	 * clears them -- confirm against hw_write's implementation.
	 */
	hw_write(udc, OP_ENDPTSETUPSTAT, 0,  0);

	/* clear complete status */
	hw_write(udc, OP_ENDPTCOMPLETE,  0,  0);

	/* wait until all bits cleared */
	while (hw_read(udc, OP_ENDPTPRIME, ~0))
		udelay(10);             /* not RTOS friendly */

	/* reset all endpoints ? */

	/* reset internal status and wait for further instructions
	   no need to verify the port reset status (ESS does it) */

	return 0;
}
  327. /******************************************************************************
  328. * UTIL block
  329. *****************************************************************************/
  330. /**
  331. * _usb_addr: calculates endpoint address from direction & number
  332. * @ep: endpoint
  333. */
  334. static inline u8 _usb_addr(struct ci13xxx_ep *ep)
  335. {
  336. return ((ep->dir == TX) ? USB_ENDPOINT_DIR_MASK : 0) | ep->num;
  337. }
/**
 * _hardware_enqueue: configures a request at hardware level
 * @mEp:  endpoint the request is queued on
 * @mReq: request to program into the endpoint's TD list
 *
 * Maps the request buffer for DMA if needed, builds the transfer
 * descriptor (plus an extra zero-length TD when a short-packet
 * terminator is required), links it behind any request already queued,
 * and primes the endpoint when the hardware is idle.
 *
 * This function returns an error code
 */
static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
{
	struct ci13xxx *udc = mEp->udc;
	unsigned i;
	int ret = 0;
	unsigned length = mReq->req.length;

	/* don't queue twice */
	if (mReq->req.status == -EALREADY)
		return -EALREADY;

	/* -EALREADY marks the request as owned by the hardware */
	mReq->req.status = -EALREADY;
	if (length && mReq->req.dma == DMA_ADDR_INVALID) {
		mReq->req.dma = \
			dma_map_single(mEp->device, mReq->req.buf,
				       length, mEp->dir ? DMA_TO_DEVICE :
				       DMA_FROM_DEVICE);
		/*
		 * NOTE(review): mapping failure should be detected with
		 * dma_mapping_error(); comparing against 0 may miss
		 * failures (and 0 can be a valid DMA address) -- verify.
		 */
		if (mReq->req.dma == 0)
			return -ENOMEM;

		mReq->map = 1;
	}

	/*
	 * Requests that fill an exact number of max-size packets and ask
	 * for a zero-length terminator get an extra TD for the ZLP.
	 */
	if (mReq->req.zero && length && (length % mEp->ep.maxpacket == 0)) {
		mReq->zptr = dma_pool_alloc(mEp->td_pool, GFP_ATOMIC,
					    &mReq->zdma);
		if (mReq->zptr == NULL) {
			/* undo the mapping done above before bailing out */
			if (mReq->map) {
				dma_unmap_single(mEp->device, mReq->req.dma,
					length, mEp->dir ? DMA_TO_DEVICE :
					DMA_FROM_DEVICE);
				mReq->req.dma = DMA_ADDR_INVALID;
				mReq->map = 0;
			}
			return -ENOMEM;
		}
		memset(mReq->zptr, 0, sizeof(*mReq->zptr));
		mReq->zptr->next  = TD_TERMINATE;
		mReq->zptr->token = TD_STATUS_ACTIVE;
		if (!mReq->req.no_interrupt)
			mReq->zptr->token |= TD_IOC;
	}
	/*
	 * TD configuration
	 * TODO - handle requests which spawns into several TDs
	 */
	memset(mReq->ptr, 0, sizeof(*mReq->ptr));
	mReq->ptr->token  = length << ffs_nr(TD_TOTAL_BYTES);
	mReq->ptr->token &= TD_TOTAL_BYTES;
	mReq->ptr->token |= TD_STATUS_ACTIVE;
	if (mReq->zptr) {
		/* chain the ZLP TD behind the data TD */
		mReq->ptr->next = mReq->zdma;
	} else {
		mReq->ptr->next = TD_TERMINATE;
		if (!mReq->req.no_interrupt)
			mReq->ptr->token |= TD_IOC;
	}
	/* buffer pointers: one per 4K page the transfer may touch */
	mReq->ptr->page[0] = mReq->req.dma;
	for (i = 1; i < 5; i++)
		mReq->ptr->page[i] =
			(mReq->req.dma + i * CI13XXX_PAGE_SIZE) & ~TD_RESERVED_MASK;

	if (!list_empty(&mEp->qh.queue)) {
		struct ci13xxx_req *mReqPrev;
		int n = hw_ep_bit(mEp->num, mEp->dir);
		int tmp_stat;

		/* link the new TD behind the last queued request */
		mReqPrev = list_entry(mEp->qh.queue.prev,
				struct ci13xxx_req, queue);
		if (mReqPrev->zptr)
			mReqPrev->zptr->next = mReq->dma & TD_ADDR_MASK;
		else
			mReqPrev->ptr->next = mReq->dma & TD_ADDR_MASK;
		wmb();
		/* endpoint already (re)priming: hardware will pick it up */
		if (hw_read(udc, OP_ENDPTPRIME, BIT(n)))
			goto done;
		/*
		 * ATDTW tripwire: sample ENDPTSTAT atomically w.r.t. the
		 * controller's own TD fetch (see ChipIdea/EHCI manual).
		 */
		do {
			hw_write(udc, OP_USBCMD, USBCMD_ATDTW, USBCMD_ATDTW);
			tmp_stat = hw_read(udc, OP_ENDPTSTAT, BIT(n));
		} while (!hw_read(udc, OP_USBCMD, USBCMD_ATDTW));
		hw_write(udc, OP_USBCMD, USBCMD_ATDTW, 0);
		if (tmp_stat)
			goto done;
	}

	/* QH configuration */
	mEp->qh.ptr->td.next   = mReq->dma;    /* TERMINATE = 0 */
	mEp->qh.ptr->td.token &= ~TD_STATUS;   /* clear status */
	mEp->qh.ptr->cap |=  QH_ZLT;

	wmb();   /* synchronize before ep prime */

	ret = hw_ep_prime(udc, mEp->num, mEp->dir,
			  mEp->type == USB_ENDPOINT_XFER_CONTROL);
done:
	return ret;
}
  433. /**
  434. * _hardware_dequeue: handles a request at hardware level
  435. * @gadget: gadget
  436. * @mEp: endpoint
  437. *
  438. * This function returns an error code
  439. */
  440. static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
  441. {
  442. if (mReq->req.status != -EALREADY)
  443. return -EINVAL;
  444. if ((TD_STATUS_ACTIVE & mReq->ptr->token) != 0)
  445. return -EBUSY;
  446. if (mReq->zptr) {
  447. if ((TD_STATUS_ACTIVE & mReq->zptr->token) != 0)
  448. return -EBUSY;
  449. dma_pool_free(mEp->td_pool, mReq->zptr, mReq->zdma);
  450. mReq->zptr = NULL;
  451. }
  452. mReq->req.status = 0;
  453. if (mReq->map) {
  454. dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length,
  455. mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
  456. mReq->req.dma = DMA_ADDR_INVALID;
  457. mReq->map = 0;
  458. }
  459. mReq->req.status = mReq->ptr->token & TD_STATUS;
  460. if ((TD_STATUS_HALTED & mReq->req.status) != 0)
  461. mReq->req.status = -1;
  462. else if ((TD_STATUS_DT_ERR & mReq->req.status) != 0)
  463. mReq->req.status = -1;
  464. else if ((TD_STATUS_TR_ERR & mReq->req.status) != 0)
  465. mReq->req.status = -1;
  466. mReq->req.actual = mReq->ptr->token & TD_TOTAL_BYTES;
  467. mReq->req.actual >>= ffs_nr(TD_TOTAL_BYTES);
  468. mReq->req.actual = mReq->req.length - mReq->req.actual;
  469. mReq->req.actual = mReq->req.status ? 0 : mReq->req.actual;
  470. return mReq->req.actual;
  471. }
  472. /**
  473. * _ep_nuke: dequeues all endpoint requests
  474. * @mEp: endpoint
  475. *
  476. * This function returns an error code
  477. * Caller must hold lock
  478. */
  479. static int _ep_nuke(struct ci13xxx_ep *mEp)
  480. __releases(mEp->lock)
  481. __acquires(mEp->lock)
  482. {
  483. if (mEp == NULL)
  484. return -EINVAL;
  485. hw_ep_flush(mEp->udc, mEp->num, mEp->dir);
  486. while (!list_empty(&mEp->qh.queue)) {
  487. /* pop oldest request */
  488. struct ci13xxx_req *mReq = \
  489. list_entry(mEp->qh.queue.next,
  490. struct ci13xxx_req, queue);
  491. list_del_init(&mReq->queue);
  492. mReq->req.status = -ESHUTDOWN;
  493. if (mReq->req.complete != NULL) {
  494. spin_unlock(mEp->lock);
  495. mReq->req.complete(&mEp->ep, &mReq->req);
  496. spin_lock(mEp->lock);
  497. }
  498. }
  499. return 0;
  500. }
  501. /**
  502. * _gadget_stop_activity: stops all USB activity, flushes & disables all endpts
  503. * @gadget: gadget
  504. *
  505. * This function returns an error code
  506. */
  507. static int _gadget_stop_activity(struct usb_gadget *gadget)
  508. {
  509. struct usb_ep *ep;
  510. struct ci13xxx *udc = container_of(gadget, struct ci13xxx, gadget);
  511. unsigned long flags;
  512. if (gadget == NULL)
  513. return -EINVAL;
  514. spin_lock_irqsave(&udc->lock, flags);
  515. udc->gadget.speed = USB_SPEED_UNKNOWN;
  516. udc->remote_wakeup = 0;
  517. udc->suspended = 0;
  518. spin_unlock_irqrestore(&udc->lock, flags);
  519. /* flush all endpoints */
  520. gadget_for_each_ep(ep, gadget) {
  521. usb_ep_fifo_flush(ep);
  522. }
  523. usb_ep_fifo_flush(&udc->ep0out->ep);
  524. usb_ep_fifo_flush(&udc->ep0in->ep);
  525. if (udc->driver)
  526. udc->driver->disconnect(gadget);
  527. /* make sure to disable all endpoints */
  528. gadget_for_each_ep(ep, gadget) {
  529. usb_ep_disable(ep);
  530. }
  531. if (udc->status != NULL) {
  532. usb_ep_free_request(&udc->ep0in->ep, udc->status);
  533. udc->status = NULL;
  534. }
  535. return 0;
  536. }
  537. /******************************************************************************
  538. * ISR block
  539. *****************************************************************************/
  540. /**
  541. * isr_reset_handler: USB reset interrupt handler
  542. * @udc: UDC device
  543. *
  544. * This function resets USB engine after a bus reset occurred
  545. */
  546. static void isr_reset_handler(struct ci13xxx *udc)
  547. __releases(udc->lock)
  548. __acquires(udc->lock)
  549. {
  550. int retval;
  551. dbg_event(0xFF, "BUS RST", 0);
  552. spin_unlock(&udc->lock);
  553. retval = _gadget_stop_activity(&udc->gadget);
  554. if (retval)
  555. goto done;
  556. retval = hw_usb_reset(udc);
  557. if (retval)
  558. goto done;
  559. udc->status = usb_ep_alloc_request(&udc->ep0in->ep, GFP_ATOMIC);
  560. if (udc->status == NULL)
  561. retval = -ENOMEM;
  562. spin_lock(&udc->lock);
  563. done:
  564. if (retval)
  565. dev_err(udc->dev, "error: %i\n", retval);
  566. }
  567. /**
  568. * isr_get_status_complete: get_status request complete function
  569. * @ep: endpoint
  570. * @req: request handled
  571. *
  572. * Caller must release lock
  573. */
  574. static void isr_get_status_complete(struct usb_ep *ep, struct usb_request *req)
  575. {
  576. if (ep == NULL || req == NULL)
  577. return;
  578. kfree(req->buf);
  579. usb_ep_free_request(ep, req);
  580. }
  581. /**
  582. * isr_get_status_response: get_status request response
  583. * @udc: udc struct
  584. * @setup: setup request packet
  585. *
  586. * This function returns an error code
  587. */
  588. static int isr_get_status_response(struct ci13xxx *udc,
  589. struct usb_ctrlrequest *setup)
  590. __releases(mEp->lock)
  591. __acquires(mEp->lock)
  592. {
  593. struct ci13xxx_ep *mEp = udc->ep0in;
  594. struct usb_request *req = NULL;
  595. gfp_t gfp_flags = GFP_ATOMIC;
  596. int dir, num, retval;
  597. if (mEp == NULL || setup == NULL)
  598. return -EINVAL;
  599. spin_unlock(mEp->lock);
  600. req = usb_ep_alloc_request(&mEp->ep, gfp_flags);
  601. spin_lock(mEp->lock);
  602. if (req == NULL)
  603. return -ENOMEM;
  604. req->complete = isr_get_status_complete;
  605. req->length = 2;
  606. req->buf = kzalloc(req->length, gfp_flags);
  607. if (req->buf == NULL) {
  608. retval = -ENOMEM;
  609. goto err_free_req;
  610. }
  611. if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
  612. /* Assume that device is bus powered for now. */
  613. *(u16 *)req->buf = udc->remote_wakeup << 1;
  614. retval = 0;
  615. } else if ((setup->bRequestType & USB_RECIP_MASK) \
  616. == USB_RECIP_ENDPOINT) {
  617. dir = (le16_to_cpu(setup->wIndex) & USB_ENDPOINT_DIR_MASK) ?
  618. TX : RX;
  619. num = le16_to_cpu(setup->wIndex) & USB_ENDPOINT_NUMBER_MASK;
  620. *(u16 *)req->buf = hw_ep_get_halt(udc, num, dir);
  621. }
  622. /* else do nothing; reserved for future use */
  623. spin_unlock(mEp->lock);
  624. retval = usb_ep_queue(&mEp->ep, req, gfp_flags);
  625. spin_lock(mEp->lock);
  626. if (retval)
  627. goto err_free_buf;
  628. return 0;
  629. err_free_buf:
  630. kfree(req->buf);
  631. err_free_req:
  632. spin_unlock(mEp->lock);
  633. usb_ep_free_request(&mEp->ep, req);
  634. spin_lock(mEp->lock);
  635. return retval;
  636. }
/**
 * isr_setup_status_complete: setup_status request complete function
 * @ep: endpoint
 * @req: request handled
 *
 * Caller must release lock. Put the port in test mode if test mode
 * feature is selected.
 */
static void
isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct ci13xxx *udc = req->context;
	unsigned long flags;

	/*
	 * SET_ADDRESS takes effect only after the status stage completed,
	 * which is exactly now.
	 * NOTE(review): setaddr/address are read without udc->lock here --
	 * presumably safe because only the ISR path touches them; verify.
	 */
	if (udc->setaddr) {
		hw_usb_set_address(udc, udc->address);
		udc->setaddr = false;
	}

	spin_lock_irqsave(&udc->lock, flags);
	if (udc->test_mode)
		hw_port_test_set(udc, udc->test_mode);
	spin_unlock_irqrestore(&udc->lock, flags);
}
  659. /**
  660. * isr_setup_status_phase: queues the status phase of a setup transation
  661. * @udc: udc struct
  662. *
  663. * This function returns an error code
  664. */
  665. static int isr_setup_status_phase(struct ci13xxx *udc)
  666. __releases(mEp->lock)
  667. __acquires(mEp->lock)
  668. {
  669. int retval;
  670. struct ci13xxx_ep *mEp;
  671. mEp = (udc->ep0_dir == TX) ? udc->ep0out : udc->ep0in;
  672. udc->status->context = udc;
  673. udc->status->complete = isr_setup_status_complete;
  674. spin_unlock(mEp->lock);
  675. retval = usb_ep_queue(&mEp->ep, udc->status, GFP_ATOMIC);
  676. spin_lock(mEp->lock);
  677. return retval;
  678. }
/**
 * isr_tr_complete_low: transaction complete low level handler
 * @mEp: endpoint
 *
 * Harvests every finished request on the endpoint, invoking its
 * completion callback with the lock dropped.  Stops at the first
 * request still owned by the hardware (-EBUSY, reported as success).
 *
 * This function returns an error code
 * Caller must hold lock
 */
static int isr_tr_complete_low(struct ci13xxx_ep *mEp)
__releases(mEp->lock)
__acquires(mEp->lock)
{
	struct ci13xxx_req *mReq, *mReqTemp;
	struct ci13xxx_ep *mEpTemp = mEp;
	/* always assigned: the list is non-empty, so the loop runs once */
	int uninitialized_var(retval);

	if (list_empty(&mEp->qh.queue))
		return -EINVAL;

	list_for_each_entry_safe(mReq, mReqTemp, &mEp->qh.queue,
			queue) {
		retval = _hardware_dequeue(mEp, mReq);
		if (retval < 0)
			break;
		list_del_init(&mReq->queue);
		dbg_done(_usb_addr(mEp), mReq->ptr->token, retval);
		if (mReq->req.complete != NULL) {
			spin_unlock(mEp->lock);
			/*
			 * control data stages complete on ep0in regardless
			 * of the direction the transfer actually used
			 */
			if ((mEp->type == USB_ENDPOINT_XFER_CONTROL) &&
					mReq->req.length)
				mEpTemp = mEp->udc->ep0in;
			mReq->req.complete(&mEpTemp->ep, &mReq->req);
			spin_lock(mEp->lock);
		}
	}

	/* -EBUSY just means more TDs are pending, not an error */
	if (retval == -EBUSY)
		retval = 0;
	if (retval < 0)
		dbg_event(_usb_addr(mEp), "DONE", retval);

	return retval;
}
/**
 * isr_tr_complete_handler: transaction complete interrupt handler
 * @udc: UDC descriptor
 *
 * This function handles traffic events: transfer completions on all
 * endpoints and new SETUP packets on endpoint 0.  Called with
 * udc->lock held; the lock is temporarily dropped (see the sparse
 * annotations) around calls that re-enter the driver or call into the
 * gadget driver.
 */
static void isr_tr_complete_handler(struct ci13xxx *udc)
__releases(udc->lock)
__acquires(udc->lock)
{
	unsigned i;
	u8 tmode = 0;

	for (i = 0; i < udc->hw_ep_max; i++) {
		struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
		int type, num, dir, err = -EINVAL;
		struct usb_ctrlrequest req;

		if (mEp->ep.desc == NULL)
			continue;   /* not configured */

		/* Handle completed transfer descriptors on this endpoint */
		if (hw_test_and_clear_complete(udc, i)) {
			err = isr_tr_complete_low(mEp);
			if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
				if (err > 0)   /* needs status phase */
					err = isr_setup_status_phase(udc);
				if (err < 0) {
					dbg_event(_usb_addr(mEp),
						  "ERROR", err);
					/* usb_ep_set_halt() acquires the
					 * lock itself, so drop it here */
					spin_unlock(&udc->lock);
					if (usb_ep_set_halt(&mEp->ep))
						dev_err(udc->dev,
							"error: ep_set_halt\n");
					spin_lock(&udc->lock);
				}
			}
		}

		/* Everything below deals with SETUP packets (ep0 only) */
		if (mEp->type != USB_ENDPOINT_XFER_CONTROL ||
		    !hw_test_and_clear_setup_status(udc, i))
			continue;

		if (i != 0) {
			dev_warn(udc->dev, "ctrl traffic at endpoint %d\n", i);
			continue;
		}

		/*
		 * Flush data and handshake transactions of previous
		 * setup packet.
		 */
		_ep_nuke(udc->ep0out);
		_ep_nuke(udc->ep0in);

		/*
		 * read_setup_packet: the setup guard tells us whether the
		 * hardware overwrote the setup buffer while we copied it;
		 * retry until the snapshot is consistent.
		 */
		do {
			hw_test_and_set_setup_guard(udc);
			memcpy(&req, &mEp->qh.ptr->setup, sizeof(req));
		} while (!hw_test_and_clear_setup_guard(udc));

		type = req.bRequestType;

		/* direction of an upcoming data/status stage on ep0 */
		udc->ep0_dir = (type & USB_DIR_IN) ? TX : RX;

		dbg_setup(_usb_addr(mEp), &req);

		/*
		 * Standard requests the UDC can answer itself are handled
		 * here; anything else is delegated to the gadget driver.
		 */
		switch (req.bRequest) {
		case USB_REQ_CLEAR_FEATURE:
			if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
			    le16_to_cpu(req.wValue) ==
			    USB_ENDPOINT_HALT) {
				if (req.wLength != 0)
					break;
				num = le16_to_cpu(req.wIndex);
				dir = num & USB_ENDPOINT_DIR_MASK;
				num &= USB_ENDPOINT_NUMBER_MASK;
				if (dir) /* TX */
					num += udc->hw_ep_max/2;
				/* a wedged endpoint ignores CLEAR_HALT */
				if (!udc->ci13xxx_ep[num].wedge) {
					spin_unlock(&udc->lock);
					err = usb_ep_clear_halt(
						&udc->ci13xxx_ep[num].ep);
					spin_lock(&udc->lock);
					if (err)
						break;
				}
				err = isr_setup_status_phase(udc);
			} else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE) &&
				   le16_to_cpu(req.wValue) ==
				   USB_DEVICE_REMOTE_WAKEUP) {
				if (req.wLength != 0)
					break;
				udc->remote_wakeup = 0;
				err = isr_setup_status_phase(udc);
			} else {
				goto delegate;
			}
			break;
		case USB_REQ_GET_STATUS:
			if (type != (USB_DIR_IN|USB_RECIP_DEVICE) &&
			    type != (USB_DIR_IN|USB_RECIP_ENDPOINT) &&
			    type != (USB_DIR_IN|USB_RECIP_INTERFACE))
				goto delegate;
			if (le16_to_cpu(req.wLength) != 2 ||
			    le16_to_cpu(req.wValue) != 0)
				break;
			err = isr_get_status_response(udc, &req);
			break;
		case USB_REQ_SET_ADDRESS:
			if (type != (USB_DIR_OUT|USB_RECIP_DEVICE))
				goto delegate;
			if (le16_to_cpu(req.wLength) != 0 ||
			    le16_to_cpu(req.wIndex) != 0)
				break;
			/* address takes effect after the status stage */
			udc->address = (u8)le16_to_cpu(req.wValue);
			udc->setaddr = true;
			err = isr_setup_status_phase(udc);
			break;
		case USB_REQ_SET_FEATURE:
			if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
			    le16_to_cpu(req.wValue) ==
			    USB_ENDPOINT_HALT) {
				if (req.wLength != 0)
					break;
				num = le16_to_cpu(req.wIndex);
				dir = num & USB_ENDPOINT_DIR_MASK;
				num &= USB_ENDPOINT_NUMBER_MASK;
				if (dir) /* TX */
					num += udc->hw_ep_max/2;

				spin_unlock(&udc->lock);
				err = usb_ep_set_halt(&udc->ci13xxx_ep[num].ep);
				spin_lock(&udc->lock);
				if (!err)
					isr_setup_status_phase(udc);
			} else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE)) {
				if (req.wLength != 0)
					break;
				switch (le16_to_cpu(req.wValue)) {
				case USB_DEVICE_REMOTE_WAKEUP:
					udc->remote_wakeup = 1;
					err = isr_setup_status_phase(udc);
					break;
				case USB_DEVICE_TEST_MODE:
					tmode = le16_to_cpu(req.wIndex) >> 8;
					switch (tmode) {
					case TEST_J:
					case TEST_K:
					case TEST_SE0_NAK:
					case TEST_PACKET:
					case TEST_FORCE_EN:
						udc->test_mode = tmode;
						err = isr_setup_status_phase(
							udc);
						break;
					default:
						break;
					}
				/*
				 * NOTE(review): there is no "break;" after
				 * the test-mode switch above, so the
				 * TEST_MODE case falls through to
				 * "goto delegate" — verify this
				 * fall-through is intended.
				 */
				default:
					goto delegate;
				}
			} else {
				goto delegate;
			}
			break;
		default:
delegate:
			if (req.wLength == 0)   /* no data phase */
				udc->ep0_dir = TX;

			/* hand the request to the gadget driver, unlocked */
			spin_unlock(&udc->lock);
			err = udc->driver->setup(&udc->gadget, &req);
			spin_lock(&udc->lock);
			break;
		}

		if (err < 0) {
			dbg_event(_usb_addr(mEp), "ERROR", err);

			/* protocol stall on ep0 to signal the failure */
			spin_unlock(&udc->lock);
			if (usb_ep_set_halt(&mEp->ep))
				dev_err(udc->dev, "error: ep_set_halt\n");
			spin_lock(&udc->lock);
		}
	}
}
  888. /******************************************************************************
  889. * ENDPT block
  890. *****************************************************************************/
/**
 * ep_enable: configure endpoint, making it usable
 *
 * Check usb_ep_enable() at "usb_gadget.h" for details
 */
static int ep_enable(struct usb_ep *ep,
		     const struct usb_endpoint_descriptor *desc)
{
	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
	int retval = 0;
	unsigned long flags;

	if (ep == NULL || desc == NULL)
		return -EINVAL;

	spin_lock_irqsave(mEp->lock, flags);

	/* only internal SW should enable ctrl endpts */

	mEp->ep.desc = desc;

	if (!list_empty(&mEp->qh.queue))
		dev_warn(mEp->udc->dev, "enabling a non-empty endpoint!\n");

	/* cache direction, number, type and max packet size from the
	 * endpoint descriptor */
	mEp->dir = usb_endpoint_dir_in(desc) ? TX : RX;
	mEp->num = usb_endpoint_num(desc);
	mEp->type = usb_endpoint_type(desc);

	mEp->ep.maxpacket = usb_endpoint_maxp(desc);

	dbg_event(_usb_addr(mEp), "ENABLE", 0);

	/* program the queue-head capabilities field */
	mEp->qh.ptr->cap = 0;
	if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
		mEp->qh.ptr->cap |= QH_IOS;	/* interrupt on setup */
	else if (mEp->type == USB_ENDPOINT_XFER_ISOC)
		mEp->qh.ptr->cap &= ~QH_MULT;	/* no-op: cap is 0 here */
	else
		mEp->qh.ptr->cap &= ~QH_ZLT;	/* no-op: cap is 0 here */

	mEp->qh.ptr->cap |=
		(mEp->ep.maxpacket << ffs_nr(QH_MAX_PKT)) & QH_MAX_PKT;
	mEp->qh.ptr->td.next |= TD_TERMINATE;   /* needed? */

	/*
	 * Enable endpoints in the HW other than ep0 as ep0
	 * is always enabled
	 */
	if (mEp->num)
		retval |= hw_ep_enable(mEp->udc, mEp->num, mEp->dir, mEp->type);

	spin_unlock_irqrestore(mEp->lock, flags);
	return retval;
}
  933. /**
  934. * ep_disable: endpoint is no longer usable
  935. *
  936. * Check usb_ep_disable() at "usb_gadget.h" for details
  937. */
  938. static int ep_disable(struct usb_ep *ep)
  939. {
  940. struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
  941. int direction, retval = 0;
  942. unsigned long flags;
  943. if (ep == NULL)
  944. return -EINVAL;
  945. else if (mEp->ep.desc == NULL)
  946. return -EBUSY;
  947. spin_lock_irqsave(mEp->lock, flags);
  948. /* only internal SW should disable ctrl endpts */
  949. direction = mEp->dir;
  950. do {
  951. dbg_event(_usb_addr(mEp), "DISABLE", 0);
  952. retval |= _ep_nuke(mEp);
  953. retval |= hw_ep_disable(mEp->udc, mEp->num, mEp->dir);
  954. if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
  955. mEp->dir = (mEp->dir == TX) ? RX : TX;
  956. } while (mEp->dir != direction);
  957. mEp->ep.desc = NULL;
  958. spin_unlock_irqrestore(mEp->lock, flags);
  959. return retval;
  960. }
  961. /**
  962. * ep_alloc_request: allocate a request object to use with this endpoint
  963. *
  964. * Check usb_ep_alloc_request() at "usb_gadget.h" for details
  965. */
  966. static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
  967. {
  968. struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
  969. struct ci13xxx_req *mReq = NULL;
  970. if (ep == NULL)
  971. return NULL;
  972. mReq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags);
  973. if (mReq != NULL) {
  974. INIT_LIST_HEAD(&mReq->queue);
  975. mReq->req.dma = DMA_ADDR_INVALID;
  976. mReq->ptr = dma_pool_alloc(mEp->td_pool, gfp_flags,
  977. &mReq->dma);
  978. if (mReq->ptr == NULL) {
  979. kfree(mReq);
  980. mReq = NULL;
  981. }
  982. }
  983. dbg_event(_usb_addr(mEp), "ALLOC", mReq == NULL);
  984. return (mReq == NULL) ? NULL : &mReq->req;
  985. }
  986. /**
  987. * ep_free_request: frees a request object
  988. *
  989. * Check usb_ep_free_request() at "usb_gadget.h" for details
  990. */
  991. static void ep_free_request(struct usb_ep *ep, struct usb_request *req)
  992. {
  993. struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
  994. struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
  995. unsigned long flags;
  996. if (ep == NULL || req == NULL) {
  997. return;
  998. } else if (!list_empty(&mReq->queue)) {
  999. dev_err(mEp->udc->dev, "freeing queued request\n");
  1000. return;
  1001. }
  1002. spin_lock_irqsave(mEp->lock, flags);
  1003. if (mReq->ptr)
  1004. dma_pool_free(mEp->td_pool, mReq->ptr, mReq->dma);
  1005. kfree(mReq);
  1006. dbg_event(_usb_addr(mEp), "FREE", 0);
  1007. spin_unlock_irqrestore(mEp->lock, flags);
  1008. }
/**
 * ep_queue: queues (submits) an I/O request to an endpoint
 *
 * Check usb_ep_queue()* at usb_gadget.h" for details
 */
static int ep_queue(struct usb_ep *ep, struct usb_request *req,
		    gfp_t __maybe_unused gfp_flags)
{
	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
	struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
	struct ci13xxx *udc = mEp->udc;
	int retval = 0;
	unsigned long flags;

	if (ep == NULL || req == NULL || mEp->ep.desc == NULL)
		return -EINVAL;

	spin_lock_irqsave(mEp->lock, flags);

	if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
		/*
		 * ep0 data stages go on the endpoint matching the current
		 * control-transfer direction, regardless of which ep the
		 * caller queued on.
		 */
		if (req->length)
			mEp = (udc->ep0_dir == RX) ?
			       udc->ep0out : udc->ep0in;
		if (!list_empty(&mEp->qh.queue)) {
			/* a previous control transfer is still pending */
			_ep_nuke(mEp);
			retval = -EOVERFLOW;
			dev_warn(mEp->udc->dev, "endpoint ctrl %X nuked\n",
				 _usb_addr(mEp));
		}
	}

	/* first nuke then test link, e.g. previous status has not sent */
	if (!list_empty(&mReq->queue)) {
		retval = -EBUSY;
		dev_err(mEp->udc->dev, "request already in queue\n");
		goto done;
	}

	/* requests larger than 4 pages are truncated — presumably the
	 * per-request TD layout covers at most 4 pages; TODO confirm */
	if (req->length > 4 * CI13XXX_PAGE_SIZE) {
		req->length = 4 * CI13XXX_PAGE_SIZE;
		retval = -EMSGSIZE;
		dev_warn(mEp->udc->dev, "request length truncated\n");
	}

	dbg_queue(_usb_addr(mEp), req, retval);

	/* push request */
	mReq->req.status = -EINPROGRESS;
	mReq->req.actual = 0;

	retval = _hardware_enqueue(mEp, mReq);

	if (retval == -EALREADY) {
		/* -EALREADY from _hardware_enqueue is not treated as an
		 * error here */
		dbg_event(_usb_addr(mEp), "QUEUE", retval);
		retval = 0;
	}
	if (!retval)
		list_add_tail(&mReq->queue, &mEp->qh.queue);

done:
	spin_unlock_irqrestore(mEp->lock, flags);
	return retval;
}
/**
 * ep_dequeue: dequeues (cancels, unlinks) an I/O request from an endpoint
 *
 * Check usb_ep_dequeue() at "usb_gadget.h" for details
 */
static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
	struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
	unsigned long flags;

	/* only a request already handed to the hardware (status
	 * -EALREADY) and still linked on the endpoint can be cancelled */
	if (ep == NULL || req == NULL || mReq->req.status != -EALREADY ||
	    mEp->ep.desc == NULL || list_empty(&mReq->queue) ||
	    list_empty(&mEp->qh.queue))
		return -EINVAL;

	spin_lock_irqsave(mEp->lock, flags);

	dbg_event(_usb_addr(mEp), "DEQUEUE", 0);

	hw_ep_flush(mEp->udc, mEp->num, mEp->dir);

	/* pop request */
	list_del_init(&mReq->queue);
	if (mReq->map) {
		/* undo the DMA mapping created when the request was queued */
		dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length,
				 mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		mReq->req.dma = DMA_ADDR_INVALID;
		mReq->map = 0;
	}
	req->status = -ECONNRESET;

	if (mReq->req.complete != NULL) {
		/* drop the lock around the gadget's completion callback,
		 * which may re-enter this driver */
		spin_unlock(mEp->lock);
		mReq->req.complete(&mEp->ep, &mReq->req);
		spin_lock(mEp->lock);
	}

	spin_unlock_irqrestore(mEp->lock, flags);
	return 0;
}
/**
 * ep_set_halt: sets the endpoint halt feature
 *
 * Check usb_ep_set_halt() at "usb_gadget.h" for details
 */
static int ep_set_halt(struct usb_ep *ep, int value)
{
	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
	int direction, retval = 0;
	unsigned long flags;

	if (ep == NULL || mEp->ep.desc == NULL)
		return -EINVAL;

	spin_lock_irqsave(mEp->lock, flags);

#ifndef STALL_IN
	/* g_file_storage MS compliant but g_zero fails chapter 9 compliance */
	/* refuse to stall a bulk-IN endpoint that still has queued requests */
	if (value && mEp->type == USB_ENDPOINT_XFER_BULK && mEp->dir == TX &&
	    !list_empty(&mEp->qh.queue)) {
		spin_unlock_irqrestore(mEp->lock, flags);
		return -EAGAIN;
	}
#endif

	/*
	 * A control endpoint is (un)halted in both directions: flip
	 * mEp->dir and loop until back at the starting direction.
	 */
	direction = mEp->dir;
	do {
		dbg_event(_usb_addr(mEp), "HALT", value);
		retval |= hw_ep_set_halt(mEp->udc, mEp->num, mEp->dir, value);

		if (!value)
			mEp->wedge = 0;	/* clearing halt also un-wedges */

		if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
			mEp->dir = (mEp->dir == TX) ? RX : TX;
	} while (mEp->dir != direction);

	spin_unlock_irqrestore(mEp->lock, flags);
	return retval;
}
  1129. /**
  1130. * ep_set_wedge: sets the halt feature and ignores clear requests
  1131. *
  1132. * Check usb_ep_set_wedge() at "usb_gadget.h" for details
  1133. */
  1134. static int ep_set_wedge(struct usb_ep *ep)
  1135. {
  1136. struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
  1137. unsigned long flags;
  1138. if (ep == NULL || mEp->ep.desc == NULL)
  1139. return -EINVAL;
  1140. spin_lock_irqsave(mEp->lock, flags);
  1141. dbg_event(_usb_addr(mEp), "WEDGE", 0);
  1142. mEp->wedge = 1;
  1143. spin_unlock_irqrestore(mEp->lock, flags);
  1144. return usb_ep_set_halt(ep);
  1145. }
  1146. /**
  1147. * ep_fifo_flush: flushes contents of a fifo
  1148. *
  1149. * Check usb_ep_fifo_flush() at "usb_gadget.h" for details
  1150. */
  1151. static void ep_fifo_flush(struct usb_ep *ep)
  1152. {
  1153. struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
  1154. unsigned long flags;
  1155. if (ep == NULL) {
  1156. dev_err(mEp->udc->dev, "%02X: -EINVAL\n", _usb_addr(mEp));
  1157. return;
  1158. }
  1159. spin_lock_irqsave(mEp->lock, flags);
  1160. dbg_event(_usb_addr(mEp), "FFLUSH", 0);
  1161. hw_ep_flush(mEp->udc, mEp->num, mEp->dir);
  1162. spin_unlock_irqrestore(mEp->lock, flags);
  1163. }
/**
 * Endpoint-specific part of the API to the USB controller hardware
 * Check "usb_gadget.h" for details
 */
/* per-endpoint operations handed to the gadget core */
static const struct usb_ep_ops usb_ep_ops = {
	.enable	       = ep_enable,
	.disable       = ep_disable,
	.alloc_request = ep_alloc_request,
	.free_request  = ep_free_request,
	.queue	       = ep_queue,
	.dequeue       = ep_dequeue,
	.set_halt      = ep_set_halt,
	.set_wedge     = ep_set_wedge,
	.fifo_flush    = ep_fifo_flush,
};
  1179. /******************************************************************************
  1180. * GADGET block
  1181. *****************************************************************************/
  1182. static int ci13xxx_vbus_session(struct usb_gadget *_gadget, int is_active)
  1183. {
  1184. struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
  1185. unsigned long flags;
  1186. int gadget_ready = 0;
  1187. if (!(udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS))
  1188. return -EOPNOTSUPP;
  1189. spin_lock_irqsave(&udc->lock, flags);
  1190. udc->vbus_active = is_active;
  1191. if (udc->driver)
  1192. gadget_ready = 1;
  1193. spin_unlock_irqrestore(&udc->lock, flags);
  1194. if (gadget_ready) {
  1195. if (is_active) {
  1196. pm_runtime_get_sync(&_gadget->dev);
  1197. hw_device_reset(udc);
  1198. hw_device_state(udc, udc->ep0out->qh.dma);
  1199. } else {
  1200. hw_device_state(udc, 0);
  1201. if (udc->udc_driver->notify_event)
  1202. udc->udc_driver->notify_event(udc,
  1203. CI13XXX_CONTROLLER_STOPPED_EVENT);
  1204. _gadget_stop_activity(&udc->gadget);
  1205. pm_runtime_put_sync(&_gadget->dev);
  1206. }
  1207. }
  1208. return 0;
  1209. }
  1210. static int ci13xxx_wakeup(struct usb_gadget *_gadget)
  1211. {
  1212. struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
  1213. unsigned long flags;
  1214. int ret = 0;
  1215. spin_lock_irqsave(&udc->lock, flags);
  1216. if (!udc->remote_wakeup) {
  1217. ret = -EOPNOTSUPP;
  1218. goto out;
  1219. }
  1220. if (!hw_read(udc, OP_PORTSC, PORTSC_SUSP)) {
  1221. ret = -EINVAL;
  1222. goto out;
  1223. }
  1224. hw_write(udc, OP_PORTSC, PORTSC_FPR, PORTSC_FPR);
  1225. out:
  1226. spin_unlock_irqrestore(&udc->lock, flags);
  1227. return ret;
  1228. }
  1229. static int ci13xxx_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
  1230. {
  1231. struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
  1232. if (udc->transceiver)
  1233. return usb_phy_set_power(udc->transceiver, mA);
  1234. return -ENOTSUPP;
  1235. }
  1236. static int ci13xxx_start(struct usb_gadget *gadget,
  1237. struct usb_gadget_driver *driver);
  1238. static int ci13xxx_stop(struct usb_gadget *gadget,
  1239. struct usb_gadget_driver *driver);
/**
 * Device operations part of the API to the USB controller hardware,
 * which don't involve endpoints (or i/o)
 * Check "usb_gadget.h" for details
 */
static const struct usb_gadget_ops usb_gadget_ops = {
	.vbus_session	= ci13xxx_vbus_session,
	.wakeup		= ci13xxx_wakeup,
	.vbus_draw	= ci13xxx_vbus_draw,
	.udc_start	= ci13xxx_start,
	.udc_stop	= ci13xxx_stop,
};
  1252. static int init_eps(struct ci13xxx *udc)
  1253. {
  1254. int retval = 0, i, j;
  1255. for (i = 0; i < udc->hw_ep_max/2; i++)
  1256. for (j = RX; j <= TX; j++) {
  1257. int k = i + j * udc->hw_ep_max/2;
  1258. struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[k];
  1259. scnprintf(mEp->name, sizeof(mEp->name), "ep%i%s", i,
  1260. (j == TX) ? "in" : "out");
  1261. mEp->udc = udc;
  1262. mEp->lock = &udc->lock;
  1263. mEp->device = &udc->gadget.dev;
  1264. mEp->td_pool = udc->td_pool;
  1265. mEp->ep.name = mEp->name;
  1266. mEp->ep.ops = &usb_ep_ops;
  1267. mEp->ep.maxpacket = CTRL_PAYLOAD_MAX;
  1268. INIT_LIST_HEAD(&mEp->qh.queue);
  1269. mEp->qh.ptr = dma_pool_alloc(udc->qh_pool, GFP_KERNEL,
  1270. &mEp->qh.dma);
  1271. if (mEp->qh.ptr == NULL)
  1272. retval = -ENOMEM;
  1273. else
  1274. memset(mEp->qh.ptr, 0, sizeof(*mEp->qh.ptr));
  1275. /*
  1276. * set up shorthands for ep0 out and in endpoints,
  1277. * don't add to gadget's ep_list
  1278. */
  1279. if (i == 0) {
  1280. if (j == RX)
  1281. udc->ep0out = mEp;
  1282. else
  1283. udc->ep0in = mEp;
  1284. continue;
  1285. }
  1286. list_add_tail(&mEp->ep.ep_list, &udc->gadget.ep_list);
  1287. }
  1288. return retval;
  1289. }
  1290. /**
  1291. * ci13xxx_start: register a gadget driver
  1292. * @gadget: our gadget
  1293. * @driver: the driver being registered
  1294. *
  1295. * Interrupts are enabled here.
  1296. */
  1297. static int ci13xxx_start(struct usb_gadget *gadget,
  1298. struct usb_gadget_driver *driver)
  1299. {
  1300. struct ci13xxx *udc = container_of(gadget, struct ci13xxx, gadget);
  1301. unsigned long flags;
  1302. int retval = -ENOMEM;
  1303. if (driver->disconnect == NULL)
  1304. return -EINVAL;
  1305. udc->ep0out->ep.desc = &ctrl_endpt_out_desc;
  1306. retval = usb_ep_enable(&udc->ep0out->ep);
  1307. if (retval)
  1308. return retval;
  1309. udc->ep0in->ep.desc = &ctrl_endpt_in_desc;
  1310. retval = usb_ep_enable(&udc->ep0in->ep);
  1311. if (retval)
  1312. return retval;
  1313. spin_lock_irqsave(&udc->lock, flags);
  1314. udc->driver = driver;
  1315. pm_runtime_get_sync(&udc->gadget.dev);
  1316. if (udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) {
  1317. if (udc->vbus_active) {
  1318. if (udc->udc_driver->flags & CI13XXX_REGS_SHARED)
  1319. hw_device_reset(udc);
  1320. } else {
  1321. pm_runtime_put_sync(&udc->gadget.dev);
  1322. goto done;
  1323. }
  1324. }
  1325. retval = hw_device_state(udc, udc->ep0out->qh.dma);
  1326. if (retval)
  1327. pm_runtime_put_sync(&udc->gadget.dev);
  1328. done:
  1329. spin_unlock_irqrestore(&udc->lock, flags);
  1330. return retval;
  1331. }
/**
 * ci13xxx_stop: unregister a gadget driver
 */
static int ci13xxx_stop(struct usb_gadget *gadget,
			struct usb_gadget_driver *driver)
{
	struct ci13xxx *udc = container_of(gadget, struct ci13xxx, gadget);
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);

	/*
	 * Only tear the controller down if it was actually running:
	 * either the pull-up is not VBUS gated, or VBUS is present.
	 */
	if (!(udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) ||
	    udc->vbus_active) {
		hw_device_state(udc, 0);
		if (udc->udc_driver->notify_event)
			udc->udc_driver->notify_event(udc,
				CI13XXX_CONTROLLER_STOPPED_EVENT);
		udc->driver = NULL;
		/* _gadget_stop_activity() is called with the lock dropped;
		 * presumably it can re-enter the driver — TODO confirm */
		spin_unlock_irqrestore(&udc->lock, flags);
		_gadget_stop_activity(&udc->gadget);
		spin_lock_irqsave(&udc->lock, flags);
		pm_runtime_put(&udc->gadget.dev);
	}

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
  1356. /******************************************************************************
  1357. * BUS block
  1358. *****************************************************************************/
/**
 * udc_irq: udc interrupt handler
 *
 * This function returns IRQ_HANDLED if the IRQ has been handled
 * It locks access to registers
 */
static irqreturn_t udc_irq(struct ci13xxx *udc)
{
	irqreturn_t retval;
	u32 intr;

	if (udc == NULL)
		return IRQ_HANDLED;

	spin_lock(&udc->lock);

	if (udc->udc_driver->flags & CI13XXX_REGS_SHARED) {
		/* shared register block: ignore IRQs raised while the
		 * controller is not in device mode */
		if (hw_read(udc, OP_USBMODE, USBMODE_CM) !=
		    USBMODE_CM_DEVICE) {
			spin_unlock(&udc->lock);
			return IRQ_NONE;
		}
	}
	intr = hw_test_and_clear_intr_active(udc);
	dbg_interrupt(intr);

	if (intr) {
		/* order defines priority - do NOT change it */
		if (USBi_URI & intr)	/* USB reset */
			isr_reset_handler(udc);

		if (USBi_PCI & intr) {	/* port change */
			udc->gadget.speed = hw_port_is_high_speed(udc) ?
				USB_SPEED_HIGH : USB_SPEED_FULL;
			if (udc->suspended && udc->driver->resume) {
				/* resume callback runs without the lock */
				spin_unlock(&udc->lock);
				udc->driver->resume(&udc->gadget);
				spin_lock(&udc->lock);
				udc->suspended = 0;
			}
		}

		if (USBi_UI  & intr)	/* transfer complete / setup */
			isr_tr_complete_handler(udc);

		if (USBi_SLI & intr) {	/* bus suspend */
			if (udc->gadget.speed != USB_SPEED_UNKNOWN &&
			    udc->driver->suspend) {
				udc->suspended = 1;
				/* suspend callback runs without the lock */
				spin_unlock(&udc->lock);
				udc->driver->suspend(&udc->gadget);
				spin_lock(&udc->lock);
			}
		}
		retval = IRQ_HANDLED;
	} else {
		retval = IRQ_NONE;
	}
	spin_unlock(&udc->lock);

	return retval;
}
/**
 * udc_release: driver release function
 * @dev: device
 *
 * Currently does nothing
 */
static void udc_release(struct device *dev)
{
	/*
	 * Intentionally empty.  Presumably present only so the gadget
	 * device has a release callback (udc_start() installs it as
	 * udc->gadget.dev.release) — TODO confirm.
	 */
}
  1422. /**
  1423. * udc_start: initialize gadget role
  1424. * @udc: chipidea controller
  1425. */
  1426. static int udc_start(struct ci13xxx *udc)
  1427. {
  1428. struct device *dev = udc->dev;
  1429. int retval = 0;
  1430. if (!udc)
  1431. return -EINVAL;
  1432. spin_lock_init(&udc->lock);
  1433. udc->gadget.ops = &usb_gadget_ops;
  1434. udc->gadget.speed = USB_SPEED_UNKNOWN;
  1435. udc->gadget.max_speed = USB_SPEED_HIGH;
  1436. udc->gadget.is_otg = 0;
  1437. udc->gadget.name = udc->udc_driver->name;
  1438. INIT_LIST_HEAD(&udc->gadget.ep_list);
  1439. dev_set_name(&udc->gadget.dev, "gadget");
  1440. udc->gadget.dev.dma_mask = dev->dma_mask;
  1441. udc->gadget.dev.coherent_dma_mask = dev->coherent_dma_mask;
  1442. udc->gadget.dev.parent = dev;
  1443. udc->gadget.dev.release = udc_release;
  1444. /* alloc resources */
  1445. udc->qh_pool = dma_pool_create("ci13xxx_qh", dev,
  1446. sizeof(struct ci13xxx_qh),
  1447. 64, CI13XXX_PAGE_SIZE);
  1448. if (udc->qh_pool == NULL)
  1449. return -ENOMEM;
  1450. udc->td_pool = dma_pool_create("ci13xxx_td", dev,
  1451. sizeof(struct ci13xxx_td),
  1452. 64, CI13XXX_PAGE_SIZE);
  1453. if (udc->td_pool == NULL) {
  1454. retval = -ENOMEM;
  1455. goto free_qh_pool;
  1456. }
  1457. retval = init_eps(udc);
  1458. if (retval)
  1459. goto free_pools;
  1460. udc->gadget.ep0 = &udc->ep0in->ep;
  1461. udc->transceiver = usb_get_transceiver();
  1462. if (udc->udc_driver->flags & CI13XXX_REQUIRE_TRANSCEIVER) {
  1463. if (udc->transceiver == NULL) {
  1464. retval = -ENODEV;
  1465. goto free_pools;
  1466. }
  1467. }
  1468. if (!(udc->udc_driver->flags & CI13XXX_REGS_SHARED)) {
  1469. retval = hw_device_reset(udc);
  1470. if (retval)
  1471. goto put_transceiver;
  1472. }
  1473. retval = device_register(&udc->gadget.dev);
  1474. if (retval) {
  1475. put_device(&udc->gadget.dev);
  1476. goto put_transceiver;
  1477. }
  1478. retval = dbg_create_files(&udc->gadget.dev);
  1479. if (retval)
  1480. goto unreg_device;
  1481. if (udc->transceiver) {
  1482. retval = otg_set_peripheral(udc->transceiver->otg,
  1483. &udc->gadget);
  1484. if (retval)
  1485. goto remove_dbg;
  1486. }
  1487. retval = usb_add_gadget_udc(dev, &udc->gadget);
  1488. if (retval)
  1489. goto remove_trans;
  1490. pm_runtime_no_callbacks(&udc->gadget.dev);
  1491. pm_runtime_enable(&udc->gadget.dev);
  1492. return retval;
  1493. remove_trans:
  1494. if (udc->transceiver) {
  1495. otg_set_peripheral(udc->transceiver->otg, &udc->gadget);
  1496. usb_put_transceiver(udc->transceiver);
  1497. }
  1498. dev_err(dev, "error = %i\n", retval);
  1499. remove_dbg:
  1500. dbg_remove_files(&udc->gadget.dev);
  1501. unreg_device:
  1502. device_unregister(&udc->gadget.dev);
  1503. put_transceiver:
  1504. if (udc->transceiver)
  1505. usb_put_transceiver(udc->transceiver);
  1506. free_pools:
  1507. dma_pool_destroy(udc->td_pool);
  1508. free_qh_pool:
  1509. dma_pool_destroy(udc->qh_pool);
  1510. return retval;
  1511. }
/**
 * udc_stop: parent remove must call this to remove UDC
 *
 * No interrupts active, the IRQ has been released
 */
static void udc_stop(struct ci13xxx *udc)
{
	int i;

	if (udc == NULL)
		return;

	usb_del_gadget_udc(&udc->gadget);

	/* free the per-endpoint queue heads before destroying the pools */
	for (i = 0; i < udc->hw_ep_max; i++) {
		struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];

		dma_pool_free(udc->qh_pool, mEp->qh.ptr, mEp->qh.dma);
	}
	dma_pool_destroy(udc->td_pool);
	dma_pool_destroy(udc->qh_pool);

	if (udc->transceiver) {
		/* unbind from and release the transceiver */
		otg_set_peripheral(udc->transceiver->otg, NULL);
		usb_put_transceiver(udc->transceiver);
	}
	dbg_remove_files(&udc->gadget.dev);
	device_unregister(&udc->gadget.dev);
	/* my kobject is dynamic, I swear! */
	memset(&udc->gadget, 0, sizeof(udc->gadget));
}
  1538. /**
  1539. * ci_hdrc_gadget_init - initialize device related bits
  1540. * ci: the controller
  1541. *
  1542. * This function enables the gadget role, if the device is "device capable".
  1543. */
  1544. int ci_hdrc_gadget_init(struct ci13xxx *ci)
  1545. {
  1546. struct ci_role_driver *rdrv;
  1547. if (!hw_read(ci, CAP_DCCPARAMS, DCCPARAMS_DC))
  1548. return -ENXIO;
  1549. rdrv = devm_kzalloc(ci->dev, sizeof(struct ci_role_driver), GFP_KERNEL);
  1550. if (!rdrv)
  1551. return -ENOMEM;
  1552. rdrv->start = udc_start;
  1553. rdrv->stop = udc_stop;
  1554. rdrv->irq = udc_irq;
  1555. rdrv->name = "gadget";
  1556. ci->roles[CI_ROLE_GADGET] = rdrv;
  1557. return 0;
  1558. }