  1. /*
  2. * drivers/usb/gadget/s3c_udc_otg_xfer_dma.c
  3. * Samsung S3C on-chip full/high speed USB OTG 2.0 device controllers
  4. *
  5. * Copyright (C) 2009 for Samsung Electronics
  6. *
  7. * BSP Support for Samsung's UDC driver
  8. * available at:
  9. * git://git.kernel.org/pub/scm/linux/kernel/git/kki_ap/linux-2.6-samsung.git
  10. *
  11. * State machine bugfixes:
  12. * Marek Szyprowski <m.szyprowski@samsung.com>
  13. *
  14. * Ported to u-boot:
  15. * Marek Szyprowski <m.szyprowski@samsung.com>
  16. * Lukasz Majewski <l.majewski@samsumg.com>
  17. *
  18. * This program is free software; you can redistribute it and/or modify
  19. * it under the terms of the GNU General Public License as published by
  20. * the Free Software Foundation; either version 2 of the License, or
  21. * (at your option) any later version.
  22. *
  23. * This program is distributed in the hope that it will be useful,
  24. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  25. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  26. * GNU General Public License for more details.
  27. *
  28. * You should have received a copy of the GNU General Public License
  29. * along with this program; if not, write to the Free Software
  30. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  31. *
  32. */
/* Endpoint number recorded by a ClearFeature(HALT) request; used by the
 * IN-interrupt path to restart that endpoint's queue once EP0 completes. */
static u8 clear_feature_num;
/* Set when a ClearFeature(HALT) has been handled; cleared after the
 * deferred queue restart in process_ep_in_intr(). */
int clear_feature_flag;
/* Bulk-Only Mass Storage Reset (class-specific request) */
#define GET_MAX_LUN_REQUEST 0xFE
#define BOT_RESET_REQUEST 0xFF
/*
 * Queue a zero-length IN packet on EP0 (status stage of a control
 * transfer): arm the EP0 IN DMA with packet count 1 and transfer size 0,
 * then enable the endpoint and clear NAK so the core answers the next
 * IN token with a ZLP.
 */
static inline void s3c_udc_ep0_zlp(struct s3c_udc *dev)
{
	u32 ep_ctrl;

	/* Make the control buffer coherent before handing it to DMA */
	flush_dcache_range((unsigned long) usb_ctrl_dma_addr,
			   (unsigned long) usb_ctrl_dma_addr
			   + DMA_BUFFER_SIZE);

	writel(usb_ctrl_dma_addr, &reg->in_endp[EP0_CON].diepdma);
	/* PKT_CNT = 1 with XFER_SIZE = 0 -> zero-length packet */
	writel(DIEPT_SIZ_PKT_CNT(1), &reg->in_endp[EP0_CON].dieptsiz);

	ep_ctrl = readl(&reg->in_endp[EP0_CON].diepctl);
	writel(ep_ctrl|DEPCTL_EPENA|DEPCTL_CNAK,
	       &reg->in_endp[EP0_CON].diepctl);

	DEBUG_EP0("%s:EP0 ZLP DIEPCTL0 = 0x%x\n",
		  __func__, readl(&reg->in_endp[EP0_CON].diepctl));
	/* complete_tx() advances the state machine when the IN completes */
	dev->ep0state = WAIT_FOR_IN_COMPLETE;
}
/*
 * Re-arm EP0 OUT to receive the next SETUP packet: program the control
 * DMA buffer and a one-packet transfer of sizeof(struct usb_ctrlrequest),
 * then enable the endpoint.  Called whenever the EP0 state machine
 * returns to WAIT_FOR_SETUP.
 */
void s3c_udc_pre_setup(void)
{
	u32 ep_ctrl;

	DEBUG_IN_EP("%s : Prepare Setup packets.\n", __func__);

	/* Drop stale cache lines so the incoming SETUP data is visible */
	invalidate_dcache_range((unsigned long) usb_ctrl_dma_addr,
				(unsigned long) usb_ctrl_dma_addr
				+ DMA_BUFFER_SIZE);

	writel(DOEPT_SIZ_PKT_CNT(1) | sizeof(struct usb_ctrlrequest),
	       &reg->out_endp[EP0_CON].doeptsiz);
	writel(usb_ctrl_dma_addr, &reg->out_endp[EP0_CON].doepdma);

	ep_ctrl = readl(&reg->out_endp[EP0_CON].doepctl);
	/* Enable only - NAK stays set; SETUP packets bypass NAK anyway */
	writel(ep_ctrl|DEPCTL_EPENA, &reg->out_endp[EP0_CON].doepctl);

	DEBUG_EP0("%s:EP0 ZLP DIEPCTL0 = 0x%x\n",
		  __func__, readl(&reg->in_endp[EP0_CON].diepctl));
	DEBUG_EP0("%s:EP0 ZLP DOEPCTL0 = 0x%x\n",
		  __func__, readl(&reg->out_endp[EP0_CON].doepctl));
}
/*
 * Arm EP0 OUT for the status (complete-out) stage of a control-IN
 * transfer: same programming as s3c_udc_pre_setup(), but with CNAK set
 * so the host's zero-length OUT status packet is accepted immediately.
 */
static inline void s3c_ep0_complete_out(void)
{
	u32 ep_ctrl;

	DEBUG_EP0("%s:EP0 ZLP DIEPCTL0 = 0x%x\n",
		  __func__, readl(&reg->in_endp[EP0_CON].diepctl));
	DEBUG_EP0("%s:EP0 ZLP DOEPCTL0 = 0x%x\n",
		  __func__, readl(&reg->out_endp[EP0_CON].doepctl));

	DEBUG_IN_EP("%s : Prepare Complete Out packet.\n", __func__);

	/* Drop stale cache lines over the control DMA buffer */
	invalidate_dcache_range((unsigned long) usb_ctrl_dma_addr,
				(unsigned long) usb_ctrl_dma_addr
				+ DMA_BUFFER_SIZE);

	writel(DOEPT_SIZ_PKT_CNT(1) | sizeof(struct usb_ctrlrequest),
	       &reg->out_endp[EP0_CON].doeptsiz);
	writel(usb_ctrl_dma_addr, &reg->out_endp[EP0_CON].doepdma);

	ep_ctrl = readl(&reg->out_endp[EP0_CON].doepctl);
	writel(ep_ctrl|DEPCTL_EPENA|DEPCTL_CNAK,
	       &reg->out_endp[EP0_CON].doepctl);

	DEBUG_EP0("%s:EP0 ZLP DIEPCTL0 = 0x%x\n",
		  __func__, readl(&reg->in_endp[EP0_CON].diepctl));
	DEBUG_EP0("%s:EP0 ZLP DOEPCTL0 = 0x%x\n",
		  __func__, readl(&reg->out_endp[EP0_CON].doepctl));
}
  92. static int setdma_rx(struct s3c_ep *ep, struct s3c_request *req)
  93. {
  94. u32 *buf, ctrl;
  95. u32 length, pktcnt;
  96. u32 ep_num = ep_index(ep);
  97. buf = req->req.buf + req->req.actual;
  98. length = min(req->req.length - req->req.actual, (int)ep->ep.maxpacket);
  99. ep->len = length;
  100. ep->dma_buf = buf;
  101. invalidate_dcache_range((unsigned long) ep->dev->dma_buf[ep_num],
  102. (unsigned long) ep->dev->dma_buf[ep_num]
  103. + DMA_BUFFER_SIZE);
  104. if (length == 0)
  105. pktcnt = 1;
  106. else
  107. pktcnt = (length - 1)/(ep->ep.maxpacket) + 1;
  108. pktcnt = 1;
  109. ctrl = readl(&reg->out_endp[ep_num].doepctl);
  110. writel(the_controller->dma_addr[ep_index(ep)+1],
  111. &reg->out_endp[ep_num].doepdma);
  112. writel(DOEPT_SIZ_PKT_CNT(pktcnt) | DOEPT_SIZ_XFER_SIZE(length),
  113. &reg->out_endp[ep_num].doeptsiz);
  114. writel(DEPCTL_EPENA|DEPCTL_CNAK|ctrl, &reg->out_endp[ep_num].doepctl);
  115. DEBUG_OUT_EP("%s: EP%d RX DMA start : DOEPDMA = 0x%x,"
  116. "DOEPTSIZ = 0x%x, DOEPCTL = 0x%x\n"
  117. "\tbuf = 0x%p, pktcnt = %d, xfersize = %d\n",
  118. __func__, ep_num,
  119. readl(&reg->out_endp[ep_num].doepdma),
  120. readl(&reg->out_endp[ep_num].doeptsiz),
  121. readl(&reg->out_endp[ep_num].doepctl),
  122. buf, pktcnt, length);
  123. return 0;
  124. }
  125. int setdma_tx(struct s3c_ep *ep, struct s3c_request *req)
  126. {
  127. u32 *buf, ctrl = 0;
  128. u32 length, pktcnt;
  129. u32 ep_num = ep_index(ep);
  130. u32 *p = the_controller->dma_buf[ep_index(ep)+1];
  131. buf = req->req.buf + req->req.actual;
  132. length = req->req.length - req->req.actual;
  133. if (ep_num == EP0_CON)
  134. length = min_t(length, (u32)ep_maxpacket(ep));
  135. ep->len = length;
  136. ep->dma_buf = buf;
  137. memcpy(p, ep->dma_buf, length);
  138. flush_dcache_range((unsigned long) p ,
  139. (unsigned long) p + DMA_BUFFER_SIZE);
  140. if (length == 0)
  141. pktcnt = 1;
  142. else
  143. pktcnt = (length - 1)/(ep->ep.maxpacket) + 1;
  144. /* Flush the endpoint's Tx FIFO */
  145. writel(TX_FIFO_NUMBER(ep->fifo_num), &reg->grstctl);
  146. writel(TX_FIFO_NUMBER(ep->fifo_num) | TX_FIFO_FLUSH, &reg->grstctl);
  147. while (readl(&reg->grstctl) & TX_FIFO_FLUSH)
  148. ;
  149. writel(the_controller->dma_addr[ep_index(ep)+1],
  150. &reg->in_endp[ep_num].diepdma);
  151. writel(DIEPT_SIZ_PKT_CNT(pktcnt) | DIEPT_SIZ_XFER_SIZE(length),
  152. &reg->in_endp[ep_num].dieptsiz);
  153. ctrl = readl(&reg->in_endp[ep_num].diepctl);
  154. /* Write the FIFO number to be used for this endpoint */
  155. ctrl &= DIEPCTL_TX_FIFO_NUM_MASK;
  156. ctrl |= DIEPCTL_TX_FIFO_NUM(ep->fifo_num);
  157. /* Clear reserved (Next EP) bits */
  158. ctrl = (ctrl&~(EP_MASK<<DEPCTL_NEXT_EP_BIT));
  159. writel(DEPCTL_EPENA|DEPCTL_CNAK|ctrl, &reg->in_endp[ep_num].diepctl);
  160. DEBUG_IN_EP("%s:EP%d TX DMA start : DIEPDMA0 = 0x%x,"
  161. "DIEPTSIZ0 = 0x%x, DIEPCTL0 = 0x%x\n"
  162. "\tbuf = 0x%p, pktcnt = %d, xfersize = %d\n",
  163. __func__, ep_num,
  164. readl(&reg->in_endp[ep_num].diepdma),
  165. readl(&reg->in_endp[ep_num].dieptsiz),
  166. readl(&reg->in_endp[ep_num].diepctl),
  167. buf, pktcnt, length);
  168. return length;
  169. }
/*
 * RX DMA completion handler for an OUT endpoint: copy the received data
 * from the bounce buffer into the request buffer, advance req->actual,
 * then either re-arm for the next packet or complete the request (with
 * an EP0 status ZLP when this was the data stage of a control-OUT).
 */
static void complete_rx(struct s3c_udc *dev, u8 ep_num)
{
	struct s3c_ep *ep = &dev->ep[ep_num];
	struct s3c_request *req = NULL;
	u32 ep_tsr = 0, xfer_size = 0, is_short = 0;
	u32 *p = the_controller->dma_buf[ep_index(ep)+1];

	if (list_empty(&ep->queue)) {
		DEBUG_OUT_EP("%s: RX DMA done : NULL REQ on OUT EP-%d\n",
			     __func__, ep_num);
		return;
	}

	req = list_entry(ep->queue.next, struct s3c_request, queue);
	ep_tsr = readl(&reg->out_endp[ep_num].doeptsiz);

	/* DOEPTSIZ now holds the residue; EP0 has a narrower size field */
	if (ep_num == EP0_CON)
		xfer_size = (ep_tsr & DOEPT_SIZ_XFER_SIZE_MAX_EP0);
	else
		xfer_size = (ep_tsr & DOEPT_SIZ_XFER_SIZE_MAX_EP);

	/* bytes actually received = programmed length - residue */
	xfer_size = ep->len - xfer_size;

	invalidate_dcache_range((unsigned long) p,
				(unsigned long) p + DMA_BUFFER_SIZE);
	/* NOTE(review): copies the full programmed length even for a short
	 * packet (xfer_size < ep->len); the trailing bytes are stale bounce
	 * data but stay within the caller's maxpacket-sized reservation -
	 * confirm the gadget layer guarantees that headroom. */
	memcpy(ep->dma_buf, p, ep->len);

	req->req.actual += min(xfer_size, req->req.length - req->req.actual);
	is_short = (xfer_size < ep->ep.maxpacket);

	DEBUG_OUT_EP("%s: RX DMA done : ep = %d, rx bytes = %d/%d, "
		     "is_short = %d, DOEPTSIZ = 0x%x, remained bytes = %d\n",
		     __func__, ep_num, req->req.actual, req->req.length,
		     is_short, ep_tsr, xfer_size);

	/* short packet or byte count reached -> transfer finished */
	if (is_short || req->req.actual == req->req.length) {
		if (ep_num == EP0_CON && dev->ep0state == DATA_STATE_RECV) {
			DEBUG_OUT_EP(" => Send ZLP\n");
			s3c_udc_ep0_zlp(dev);
			/* packet will be completed in complete_tx() */
			dev->ep0state = WAIT_FOR_IN_COMPLETE;
		} else {
			done(ep, req, 0);
			/* immediately kick the next queued request */
			if (!list_empty(&ep->queue)) {
				req = list_entry(ep->queue.next,
						 struct s3c_request, queue);
				DEBUG_OUT_EP("%s: Next Rx request start...\n",
					     __func__);
				setdma_rx(ep, req);
			}
		}
	} else
		setdma_rx(ep, req);
}
/*
 * TX DMA completion handler for an IN endpoint: account the bytes just
 * sent, drive the EP0 control state machine (data stage continuation,
 * status stages), and start the next queued request if any.
 */
static void complete_tx(struct s3c_udc *dev, u8 ep_num)
{
	struct s3c_ep *ep = &dev->ep[ep_num];
	struct s3c_request *req;
	u32 ep_tsr = 0, xfer_size = 0, is_short = 0;
	u32 last;

	/* GET_STATUS data was just sent; move on to the OUT status stage
	 * without touching the (empty) request queue */
	if (dev->ep0state == WAIT_FOR_NULL_COMPLETE) {
		dev->ep0state = WAIT_FOR_OUT_COMPLETE;
		s3c_ep0_complete_out();
		return;
	}

	if (list_empty(&ep->queue)) {
		DEBUG_IN_EP("%s: TX DMA done : NULL REQ on IN EP-%d\n",
			    __func__, ep_num);
		return;
	}

	req = list_entry(ep->queue.next, struct s3c_request, queue);
	ep_tsr = readl(&reg->in_endp[ep_num].dieptsiz);

	/* The whole programmed length is assumed sent on completion */
	xfer_size = ep->len;
	is_short = (xfer_size < ep->ep.maxpacket);
	req->req.actual += min(xfer_size, req->req.length - req->req.actual);

	DEBUG_IN_EP("%s: TX DMA done : ep = %d, tx bytes = %d/%d, "
		    "is_short = %d, DIEPTSIZ = 0x%x, remained bytes = %d\n",
		    __func__, ep_num, req->req.actual, req->req.length,
		    is_short, ep_tsr, xfer_size);

	if (ep_num == 0) {
		/* EP0: advance the control state machine */
		if (dev->ep0state == DATA_STATE_XMIT) {
			DEBUG_IN_EP("%s: ep_num = %d, ep0stat =="
				    "DATA_STATE_XMIT\n",
				    __func__, ep_num);
			/* queue the next chunk of the data stage */
			last = write_fifo_ep0(ep, req);
			if (last)
				dev->ep0state = WAIT_FOR_COMPLETE;
		} else if (dev->ep0state == WAIT_FOR_IN_COMPLETE) {
			/* status ZLP sent - control transfer done */
			DEBUG_IN_EP("%s: ep_num = %d, completing request\n",
				    __func__, ep_num);
			done(ep, req, 0);
			dev->ep0state = WAIT_FOR_SETUP;
		} else if (dev->ep0state == WAIT_FOR_COMPLETE) {
			/* data stage done - expect the OUT status stage */
			DEBUG_IN_EP("%s: ep_num = %d, completing request\n",
				    __func__, ep_num);
			done(ep, req, 0);
			dev->ep0state = WAIT_FOR_OUT_COMPLETE;
			s3c_ep0_complete_out();
		} else {
			DEBUG_IN_EP("%s: ep_num = %d, invalid ep state\n",
				    __func__, ep_num);
		}
		return;
	}

	if (req->req.actual == req->req.length)
		done(ep, req, 0);

	/* kick the next queued request, if any */
	if (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next, struct s3c_request, queue);
		DEBUG_IN_EP("%s: Next Tx request start...\n", __func__);
		setdma_tx(ep, req);
	}
}
  274. static inline void s3c_udc_check_tx_queue(struct s3c_udc *dev, u8 ep_num)
  275. {
  276. struct s3c_ep *ep = &dev->ep[ep_num];
  277. struct s3c_request *req;
  278. DEBUG_IN_EP("%s: Check queue, ep_num = %d\n", __func__, ep_num);
  279. if (!list_empty(&ep->queue)) {
  280. req = list_entry(ep->queue.next, struct s3c_request, queue);
  281. DEBUG_IN_EP("%s: Next Tx request(0x%p) start...\n",
  282. __func__, req);
  283. if (ep_is_in(ep))
  284. setdma_tx(ep, req);
  285. else
  286. setdma_rx(ep, req);
  287. } else {
  288. DEBUG_IN_EP("%s: NULL REQ on IN EP-%d\n", __func__, ep_num);
  289. return;
  290. }
  291. }
/*
 * Dispatch "IN endpoint" interrupts: walk DAINT's IN bits, clear each
 * pending DIEPINT, and run complete_tx() on transfer-done.  EP0 gets
 * extra handling to re-arm SETUP reception, and endpoints halted via
 * ClearFeature are restarted here.
 */
static void process_ep_in_intr(struct s3c_udc *dev)
{
	u32 ep_intr, ep_intr_status;
	u8 ep_num = 0;

	ep_intr = readl(&reg->daint);
	DEBUG_IN_EP("*** %s: EP In interrupt : DAINT = 0x%x\n",
		    __func__, ep_intr);
	ep_intr &= DAINT_MASK;

	/* ep_intr is shifted right each iteration, so bit 0 always
	 * corresponds to the current ep_num */
	while (ep_intr) {
		if (ep_intr & DAINT_IN_EP_INT(1)) {
			ep_intr_status = readl(&reg->in_endp[ep_num].diepint);
			DEBUG_IN_EP("\tEP%d-IN : DIEPINT = 0x%x\n",
				    ep_num, ep_intr_status);
			/* Interrupt Clear */
			writel(ep_intr_status, &reg->in_endp[ep_num].diepint);

			if (ep_intr_status & TRANSFER_DONE) {
				complete_tx(dev, ep_num);

				if (ep_num == 0) {
					/* an EP0 IN completion may end the
					 * control transfer; fold into
					 * WAIT_FOR_SETUP and re-arm */
					if (dev->ep0state ==
					    WAIT_FOR_IN_COMPLETE)
						dev->ep0state = WAIT_FOR_SETUP;

					if (dev->ep0state == WAIT_FOR_SETUP)
						s3c_udc_pre_setup();

					/* continue transfer after
					   set_clear_halt for DMA mode */
					if (clear_feature_flag == 1) {
						s3c_udc_check_tx_queue(dev,
							clear_feature_num);
						clear_feature_flag = 0;
					}
				}
			}
		}
		ep_num++;
		ep_intr >>= 1;
	}
}
/*
 * Dispatch "OUT endpoint" interrupts: walk DAINT's OUT bits, clear each
 * pending DOEPINT, and run complete_rx() on transfer-done.  EP0 also
 * handles the setup-phase-done event by entering s3c_handle_ep0().
 */
static void process_ep_out_intr(struct s3c_udc *dev)
{
	u32 ep_intr, ep_intr_status;
	u8 ep_num = 0;

	ep_intr = readl(&reg->daint);
	DEBUG_OUT_EP("*** %s: EP OUT interrupt : DAINT = 0x%x\n",
		     __func__, ep_intr);
	/* OUT endpoint bits live in the upper half of DAINT */
	ep_intr = (ep_intr >> DAINT_OUT_BIT) & DAINT_MASK;

	while (ep_intr) {
		if (ep_intr & 0x1) {
			ep_intr_status = readl(&reg->out_endp[ep_num].doepint);
			DEBUG_OUT_EP("\tEP%d-OUT : DOEPINT = 0x%x\n",
				     ep_num, ep_intr_status);
			/* Interrupt Clear */
			writel(ep_intr_status, &reg->out_endp[ep_num].doepint);

			if (ep_num == 0) {
				if (ep_intr_status & TRANSFER_DONE) {
					/* during the OUT status stage the
					 * data is a bare ZLP: skip straight
					 * back to SETUP reception */
					if (dev->ep0state !=
					    WAIT_FOR_OUT_COMPLETE)
						complete_rx(dev, ep_num);
					else {
						dev->ep0state = WAIT_FOR_SETUP;
						s3c_udc_pre_setup();
					}
				}

				if (ep_intr_status &
				    CTRL_OUT_EP_SETUP_PHASE_DONE) {
					DEBUG_OUT_EP("SETUP packet arrived\n");
					s3c_handle_ep0(dev);
				}
			} else {
				if (ep_intr_status & TRANSFER_DONE)
					complete_rx(dev, ep_num);
			}
		}
		ep_num++;
		ep_intr >>= 1;
	}
}
/*
 * usb client interrupt handler.
 *
 * Top-level OTG interrupt dispatcher.  Reads GINTSTS under the device
 * lock and handles, in order: speed enumeration done, early suspend,
 * suspend (forwarding suspend/disconnect to the gadget driver), resume,
 * bus reset (reconfiguring the controller), then the per-endpoint IN
 * and OUT interrupt fan-out.  Always returns IRQ_HANDLED.
 */
static int s3c_udc_irq(int irq, void *_dev)
{
	struct s3c_udc *dev = _dev;
	u32 intr_status;
	u32 usb_status, gintmsk;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);

	intr_status = readl(&reg->gintsts);
	gintmsk = readl(&reg->gintmsk);

	DEBUG_ISR("\n*** %s : GINTSTS=0x%x(on state %s), GINTMSK : 0x%x,"
		  "DAINT : 0x%x, DAINTMSK : 0x%x\n",
		  __func__, intr_status, state_names[dev->ep0state], gintmsk,
		  readl(&reg->daint), readl(&reg->daintmsk));

	/* spurious invocation - nothing pending */
	if (!intr_status) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return IRQ_HANDLED;
	}

	if (intr_status & INT_ENUMDONE) {
		DEBUG_ISR("\tSpeed Detection interrupt\n");

		writel(INT_ENUMDONE, &reg->gintsts);
		/* DSTS[2:1] = enumerated speed */
		usb_status = (readl(&reg->dsts) & 0x6);

		if (usb_status & (USB_FULL_30_60MHZ | USB_FULL_48MHZ)) {
			DEBUG_ISR("\t\tFull Speed Detection\n");
			set_max_pktsize(dev, USB_SPEED_FULL);
		} else {
			DEBUG_ISR("\t\tHigh Speed Detection : 0x%x\n",
				  usb_status);
			set_max_pktsize(dev, USB_SPEED_HIGH);
		}
	}

	if (intr_status & INT_EARLY_SUSPEND) {
		DEBUG_ISR("\tEarly suspend interrupt\n");
		writel(INT_EARLY_SUSPEND, &reg->gintsts);
	}

	if (intr_status & INT_SUSPEND) {
		usb_status = readl(&reg->dsts);
		DEBUG_ISR("\tSuspend interrupt :(DSTS):0x%x\n", usb_status);
		writel(INT_SUSPEND, &reg->gintsts);

		if (dev->gadget.speed != USB_SPEED_UNKNOWN
		    && dev->driver) {
			if (dev->driver->suspend)
				dev->driver->suspend(&dev->gadget);

			/* HACK to let gadget detect disconnected state */
			if (dev->driver->disconnect) {
				/* callback may sleep/re-enter: drop the
				 * lock around it */
				spin_unlock_irqrestore(&dev->lock, flags);
				dev->driver->disconnect(&dev->gadget);
				spin_lock_irqsave(&dev->lock, flags);
			}
		}
	}

	if (intr_status & INT_RESUME) {
		DEBUG_ISR("\tResume interrupt\n");
		writel(INT_RESUME, &reg->gintsts);

		if (dev->gadget.speed != USB_SPEED_UNKNOWN
		    && dev->driver
		    && dev->driver->resume) {
			dev->driver->resume(&dev->gadget);
		}
	}

	if (intr_status & INT_RESET) {
		usb_status = readl(&reg->gotgctl);
		DEBUG_ISR("\tReset interrupt - (GOTGCTL):0x%x\n", usb_status);
		writel(INT_RESET, &reg->gintsts);

		/* GOTGCTL[19:18] = 0b11: B-session valid */
		if ((usb_status & 0xc0000) == (0x3 << 18)) {
			/* only the second reset in a row actually
			 * reconfigures the controller */
			if (reset_available) {
				DEBUG_ISR("\t\tOTG core got reset (%d)!!\n",
					  reset_available);
				reconfig_usbd();
				dev->ep0state = WAIT_FOR_SETUP;
				reset_available = 0;
				s3c_udc_pre_setup();
			} else
				reset_available = 1;
		} else {
			reset_available = 1;
			DEBUG_ISR("\t\tRESET handling skipped\n");
		}
	}

	if (intr_status & INT_IN_EP)
		process_ep_in_intr(dev);

	if (intr_status & INT_OUT_EP)
		process_ep_out_intr(dev);

	spin_unlock_irqrestore(&dev->lock, flags);

	return IRQ_HANDLED;
}
  456. /** Queue one request
  457. * Kickstart transfer if needed
  458. */
  459. static int s3c_queue(struct usb_ep *_ep, struct usb_request *_req,
  460. gfp_t gfp_flags)
  461. {
  462. struct s3c_request *req;
  463. struct s3c_ep *ep;
  464. struct s3c_udc *dev;
  465. unsigned long flags;
  466. u32 ep_num, gintsts;
  467. req = container_of(_req, struct s3c_request, req);
  468. if (unlikely(!_req || !_req->complete || !_req->buf
  469. || !list_empty(&req->queue))) {
  470. DEBUG("%s: bad params\n", __func__);
  471. return -EINVAL;
  472. }
  473. ep = container_of(_ep, struct s3c_ep, ep);
  474. if (unlikely(!_ep || (!ep->desc && ep->ep.name != ep0name))) {
  475. DEBUG("%s: bad ep: %s, %d, %x\n", __func__,
  476. ep->ep.name, !ep->desc, _ep);
  477. return -EINVAL;
  478. }
  479. ep_num = ep_index(ep);
  480. dev = ep->dev;
  481. if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
  482. DEBUG("%s: bogus device state %p\n", __func__, dev->driver);
  483. return -ESHUTDOWN;
  484. }
  485. spin_lock_irqsave(&dev->lock, flags);
  486. _req->status = -EINPROGRESS;
  487. _req->actual = 0;
  488. /* kickstart this i/o queue? */
  489. DEBUG("\n*** %s: %s-%s req = %p, len = %d, buf = %p"
  490. "Q empty = %d, stopped = %d\n",
  491. __func__, _ep->name, ep_is_in(ep) ? "in" : "out",
  492. _req, _req->length, _req->buf,
  493. list_empty(&ep->queue), ep->stopped);
  494. #ifdef DEBUG_S3C_UDC
  495. {
  496. int i, len = _req->length;
  497. printf("pkt = ");
  498. if (len > 64)
  499. len = 64;
  500. for (i = 0; i < len; i++) {
  501. printf("%02x", ((u8 *)_req->buf)[i]);
  502. if ((i & 7) == 7)
  503. printf(" ");
  504. }
  505. printf("\n");
  506. }
  507. #endif
  508. if (list_empty(&ep->queue) && !ep->stopped) {
  509. if (ep_num == 0) {
  510. /* EP0 */
  511. list_add_tail(&req->queue, &ep->queue);
  512. s3c_ep0_kick(dev, ep);
  513. req = 0;
  514. } else if (ep_is_in(ep)) {
  515. gintsts = readl(&reg->gintsts);
  516. DEBUG_IN_EP("%s: ep_is_in, S3C_UDC_OTG_GINTSTS=0x%x\n",
  517. __func__, gintsts);
  518. setdma_tx(ep, req);
  519. } else {
  520. gintsts = readl(&reg->gintsts);
  521. DEBUG_OUT_EP("%s:ep_is_out, S3C_UDC_OTG_GINTSTS=0x%x\n",
  522. __func__, gintsts);
  523. setdma_rx(ep, req);
  524. }
  525. }
  526. /* pio or dma irq handler advances the queue. */
  527. if (likely(req != 0))
  528. list_add_tail(&req->queue, &ep->queue);
  529. spin_unlock_irqrestore(&dev->lock, flags);
  530. return 0;
  531. }
  532. /****************************************************************/
  533. /* End Point 0 related functions */
  534. /****************************************************************/
  535. /* return: 0 = still running, 1 = completed, negative = errno */
  536. static int write_fifo_ep0(struct s3c_ep *ep, struct s3c_request *req)
  537. {
  538. u32 max;
  539. unsigned count;
  540. int is_last;
  541. max = ep_maxpacket(ep);
  542. DEBUG_EP0("%s: max = %d\n", __func__, max);
  543. count = setdma_tx(ep, req);
  544. /* last packet is usually short (or a zlp) */
  545. if (likely(count != max))
  546. is_last = 1;
  547. else {
  548. if (likely(req->req.length != req->req.actual + count)
  549. || req->req.zero)
  550. is_last = 0;
  551. else
  552. is_last = 1;
  553. }
  554. DEBUG_EP0("%s: wrote %s %d bytes%s %d left %p\n", __func__,
  555. ep->ep.name, count,
  556. is_last ? "/L" : "",
  557. req->req.length - req->req.actual - count, req);
  558. /* requests complete when all IN data is in the FIFO */
  559. if (is_last) {
  560. ep->dev->ep0state = WAIT_FOR_SETUP;
  561. return 1;
  562. }
  563. return 0;
  564. }
  565. int s3c_fifo_read(struct s3c_ep *ep, u32 *cp, int max)
  566. {
  567. u32 bytes;
  568. bytes = sizeof(struct usb_ctrlrequest);
  569. invalidate_dcache_range((unsigned long) ep->dev->dma_buf[ep_index(ep)],
  570. (unsigned long) ep->dev->dma_buf[ep_index(ep)]
  571. + DMA_BUFFER_SIZE);
  572. DEBUG_EP0("%s: bytes=%d, ep_index=%d %p\n", __func__,
  573. bytes, ep_index(ep), ep->dev->dma_buf[ep_index(ep)]);
  574. return bytes;
  575. }
/**
 * udc_set_address - set the USB address for this device
 * @address: device address assigned by the host
 *
 * Called from control endpoint function
 * after it decodes a set address setup packet.
 *
 * Programs DCFG with the new address and answers the status stage with
 * a zero-length IN packet.
 */
static void udc_set_address(struct s3c_udc *dev, unsigned char address)
{
	u32 ctrl = readl(&reg->dcfg);
	/* NOTE(review): the address bits are OR'ed in without clearing the
	 * previous field - this relies on DCFG holding address 0 after a
	 * bus reset; confirm against reconfig_usbd() */
	writel(DEVICE_ADDRESS(address) | ctrl, &reg->dcfg);

	/* status stage: zero-length IN packet */
	s3c_udc_ep0_zlp(dev);

	DEBUG_EP0("%s: USB OTG 2.0 Device address=%d, DCFG=0x%x\n",
		  __func__, address, readl(&reg->dcfg));

	dev->usb_address = address;
}
  592. static inline void s3c_udc_ep0_set_stall(struct s3c_ep *ep)
  593. {
  594. struct s3c_udc *dev;
  595. u32 ep_ctrl = 0;
  596. dev = ep->dev;
  597. ep_ctrl = readl(&reg->in_endp[EP0_CON].diepctl);
  598. /* set the disable and stall bits */
  599. if (ep_ctrl & DEPCTL_EPENA)
  600. ep_ctrl |= DEPCTL_EPDIS;
  601. ep_ctrl |= DEPCTL_STALL;
  602. writel(ep_ctrl, &reg->in_endp[EP0_CON].diepctl);
  603. DEBUG_EP0("%s: set ep%d stall, DIEPCTL0 = 0x%x\n",
  604. __func__, ep_index(ep), &reg->in_endp[EP0_CON].diepctl);
  605. /*
  606. * The application can only set this bit, and the core clears it,
  607. * when a SETUP token is received for this endpoint
  608. */
  609. dev->ep0state = WAIT_FOR_SETUP;
  610. s3c_udc_pre_setup();
  611. }
  612. static void s3c_ep0_read(struct s3c_udc *dev)
  613. {
  614. struct s3c_request *req;
  615. struct s3c_ep *ep = &dev->ep[0];
  616. int ret;
  617. if (!list_empty(&ep->queue)) {
  618. req = list_entry(ep->queue.next, struct s3c_request, queue);
  619. } else {
  620. DEBUG("%s: ---> BUG\n", __func__);
  621. BUG();
  622. return;
  623. }
  624. DEBUG_EP0("%s: req = %p, req.length = 0x%x, req.actual = 0x%x\n",
  625. __func__, req, req->req.length, req->req.actual);
  626. if (req->req.length == 0) {
  627. /* zlp for Set_configuration, Set_interface,
  628. * or Bulk-Only mass storge reset */
  629. ep->len = 0;
  630. s3c_udc_ep0_zlp(dev);
  631. DEBUG_EP0("%s: req.length = 0, bRequest = %d\n",
  632. __func__, usb_ctrl->bRequest);
  633. return;
  634. }
  635. ret = setdma_rx(ep, req);
  636. }
  637. /*
  638. * DATA_STATE_XMIT
  639. */
  640. static int s3c_ep0_write(struct s3c_udc *dev)
  641. {
  642. struct s3c_request *req;
  643. struct s3c_ep *ep = &dev->ep[0];
  644. int ret, need_zlp = 0;
  645. if (list_empty(&ep->queue))
  646. req = 0;
  647. else
  648. req = list_entry(ep->queue.next, struct s3c_request, queue);
  649. if (!req) {
  650. DEBUG_EP0("%s: NULL REQ\n", __func__);
  651. return 0;
  652. }
  653. DEBUG_EP0("%s: req = %p, req.length = 0x%x, req.actual = 0x%x\n",
  654. __func__, req, req->req.length, req->req.actual);
  655. if (req->req.length - req->req.actual == ep0_fifo_size) {
  656. /* Next write will end with the packet size, */
  657. /* so we need Zero-length-packet */
  658. need_zlp = 1;
  659. }
  660. ret = write_fifo_ep0(ep, req);
  661. if ((ret == 1) && !need_zlp) {
  662. /* Last packet */
  663. dev->ep0state = WAIT_FOR_COMPLETE;
  664. DEBUG_EP0("%s: finished, waiting for status\n", __func__);
  665. } else {
  666. dev->ep0state = DATA_STATE_XMIT;
  667. DEBUG_EP0("%s: not finished\n", __func__);
  668. }
  669. return 1;
  670. }
  671. u16 g_status;
  672. int s3c_udc_get_status(struct s3c_udc *dev,
  673. struct usb_ctrlrequest *crq)
  674. {
  675. u8 ep_num = crq->wIndex & 0x7F;
  676. u32 ep_ctrl;
  677. u32 *p = the_controller->dma_buf[1];
  678. DEBUG_SETUP("%s: *** USB_REQ_GET_STATUS\n", __func__);
  679. printf("crq->brequest:0x%x\n", crq->bRequestType & USB_RECIP_MASK);
  680. switch (crq->bRequestType & USB_RECIP_MASK) {
  681. case USB_RECIP_INTERFACE:
  682. g_status = 0;
  683. DEBUG_SETUP("\tGET_STATUS:USB_RECIP_INTERFACE, g_stauts = %d\n",
  684. g_status);
  685. break;
  686. case USB_RECIP_DEVICE:
  687. g_status = 0x1; /* Self powered */
  688. DEBUG_SETUP("\tGET_STATUS: USB_RECIP_DEVICE, g_stauts = %d\n",
  689. g_status);
  690. break;
  691. case USB_RECIP_ENDPOINT:
  692. if (crq->wLength > 2) {
  693. DEBUG_SETUP("\tGET_STATUS:Not support EP or wLength\n");
  694. return 1;
  695. }
  696. g_status = dev->ep[ep_num].stopped;
  697. DEBUG_SETUP("\tGET_STATUS: USB_RECIP_ENDPOINT, g_stauts = %d\n",
  698. g_status);
  699. break;
  700. default:
  701. return 1;
  702. }
  703. memcpy(p, &g_status, sizeof(g_status));
  704. flush_dcache_range((unsigned long) p,
  705. (unsigned long) p + DMA_BUFFER_SIZE);
  706. writel(the_controller->dma_addr[1], &reg->in_endp[EP0_CON].diepdma);
  707. writel(DIEPT_SIZ_PKT_CNT(1) | DIEPT_SIZ_XFER_SIZE(2),
  708. &reg->in_endp[EP0_CON].dieptsiz);
  709. ep_ctrl = readl(&reg->in_endp[EP0_CON].diepctl);
  710. writel(ep_ctrl|DEPCTL_EPENA|DEPCTL_CNAK,
  711. &reg->in_endp[EP0_CON].diepctl);
  712. dev->ep0state = WAIT_FOR_NULL_COMPLETE;
  713. return 0;
  714. }
  715. static void s3c_udc_set_nak(struct s3c_ep *ep)
  716. {
  717. u8 ep_num;
  718. u32 ep_ctrl = 0;
  719. ep_num = ep_index(ep);
  720. DEBUG("%s: ep_num = %d, ep_type = %d\n", __func__, ep_num, ep->ep_type);
  721. if (ep_is_in(ep)) {
  722. ep_ctrl = readl(&reg->in_endp[ep_num].diepctl);
  723. ep_ctrl |= DEPCTL_SNAK;
  724. writel(ep_ctrl, &reg->in_endp[ep_num].diepctl);
  725. DEBUG("%s: set NAK, DIEPCTL%d = 0x%x\n",
  726. __func__, ep_num, readl(&reg->in_endp[ep_num].diepctl));
  727. } else {
  728. ep_ctrl = readl(&reg->out_endp[ep_num].doepctl);
  729. ep_ctrl |= DEPCTL_SNAK;
  730. writel(ep_ctrl, &reg->out_endp[ep_num].doepctl);
  731. DEBUG("%s: set NAK, DOEPCTL%d = 0x%x\n",
  732. __func__, ep_num, readl(&reg->out_endp[ep_num].doepctl));
  733. }
  734. return;
  735. }
  736. void s3c_udc_ep_set_stall(struct s3c_ep *ep)
  737. {
  738. u8 ep_num;
  739. u32 ep_ctrl = 0;
  740. ep_num = ep_index(ep);
  741. DEBUG("%s: ep_num = %d, ep_type = %d\n", __func__, ep_num, ep->ep_type);
  742. if (ep_is_in(ep)) {
  743. ep_ctrl = readl(&reg->in_endp[ep_num].diepctl);
  744. /* set the disable and stall bits */
  745. if (ep_ctrl & DEPCTL_EPENA)
  746. ep_ctrl |= DEPCTL_EPDIS;
  747. ep_ctrl |= DEPCTL_STALL;
  748. writel(ep_ctrl, &reg->in_endp[ep_num].diepctl);
  749. DEBUG("%s: set stall, DIEPCTL%d = 0x%x\n",
  750. __func__, ep_num, readl(&reg->in_endp[ep_num].diepctl));
  751. } else {
  752. ep_ctrl = readl(&reg->out_endp[ep_num].doepctl);
  753. /* set the stall bit */
  754. ep_ctrl |= DEPCTL_STALL;
  755. writel(ep_ctrl, &reg->out_endp[ep_num].doepctl);
  756. DEBUG("%s: set stall, DOEPCTL%d = 0x%x\n",
  757. __func__, ep_num, readl(&reg->out_endp[ep_num].doepctl));
  758. }
  759. return;
  760. }
  761. void s3c_udc_ep_clear_stall(struct s3c_ep *ep)
  762. {
  763. u8 ep_num;
  764. u32 ep_ctrl = 0;
  765. ep_num = ep_index(ep);
  766. DEBUG("%s: ep_num = %d, ep_type = %d\n", __func__, ep_num, ep->ep_type);
  767. if (ep_is_in(ep)) {
  768. ep_ctrl = readl(&reg->in_endp[ep_num].diepctl);
  769. /* clear stall bit */
  770. ep_ctrl &= ~DEPCTL_STALL;
  771. /*
  772. * USB Spec 9.4.5: For endpoints using data toggle, regardless
  773. * of whether an endpoint has the Halt feature set, a
  774. * ClearFeature(ENDPOINT_HALT) request always results in the
  775. * data toggle being reinitialized to DATA0.
  776. */
  777. if (ep->bmAttributes == USB_ENDPOINT_XFER_INT
  778. || ep->bmAttributes == USB_ENDPOINT_XFER_BULK) {
  779. ep_ctrl |= DEPCTL_SETD0PID; /* DATA0 */
  780. }
  781. writel(ep_ctrl, &reg->in_endp[ep_num].diepctl);
  782. DEBUG("%s: cleared stall, DIEPCTL%d = 0x%x\n",
  783. __func__, ep_num, readl(&reg->in_endp[ep_num].diepctl));
  784. } else {
  785. ep_ctrl = readl(&reg->out_endp[ep_num].doepctl);
  786. /* clear stall bit */
  787. ep_ctrl &= ~DEPCTL_STALL;
  788. if (ep->bmAttributes == USB_ENDPOINT_XFER_INT
  789. || ep->bmAttributes == USB_ENDPOINT_XFER_BULK) {
  790. ep_ctrl |= DEPCTL_SETD0PID; /* DATA0 */
  791. }
  792. writel(ep_ctrl, &reg->out_endp[ep_num].doepctl);
  793. DEBUG("%s: cleared stall, DOEPCTL%d = 0x%x\n",
  794. __func__, ep_num, readl(&reg->out_endp[ep_num].doepctl));
  795. }
  796. return;
  797. }
  798. static int s3c_udc_set_halt(struct usb_ep *_ep, int value)
  799. {
  800. struct s3c_ep *ep;
  801. struct s3c_udc *dev;
  802. unsigned long flags;
  803. u8 ep_num;
  804. ep = container_of(_ep, struct s3c_ep, ep);
  805. ep_num = ep_index(ep);
  806. if (unlikely(!_ep || !ep->desc || ep_num == EP0_CON ||
  807. ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC)) {
  808. DEBUG("%s: %s bad ep or descriptor\n", __func__, ep->ep.name);
  809. return -EINVAL;
  810. }
  811. /* Attempt to halt IN ep will fail if any transfer requests
  812. * are still queue */
  813. if (value && ep_is_in(ep) && !list_empty(&ep->queue)) {
  814. DEBUG("%s: %s queue not empty, req = %p\n",
  815. __func__, ep->ep.name,
  816. list_entry(ep->queue.next, struct s3c_request, queue));
  817. return -EAGAIN;
  818. }
  819. dev = ep->dev;
  820. DEBUG("%s: ep_num = %d, value = %d\n", __func__, ep_num, value);
  821. spin_lock_irqsave(&dev->lock, flags);
  822. if (value == 0) {
  823. ep->stopped = 0;
  824. s3c_udc_ep_clear_stall(ep);
  825. } else {
  826. if (ep_num == 0)
  827. dev->ep0state = WAIT_FOR_SETUP;
  828. ep->stopped = 1;
  829. s3c_udc_ep_set_stall(ep);
  830. }
  831. spin_unlock_irqrestore(&dev->lock, flags);
  832. return 0;
  833. }
  834. void s3c_udc_ep_activate(struct s3c_ep *ep)
  835. {
  836. u8 ep_num;
  837. u32 ep_ctrl = 0, daintmsk = 0;
  838. ep_num = ep_index(ep);
  839. /* Read DEPCTLn register */
  840. if (ep_is_in(ep)) {
  841. ep_ctrl = readl(&reg->in_endp[ep_num].diepctl);
  842. daintmsk = 1 << ep_num;
  843. } else {
  844. ep_ctrl = readl(&reg->out_endp[ep_num].doepctl);
  845. daintmsk = (1 << ep_num) << DAINT_OUT_BIT;
  846. }
  847. DEBUG("%s: EPCTRL%d = 0x%x, ep_is_in = %d\n",
  848. __func__, ep_num, ep_ctrl, ep_is_in(ep));
  849. /* If the EP is already active don't change the EP Control
  850. * register. */
  851. if (!(ep_ctrl & DEPCTL_USBACTEP)) {
  852. ep_ctrl = (ep_ctrl & ~DEPCTL_TYPE_MASK) |
  853. (ep->bmAttributes << DEPCTL_TYPE_BIT);
  854. ep_ctrl = (ep_ctrl & ~DEPCTL_MPS_MASK) |
  855. (ep->ep.maxpacket << DEPCTL_MPS_BIT);
  856. ep_ctrl |= (DEPCTL_SETD0PID | DEPCTL_USBACTEP | DEPCTL_SNAK);
  857. if (ep_is_in(ep)) {
  858. writel(ep_ctrl, &reg->in_endp[ep_num].diepctl);
  859. DEBUG("%s: USB Ative EP%d, DIEPCTRL%d = 0x%x\n",
  860. __func__, ep_num, ep_num,
  861. readl(&reg->in_endp[ep_num].diepctl));
  862. } else {
  863. writel(ep_ctrl, &reg->out_endp[ep_num].doepctl);
  864. DEBUG("%s: USB Ative EP%d, DOEPCTRL%d = 0x%x\n",
  865. __func__, ep_num, ep_num,
  866. readl(&reg->out_endp[ep_num].doepctl));
  867. }
  868. }
  869. /* Unmask EP Interrtupt */
  870. writel(readl(&reg->daintmsk)|daintmsk, &reg->daintmsk);
  871. DEBUG("%s: DAINTMSK = 0x%x\n", __func__, readl(&reg->daintmsk));
  872. }
/*
 * Handle a standard CLEAR_FEATURE control request on EP0.
 *
 * Returns 0 when the request was handled (status stage queued or EP0
 * stalled), 1 to make the caller treat the request as a protocol error
 * (non-zero wLength).  Note the outer switch has no default case, so
 * unrecognized recipients also return 0 without any status stage --
 * presumably relying on the host to time out; verify if that matters.
 */
static int s3c_udc_clear_feature(struct usb_ep *_ep)
{
	struct s3c_udc *dev;
	struct s3c_ep *ep;
	u8 ep_num;

	ep = container_of(_ep, struct s3c_ep, ep);
	ep_num = ep_index(ep);
	dev = ep->dev;
	DEBUG_SETUP("%s: ep_num = %d, is_in = %d, clear_feature_flag = %d\n",
		    __func__, ep_num, ep_is_in(ep), clear_feature_flag);
	/* CLEAR_FEATURE has no data stage; a non-zero wLength is invalid */
	if (usb_ctrl->wLength != 0) {
		DEBUG_SETUP("\tCLEAR_FEATURE: wLength is not zero.....\n");
		return 1;
	}
	switch (usb_ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		switch (usb_ctrl->wValue) {
		case USB_DEVICE_REMOTE_WAKEUP:
			DEBUG_SETUP("\tOFF:USB_DEVICE_REMOTE_WAKEUP\n");
			break;
		case USB_DEVICE_TEST_MODE:
			DEBUG_SETUP("\tCLEAR_FEATURE: USB_DEVICE_TEST_MODE\n");
			/** @todo Add CLEAR_FEATURE for TEST modes. */
			break;
		}
		/* complete the status stage with a zero-length packet */
		s3c_udc_ep0_zlp(dev);
		break;
	case USB_RECIP_ENDPOINT:
		DEBUG_SETUP("\tCLEAR_FEATURE:USB_RECIP_ENDPOINT, wValue = %d\n",
			    usb_ctrl->wValue);
		if (usb_ctrl->wValue == USB_ENDPOINT_HALT) {
			/* clearing HALT on EP0 itself: just stall */
			if (ep_num == 0) {
				s3c_udc_ep0_set_stall(ep);
				return 0;
			}
			/* ZLP first, then un-stall and re-activate the EP */
			s3c_udc_ep0_zlp(dev);
			s3c_udc_ep_clear_stall(ep);
			s3c_udc_ep_activate(ep);
			ep->stopped = 0;
			/* record the cleared EP for later interrupt handling
			 * (globals consumed elsewhere in this driver) */
			clear_feature_num = ep_num;
			clear_feature_flag = 1;
		}
		break;
	}
	return 0;
}
/*
 * Handle a standard SET_FEATURE control request on EP0.
 *
 * Returns 0 when the request was handled (status-stage ZLP queued or
 * EP0 stalled), 1 for a protocol error (non-zero wLength or an
 * unrecognized recipient).  The DEVICE-recipient features are only
 * logged here; no remote-wakeup/HNP state is actually stored.
 */
static int s3c_udc_set_feature(struct usb_ep *_ep)
{
	struct s3c_udc *dev;
	struct s3c_ep *ep;
	u8 ep_num;

	ep = container_of(_ep, struct s3c_ep, ep);
	ep_num = ep_index(ep);
	dev = ep->dev;
	DEBUG_SETUP("%s: *** USB_REQ_SET_FEATURE , ep_num = %d\n",
		    __func__, ep_num);
	/* SET_FEATURE has no data stage; a non-zero wLength is invalid */
	if (usb_ctrl->wLength != 0) {
		DEBUG_SETUP("\tSET_FEATURE: wLength is not zero.....\n");
		return 1;
	}
	switch (usb_ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		switch (usb_ctrl->wValue) {
		case USB_DEVICE_REMOTE_WAKEUP:
			DEBUG_SETUP("\tSET_FEATURE:USB_DEVICE_REMOTE_WAKEUP\n");
			break;
		case USB_DEVICE_B_HNP_ENABLE:
			DEBUG_SETUP("\tSET_FEATURE: USB_DEVICE_B_HNP_ENABLE\n");
			break;
		case USB_DEVICE_A_HNP_SUPPORT:
			/* RH port supports HNP */
			DEBUG_SETUP("\tSET_FEATURE:USB_DEVICE_A_HNP_SUPPORT\n");
			break;
		case USB_DEVICE_A_ALT_HNP_SUPPORT:
			/* other RH port does */
			DEBUG_SETUP("\tSET: USB_DEVICE_A_ALT_HNP_SUPPORT\n");
			break;
		}
		/* acknowledge with a zero-length status packet */
		s3c_udc_ep0_zlp(dev);
		return 0;
	case USB_RECIP_INTERFACE:
		DEBUG_SETUP("\tSET_FEATURE: USB_RECIP_INTERFACE\n");
		break;
	case USB_RECIP_ENDPOINT:
		DEBUG_SETUP("\tSET_FEATURE: USB_RECIP_ENDPOINT\n");
		if (usb_ctrl->wValue == USB_ENDPOINT_HALT) {
			/* halting EP0 itself: stall and finish */
			if (ep_num == 0) {
				s3c_udc_ep0_set_stall(ep);
				return 0;
			}
			ep->stopped = 1;
			s3c_udc_ep_set_stall(ep);
		}
		s3c_udc_ep0_zlp(dev);
		return 0;
	}
	return 1;
}
/*
 * WAIT_FOR_SETUP (OUT_PKT_RDY)
 *
 * Read the 8-byte SETUP packet from the EP0 FIFO, handle a subset of
 * standard requests inside the driver, and forward everything else to
 * the bound gadget driver's setup() callback.
 */
void s3c_ep0_setup(struct s3c_udc *dev)
{
	struct s3c_ep *ep = &dev->ep[0];
	int i, bytes, is_in;
	u8 ep_num;

	/* Nuke all previous transfers */
	nuke(ep, -EPROTO);
	/* read control req from fifo (8 bytes) */
	bytes = s3c_fifo_read(ep, (u32 *)usb_ctrl, 8);
	DEBUG_SETUP("%s: bRequestType = 0x%x(%s), bRequest = 0x%x"
		    "\twLength = 0x%x, wValue = 0x%x, wIndex= 0x%x\n",
		    __func__, usb_ctrl->bRequestType,
		    (usb_ctrl->bRequestType & USB_DIR_IN) ? "IN" : "OUT",
		    usb_ctrl->bRequest,
		    usb_ctrl->wLength, usb_ctrl->wValue, usb_ctrl->wIndex);
#ifdef DEBUG_S3C_UDC
	{
		/* Hex-dump of the raw SETUP packet.
		 * NOTE(review): the inner 'i' shadows the outer loop
		 * variable, and 'char *p = usb_ctrl;' relies on an implicit
		 * pointer conversion (usb_ctrl is a struct pointer) --
		 * both are worth cleaning up. */
		int i, len = sizeof(*usb_ctrl);
		char *p = usb_ctrl;

		printf("pkt = ");
		for (i = 0; i < len; i++) {
			printf("%02x", ((u8 *)p)[i]);
			if ((i & 7) == 7)
				printf(" ");
		}
		printf("\n");
	}
#endif
	/* Mass-storage class sanity checks: GET_MAX_LUN requires
	 * wLength == 1 and Bulk-Only reset requires wLength == 0;
	 * otherwise stall EP0 and wait for the next SETUP. */
	if (usb_ctrl->bRequest == GET_MAX_LUN_REQUEST &&
	    usb_ctrl->wLength != 1) {
		DEBUG_SETUP("\t%s:GET_MAX_LUN_REQUEST:invalid",
			    __func__);
		DEBUG_SETUP("wLength = %d, setup returned\n",
			    usb_ctrl->wLength);
		s3c_udc_ep0_set_stall(ep);
		dev->ep0state = WAIT_FOR_SETUP;
		return;
	} else if (usb_ctrl->bRequest == BOT_RESET_REQUEST &&
		   usb_ctrl->wLength != 0) {
		/* Bulk-Only *mass storge reset of class-specific request */
		DEBUG_SETUP("%s:BOT Rest:invalid wLength =%d, setup returned\n",
			    __func__, usb_ctrl->wLength);
		s3c_udc_ep0_set_stall(ep);
		dev->ep0state = WAIT_FOR_SETUP;
		return;
	}
	/* Set direction of EP0 */
	if (likely(usb_ctrl->bRequestType & USB_DIR_IN)) {
		ep->bEndpointAddress |= USB_DIR_IN;
		is_in = 1;
	} else {
		ep->bEndpointAddress &= ~USB_DIR_IN;
		is_in = 0;
	}
	/* cope with automagic for some standard requests. */
	dev->req_std = (usb_ctrl->bRequestType & USB_TYPE_MASK)
		== USB_TYPE_STANDARD;
	dev->req_config = 0;
	dev->req_pending = 1;
	/* Handle some SETUP packets ourselves */
	if (dev->req_std) {
		switch (usb_ctrl->bRequest) {
		case USB_REQ_SET_ADDRESS:
			DEBUG_SETUP("%s: *** USB_REQ_SET_ADDRESS (%d)\n",
				    __func__, usb_ctrl->wValue);
			/* only valid for device-recipient standard requests */
			if (usb_ctrl->bRequestType
			    != (USB_TYPE_STANDARD | USB_RECIP_DEVICE))
				break;
			udc_set_address(dev, usb_ctrl->wValue);
			return;
		case USB_REQ_SET_CONFIGURATION:
			DEBUG_SETUP("=====================================\n");
			DEBUG_SETUP("%s: USB_REQ_SET_CONFIGURATION (%d)\n",
				    __func__, usb_ctrl->wValue);
			if (usb_ctrl->bRequestType == USB_RECIP_DEVICE) {
				reset_available = 1;
				dev->req_config = 1;
			}
			break;
		case USB_REQ_GET_DESCRIPTOR:
			DEBUG_SETUP("%s: *** USB_REQ_GET_DESCRIPTOR\n",
				    __func__);
			break;
		case USB_REQ_SET_INTERFACE:
			DEBUG_SETUP("%s: *** USB_REQ_SET_INTERFACE (%d)\n",
				    __func__, usb_ctrl->wValue);
			if (usb_ctrl->bRequestType == USB_RECIP_INTERFACE) {
				reset_available = 1;
				dev->req_config = 1;
			}
			break;
		case USB_REQ_GET_CONFIGURATION:
			DEBUG_SETUP("%s: *** USB_REQ_GET_CONFIGURATION\n",
				    __func__);
			break;
		case USB_REQ_GET_STATUS:
			/* zero return means the request was fully handled */
			if (!s3c_udc_get_status(dev, usb_ctrl))
				return;
			break;
		case USB_REQ_CLEAR_FEATURE:
			/* low 7 bits of wIndex select the endpoint */
			ep_num = usb_ctrl->wIndex & 0x7f;
			if (!s3c_udc_clear_feature(&dev->ep[ep_num].ep))
				return;
			break;
		case USB_REQ_SET_FEATURE:
			ep_num = usb_ctrl->wIndex & 0x7f;
			if (!s3c_udc_set_feature(&dev->ep[ep_num].ep))
				return;
			break;
		default:
			DEBUG_SETUP("%s: *** Default of usb_ctrl->bRequest=0x%x"
				    "happened.\n", __func__, usb_ctrl->bRequest);
			break;
		}
	}
	if (likely(dev->driver)) {
		/* device-2-host (IN) or no data setup command,
		 * process immediately */
		DEBUG_SETUP("%s:usb_ctrlreq will be passed to fsg_setup()\n",
			    __func__);
		/* drop the lock across the gadget driver callback */
		spin_unlock(&dev->lock);
		i = dev->driver->setup(&dev->gadget, usb_ctrl);
		spin_lock(&dev->lock);
		if (i < 0) {
			if (dev->req_config) {
				DEBUG_SETUP("\tconfig change 0x%02x fail %d?\n",
					    (u32)usb_ctrl->bRequest, i);
				return;
			}
			/* setup processing failed, force stall */
			s3c_udc_ep0_set_stall(ep);
			dev->ep0state = WAIT_FOR_SETUP;
			DEBUG_SETUP("\tdev->driver->setup failed (%d),"
				    " bRequest = %d\n",
				    i, usb_ctrl->bRequest);
		} else if (dev->req_pending) {
			dev->req_pending = 0;
			DEBUG_SETUP("\tdev->req_pending...\n");
		}
		DEBUG_SETUP("\tep0state = %s\n", state_names[dev->ep0state]);
	}
}
  1116. /*
  1117. * handle ep0 interrupt
  1118. */
  1119. static void s3c_handle_ep0(struct s3c_udc *dev)
  1120. {
  1121. if (dev->ep0state == WAIT_FOR_SETUP) {
  1122. DEBUG_OUT_EP("%s: WAIT_FOR_SETUP\n", __func__);
  1123. s3c_ep0_setup(dev);
  1124. } else {
  1125. DEBUG_OUT_EP("%s: strange state!!(state = %s)\n",
  1126. __func__, state_names[dev->ep0state]);
  1127. }
  1128. }
  1129. static void s3c_ep0_kick(struct s3c_udc *dev, struct s3c_ep *ep)
  1130. {
  1131. DEBUG_EP0("%s: ep_is_in = %d\n", __func__, ep_is_in(ep));
  1132. if (ep_is_in(ep)) {
  1133. dev->ep0state = DATA_STATE_XMIT;
  1134. s3c_ep0_write(dev);
  1135. } else {
  1136. dev->ep0state = DATA_STATE_RECV;
  1137. s3c_ep0_read(dev);
  1138. }
  1139. }