s3c_udc_otg_xfer_dma.c 35 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458
  1. /*
  2. * drivers/usb/gadget/s3c_udc_otg_xfer_dma.c
  3. * Samsung S3C on-chip full/high speed USB OTG 2.0 device controllers
  4. *
  5. * Copyright (C) 2009 for Samsung Electronics
  6. *
  7. * BSP Support for Samsung's UDC driver
  8. * available at:
  9. * git://git.kernel.org/pub/scm/linux/kernel/git/kki_ap/linux-2.6-samsung.git
  10. *
  11. * State machine bugfixes:
  12. * Marek Szyprowski <m.szyprowski@samsung.com>
  13. *
  14. * Ported to u-boot:
  15. * Marek Szyprowski <m.szyprowski@samsung.com>
  16. * Lukasz Majewski <l.majewski@samsumg.com>
  17. *
  18. * This program is free software; you can redistribute it and/or modify
  19. * it under the terms of the GNU General Public License as published by
  20. * the Free Software Foundation; either version 2 of the License, or
  21. * (at your option) any later version.
  22. *
  23. * This program is distributed in the hope that it will be useful,
  24. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  25. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  26. * GNU General Public License for more details.
  27. *
  28. * You should have received a copy of the GNU General Public License
  29. * along with this program; if not, write to the Free Software
  30. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  31. *
  32. */
/*
 * Endpoint number remembered from a Set/ClearFeature(HALT) request;
 * once the EP0 status stage completes, process_ep_in_intr() restarts
 * any transfer still queued on that endpoint and clears the flag.
 */
static u8 clear_feature_num;
int clear_feature_flag;

/* Bulk-Only Mass Storage Reset (class-specific request) */
#define GET_MAX_LUN_REQUEST 0xFE
#define BOT_RESET_REQUEST 0xFF
/*
 * Queue a zero-length IN packet on EP0 (status stage of a control
 * transfer) and move the ep0 state machine to WAIT_FOR_IN_COMPLETE
 * so complete_tx() finishes the request on the next IN interrupt.
 */
static inline void s3c_udc_ep0_zlp(struct s3c_udc *dev)
{
	u32 ep_ctrl;

	/* make the control buffer coherent before handing it to DMA */
	flush_dcache_range((unsigned long) usb_ctrl_dma_addr,
			   (unsigned long) usb_ctrl_dma_addr
			   + DMA_BUFFER_SIZE);

	writel(usb_ctrl_dma_addr, &reg->in_endp[EP0_CON].diepdma);
	/* one packet, transfer size 0 -> a ZLP */
	writel(DIEPT_SIZ_PKT_CNT(1), &reg->in_endp[EP0_CON].dieptsiz);

	ep_ctrl = readl(&reg->in_endp[EP0_CON].diepctl);
	/* enable endpoint and clear NAK so the ZLP is sent */
	writel(ep_ctrl|DEPCTL_EPENA|DEPCTL_CNAK,
	       &reg->in_endp[EP0_CON].diepctl);

	DEBUG_EP0("%s:EP0 ZLP DIEPCTL0 = 0x%x\n",
		__func__, readl(&reg->in_endp[EP0_CON].diepctl));
	dev->ep0state = WAIT_FOR_IN_COMPLETE;
}
/*
 * Arm EP0 OUT to receive the next 8-byte SETUP packet into the shared
 * control DMA buffer.
 */
void s3c_udc_pre_setup(void)
{
	u32 ep_ctrl;

	debug_cond(DEBUG_IN_EP, "%s : Prepare Setup packets.\n", __func__);

	/* DMA will write this buffer - drop stale cache lines first */
	invalidate_dcache_range((unsigned long) usb_ctrl_dma_addr,
				(unsigned long) usb_ctrl_dma_addr
				+ DMA_BUFFER_SIZE);

	/* one packet, sized for a usb_ctrlrequest (8 bytes) */
	writel(DOEPT_SIZ_PKT_CNT(1) | sizeof(struct usb_ctrlrequest),
	       &reg->out_endp[EP0_CON].doeptsiz);
	writel(usb_ctrl_dma_addr, &reg->out_endp[EP0_CON].doepdma);

	ep_ctrl = readl(&reg->out_endp[EP0_CON].doepctl);
	/* EPENA only (NAK not cleared) - the core accepts SETUP regardless */
	writel(ep_ctrl|DEPCTL_EPENA, &reg->out_endp[EP0_CON].doepctl);

	DEBUG_EP0("%s:EP0 ZLP DIEPCTL0 = 0x%x\n",
		__func__, readl(&reg->in_endp[EP0_CON].diepctl));
	DEBUG_EP0("%s:EP0 ZLP DOEPCTL0 = 0x%x\n",
		__func__, readl(&reg->out_endp[EP0_CON].doepctl));
}
/*
 * Arm EP0 OUT for the status stage of a control-IN transfer: receive
 * the host's zero-length OUT packet into the control DMA buffer.
 */
static inline void s3c_ep0_complete_out(void)
{
	u32 ep_ctrl;

	DEBUG_EP0("%s:EP0 ZLP DIEPCTL0 = 0x%x\n",
		__func__, readl(&reg->in_endp[EP0_CON].diepctl));
	DEBUG_EP0("%s:EP0 ZLP DOEPCTL0 = 0x%x\n",
		__func__, readl(&reg->out_endp[EP0_CON].doepctl));

	debug_cond(DEBUG_IN_EP,
		"%s : Prepare Complete Out packet.\n", __func__);

	/* DMA will write this buffer - drop stale cache lines first */
	invalidate_dcache_range((unsigned long) usb_ctrl_dma_addr,
				(unsigned long) usb_ctrl_dma_addr
				+ DMA_BUFFER_SIZE);

	writel(DOEPT_SIZ_PKT_CNT(1) | sizeof(struct usb_ctrlrequest),
	       &reg->out_endp[EP0_CON].doeptsiz);
	writel(usb_ctrl_dma_addr, &reg->out_endp[EP0_CON].doepdma);

	ep_ctrl = readl(&reg->out_endp[EP0_CON].doepctl);
	/* enable and clear NAK to accept the incoming status packet */
	writel(ep_ctrl|DEPCTL_EPENA|DEPCTL_CNAK,
	       &reg->out_endp[EP0_CON].doepctl);

	DEBUG_EP0("%s:EP0 ZLP DIEPCTL0 = 0x%x\n",
		__func__, readl(&reg->in_endp[EP0_CON].diepctl));
	DEBUG_EP0("%s:EP0 ZLP DOEPCTL0 = 0x%x\n",
		__func__, readl(&reg->out_endp[EP0_CON].doepctl));
}
  93. static int setdma_rx(struct s3c_ep *ep, struct s3c_request *req)
  94. {
  95. u32 *buf, ctrl;
  96. u32 length, pktcnt;
  97. u32 ep_num = ep_index(ep);
  98. buf = req->req.buf + req->req.actual;
  99. length = min(req->req.length - req->req.actual, (int)ep->ep.maxpacket);
  100. ep->len = length;
  101. ep->dma_buf = buf;
  102. invalidate_dcache_range((unsigned long) ep->dev->dma_buf[ep_num],
  103. (unsigned long) ep->dev->dma_buf[ep_num]
  104. + DMA_BUFFER_SIZE);
  105. if (length == 0)
  106. pktcnt = 1;
  107. else
  108. pktcnt = (length - 1)/(ep->ep.maxpacket) + 1;
  109. pktcnt = 1;
  110. ctrl = readl(&reg->out_endp[ep_num].doepctl);
  111. writel(the_controller->dma_addr[ep_index(ep)+1],
  112. &reg->out_endp[ep_num].doepdma);
  113. writel(DOEPT_SIZ_PKT_CNT(pktcnt) | DOEPT_SIZ_XFER_SIZE(length),
  114. &reg->out_endp[ep_num].doeptsiz);
  115. writel(DEPCTL_EPENA|DEPCTL_CNAK|ctrl, &reg->out_endp[ep_num].doepctl);
  116. DEBUG_OUT_EP("%s: EP%d RX DMA start : DOEPDMA = 0x%x,"
  117. "DOEPTSIZ = 0x%x, DOEPCTL = 0x%x\n"
  118. "\tbuf = 0x%p, pktcnt = %d, xfersize = %d\n",
  119. __func__, ep_num,
  120. readl(&reg->out_endp[ep_num].doepdma),
  121. readl(&reg->out_endp[ep_num].doeptsiz),
  122. readl(&reg->out_endp[ep_num].doepctl),
  123. buf, pktcnt, length);
  124. return 0;
  125. }
  126. int setdma_tx(struct s3c_ep *ep, struct s3c_request *req)
  127. {
  128. u32 *buf, ctrl = 0;
  129. u32 length, pktcnt;
  130. u32 ep_num = ep_index(ep);
  131. u32 *p = the_controller->dma_buf[ep_index(ep)+1];
  132. buf = req->req.buf + req->req.actual;
  133. length = req->req.length - req->req.actual;
  134. if (ep_num == EP0_CON)
  135. length = min_t(length, (u32)ep_maxpacket(ep));
  136. ep->len = length;
  137. ep->dma_buf = buf;
  138. memcpy(p, ep->dma_buf, length);
  139. flush_dcache_range((unsigned long) p ,
  140. (unsigned long) p + DMA_BUFFER_SIZE);
  141. if (length == 0)
  142. pktcnt = 1;
  143. else
  144. pktcnt = (length - 1)/(ep->ep.maxpacket) + 1;
  145. /* Flush the endpoint's Tx FIFO */
  146. writel(TX_FIFO_NUMBER(ep->fifo_num), &reg->grstctl);
  147. writel(TX_FIFO_NUMBER(ep->fifo_num) | TX_FIFO_FLUSH, &reg->grstctl);
  148. while (readl(&reg->grstctl) & TX_FIFO_FLUSH)
  149. ;
  150. writel(the_controller->dma_addr[ep_index(ep)+1],
  151. &reg->in_endp[ep_num].diepdma);
  152. writel(DIEPT_SIZ_PKT_CNT(pktcnt) | DIEPT_SIZ_XFER_SIZE(length),
  153. &reg->in_endp[ep_num].dieptsiz);
  154. ctrl = readl(&reg->in_endp[ep_num].diepctl);
  155. /* Write the FIFO number to be used for this endpoint */
  156. ctrl &= DIEPCTL_TX_FIFO_NUM_MASK;
  157. ctrl |= DIEPCTL_TX_FIFO_NUM(ep->fifo_num);
  158. /* Clear reserved (Next EP) bits */
  159. ctrl = (ctrl&~(EP_MASK<<DEPCTL_NEXT_EP_BIT));
  160. writel(DEPCTL_EPENA|DEPCTL_CNAK|ctrl, &reg->in_endp[ep_num].diepctl);
  161. debug_cond(DEBUG_IN_EP,
  162. "%s:EP%d TX DMA start : DIEPDMA0 = 0x%x,"
  163. "DIEPTSIZ0 = 0x%x, DIEPCTL0 = 0x%x\n"
  164. "\tbuf = 0x%p, pktcnt = %d, xfersize = %d\n",
  165. __func__, ep_num,
  166. readl(&reg->in_endp[ep_num].diepdma),
  167. readl(&reg->in_endp[ep_num].dieptsiz),
  168. readl(&reg->in_endp[ep_num].diepctl),
  169. buf, pktcnt, length);
  170. return length;
  171. }
/*
 * OUT (RX) transfer-complete handler: copy the received data out of
 * the DMA bounce buffer into the request, then either finish the
 * request, send the EP0 status ZLP, or re-arm DMA for the remainder.
 */
static void complete_rx(struct s3c_udc *dev, u8 ep_num)
{
	struct s3c_ep *ep = &dev->ep[ep_num];
	struct s3c_request *req = NULL;
	u32 ep_tsr = 0, xfer_size = 0, is_short = 0;
	u32 *p = the_controller->dma_buf[ep_index(ep)+1];

	if (list_empty(&ep->queue)) {
		DEBUG_OUT_EP("%s: RX DMA done : NULL REQ on OUT EP-%d\n",
			__func__, ep_num);
		return;
	}

	req = list_entry(ep->queue.next, struct s3c_request, queue);

	/* DOEPTSIZ holds the *remaining* count; received = programmed - rest */
	ep_tsr = readl(&reg->out_endp[ep_num].doeptsiz);
	if (ep_num == EP0_CON)
		xfer_size = (ep_tsr & DOEPT_SIZ_XFER_SIZE_MAX_EP0);
	else
		xfer_size = (ep_tsr & DOEPT_SIZ_XFER_SIZE_MAX_EP);

	xfer_size = ep->len - xfer_size;

	/* DMA wrote behind the cache - invalidate before reading */
	invalidate_dcache_range((unsigned long) p,
				(unsigned long) p + DMA_BUFFER_SIZE);
	/* NOTE(review): copies ep->len, not xfer_size - relies on the
	 * bounce buffer holding at least ep->len bytes; confirm intent */
	memcpy(ep->dma_buf, p, ep->len);

	req->req.actual += min(xfer_size, req->req.length - req->req.actual);
	/* a short packet terminates the transfer */
	is_short = (xfer_size < ep->ep.maxpacket);

	DEBUG_OUT_EP("%s: RX DMA done : ep = %d, rx bytes = %d/%d, "
		"is_short = %d, DOEPTSIZ = 0x%x, remained bytes = %d\n",
		__func__, ep_num, req->req.actual, req->req.length,
		is_short, ep_tsr, xfer_size);

	if (is_short || req->req.actual == req->req.length) {
		if (ep_num == EP0_CON && dev->ep0state == DATA_STATE_RECV) {
			DEBUG_OUT_EP("	=> Send ZLP\n");
			s3c_udc_ep0_zlp(dev);
			/* packet will be completed in complete_tx() */
			dev->ep0state = WAIT_FOR_IN_COMPLETE;
		} else {
			done(ep, req, 0);

			if (!list_empty(&ep->queue)) {
				req = list_entry(ep->queue.next,
					struct s3c_request, queue);
				DEBUG_OUT_EP("%s: Next Rx request start...\n",
					__func__);
				setdma_rx(ep, req);
			}
		}
	} else
		/* more data expected - re-arm for the next chunk */
		setdma_rx(ep, req);
}
/*
 * IN (TX) transfer-complete handler: update the request's progress and
 * drive the EP0 control state machine, or start the next queued TX on
 * a regular IN endpoint.
 */
static void complete_tx(struct s3c_udc *dev, u8 ep_num)
{
	struct s3c_ep *ep = &dev->ep[ep_num];
	struct s3c_request *req;
	u32 ep_tsr = 0, xfer_size = 0, is_short = 0;
	u32 last;

	/* GET_STATUS answer just went out: run the status OUT stage */
	if (dev->ep0state == WAIT_FOR_NULL_COMPLETE) {
		dev->ep0state = WAIT_FOR_OUT_COMPLETE;
		s3c_ep0_complete_out();
		return;
	}

	if (list_empty(&ep->queue)) {
		debug_cond(DEBUG_IN_EP,
			"%s: TX DMA done : NULL REQ on IN EP-%d\n",
			__func__, ep_num);
		return;
	}

	req = list_entry(ep->queue.next, struct s3c_request, queue);
	ep_tsr = readl(&reg->in_endp[ep_num].dieptsiz);

	/* the whole programmed length (ep->len) is taken as sent */
	xfer_size = ep->len;
	is_short = (xfer_size < ep->ep.maxpacket);
	req->req.actual += min(xfer_size, req->req.length - req->req.actual);

	debug_cond(DEBUG_IN_EP,
		"%s: TX DMA done : ep = %d, tx bytes = %d/%d, "
		"is_short = %d, DIEPTSIZ = 0x%x, remained bytes = %d\n",
		__func__, ep_num, req->req.actual, req->req.length,
		is_short, ep_tsr, xfer_size);

	if (ep_num == 0) {
		if (dev->ep0state == DATA_STATE_XMIT) {
			debug_cond(DEBUG_IN_EP,
				"%s: ep_num = %d, ep0stat =="
				"DATA_STATE_XMIT\n",
				__func__, ep_num);
			/* push the next chunk of the data stage */
			last = write_fifo_ep0(ep, req);
			if (last)
				dev->ep0state = WAIT_FOR_COMPLETE;
		} else if (dev->ep0state == WAIT_FOR_IN_COMPLETE) {
			debug_cond(DEBUG_IN_EP,
				"%s: ep_num = %d, completing request\n",
				__func__, ep_num);
			/* status ZLP finished - control transfer done */
			done(ep, req, 0);
			dev->ep0state = WAIT_FOR_SETUP;
		} else if (dev->ep0state == WAIT_FOR_COMPLETE) {
			debug_cond(DEBUG_IN_EP,
				"%s: ep_num = %d, completing request\n",
				__func__, ep_num);
			/* data stage done - run the status OUT stage */
			done(ep, req, 0);
			dev->ep0state = WAIT_FOR_OUT_COMPLETE;
			s3c_ep0_complete_out();
		} else {
			debug_cond(DEBUG_IN_EP,
				"%s: ep_num = %d, invalid ep state\n",
				__func__, ep_num);
		}
		return;
	}

	if (req->req.actual == req->req.length)
		done(ep, req, 0);

	if (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next, struct s3c_request, queue);
		debug_cond(DEBUG_IN_EP,
			"%s: Next Tx request start...\n", __func__);
		setdma_tx(ep, req);
	}
}
  283. static inline void s3c_udc_check_tx_queue(struct s3c_udc *dev, u8 ep_num)
  284. {
  285. struct s3c_ep *ep = &dev->ep[ep_num];
  286. struct s3c_request *req;
  287. debug_cond(DEBUG_IN_EP,
  288. "%s: Check queue, ep_num = %d\n", __func__, ep_num);
  289. if (!list_empty(&ep->queue)) {
  290. req = list_entry(ep->queue.next, struct s3c_request, queue);
  291. debug_cond(DEBUG_IN_EP,
  292. "%s: Next Tx request(0x%p) start...\n",
  293. __func__, req);
  294. if (ep_is_in(ep))
  295. setdma_tx(ep, req);
  296. else
  297. setdma_rx(ep, req);
  298. } else {
  299. debug_cond(DEBUG_IN_EP,
  300. "%s: NULL REQ on IN EP-%d\n", __func__, ep_num);
  301. return;
  302. }
  303. }
/*
 * Dispatch pending IN-endpoint interrupts (low half of DAINT).  For
 * each pending endpoint, ack DIEPINT and, on transfer-done, complete
 * the TX and keep the EP0/clear-halt state machines moving.
 */
static void process_ep_in_intr(struct s3c_udc *dev)
{
	u32 ep_intr, ep_intr_status;
	u8 ep_num = 0;

	ep_intr = readl(&reg->daint);
	debug_cond(DEBUG_IN_EP,
		"*** %s: EP In interrupt : DAINT = 0x%x\n", __func__, ep_intr);

	ep_intr &= DAINT_MASK;

	/* walk the bitmap, shifting one endpoint per iteration */
	while (ep_intr) {
		if (ep_intr & DAINT_IN_EP_INT(1)) {
			ep_intr_status = readl(&reg->in_endp[ep_num].diepint);
			debug_cond(DEBUG_IN_EP, "\tEP%d-IN : DIEPINT = 0x%x\n",
				ep_num, ep_intr_status);

			/* Interrupt Clear */
			writel(ep_intr_status, &reg->in_endp[ep_num].diepint);

			if (ep_intr_status & TRANSFER_DONE) {
				complete_tx(dev, ep_num);

				if (ep_num == 0) {
					/* IN status phase done: wait for the
					 * next SETUP and re-arm EP0 OUT */
					if (dev->ep0state ==
					    WAIT_FOR_IN_COMPLETE)
						dev->ep0state = WAIT_FOR_SETUP;

					if (dev->ep0state == WAIT_FOR_SETUP)
						s3c_udc_pre_setup();

					/* continue transfer after
					   set_clear_halt for DMA mode */
					if (clear_feature_flag == 1) {
						s3c_udc_check_tx_queue(dev,
							clear_feature_num);
						clear_feature_flag = 0;
					}
				}
			}
		}
		ep_num++;
		ep_intr >>= 1;
	}
}
/*
 * Dispatch pending OUT-endpoint interrupts (high half of DAINT).  EP0
 * additionally handles the SETUP-phase-done event; other endpoints
 * just complete their RX transfers.
 */
static void process_ep_out_intr(struct s3c_udc *dev)
{
	u32 ep_intr, ep_intr_status;
	u8 ep_num = 0;

	ep_intr = readl(&reg->daint);
	DEBUG_OUT_EP("*** %s: EP OUT interrupt : DAINT = 0x%x\n",
		__func__, ep_intr);

	ep_intr = (ep_intr >> DAINT_OUT_BIT) & DAINT_MASK;

	/* walk the bitmap, one endpoint per iteration */
	while (ep_intr) {
		if (ep_intr & 0x1) {
			ep_intr_status = readl(&reg->out_endp[ep_num].doepint);
			DEBUG_OUT_EP("\tEP%d-OUT : DOEPINT = 0x%x\n",
				ep_num, ep_intr_status);

			/* Interrupt Clear */
			writel(ep_intr_status, &reg->out_endp[ep_num].doepint);

			if (ep_num == 0) {
				if (ep_intr_status & TRANSFER_DONE) {
					if (dev->ep0state !=
					    WAIT_FOR_OUT_COMPLETE)
						complete_rx(dev, ep_num);
					else {
						/* status OUT stage finished:
						 * re-arm for the next SETUP */
						dev->ep0state = WAIT_FOR_SETUP;
						s3c_udc_pre_setup();
					}
				}

				if (ep_intr_status &
				    CTRL_OUT_EP_SETUP_PHASE_DONE) {
					DEBUG_OUT_EP("SETUP packet arrived\n");
					s3c_handle_ep0(dev);
				}
			} else {
				if (ep_intr_status & TRANSFER_DONE)
					complete_rx(dev, ep_num);
			}
		}
		ep_num++;
		ep_intr >>= 1;
	}
}
  380. /*
  381. * usb client interrupt handler.
  382. */
/*
 * Top-level OTG device interrupt handler.
 *
 * Reads GINTSTS once, acknowledges and handles each pending device
 * event (speed enumeration done, early-suspend, suspend, resume, USB
 * reset) and finally fans endpoint interrupts out to
 * process_ep_in_intr()/process_ep_out_intr().
 *
 * Runs with dev->lock held; the lock is dropped only around the
 * gadget driver's disconnect callback.  Always returns IRQ_HANDLED.
 */
static int s3c_udc_irq(int irq, void *_dev)
{
	struct s3c_udc *dev = _dev;
	u32 intr_status;
	u32 usb_status, gintmsk;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);

	intr_status = readl(&reg->gintsts);
	gintmsk = readl(&reg->gintmsk);

	debug_cond(DEBUG_ISR,
		"\n*** %s : GINTSTS=0x%x(on state %s), GINTMSK : 0x%x,"
		"DAINT : 0x%x, DAINTMSK : 0x%x\n",
		__func__, intr_status, state_names[dev->ep0state], gintmsk,
		readl(&reg->daint), readl(&reg->daintmsk));

	/* spurious interrupt: nothing pending */
	if (!intr_status) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return IRQ_HANDLED;
	}

	if (intr_status & INT_ENUMDONE) {
		debug_cond(DEBUG_ISR, "\tSpeed Detection interrupt\n");

		writel(INT_ENUMDONE, &reg->gintsts);	/* ack */
		/* DSTS bits 2:1 report the enumerated speed */
		usb_status = (readl(&reg->dsts) & 0x6);

		if (usb_status & (USB_FULL_30_60MHZ | USB_FULL_48MHZ)) {
			debug_cond(DEBUG_ISR, "\t\tFull Speed Detection\n");
			set_max_pktsize(dev, USB_SPEED_FULL);
		} else {
			debug_cond(DEBUG_ISR,
				"\t\tHigh Speed Detection : 0x%x\n",
				usb_status);
			set_max_pktsize(dev, USB_SPEED_HIGH);
		}
	}

	if (intr_status & INT_EARLY_SUSPEND) {
		debug_cond(DEBUG_ISR, "\tEarly suspend interrupt\n");
		writel(INT_EARLY_SUSPEND, &reg->gintsts);
	}

	if (intr_status & INT_SUSPEND) {
		usb_status = readl(&reg->dsts);
		debug_cond(DEBUG_ISR,
			"\tSuspend interrupt :(DSTS):0x%x\n", usb_status);
		writel(INT_SUSPEND, &reg->gintsts);

		if (dev->gadget.speed != USB_SPEED_UNKNOWN
		    && dev->driver) {
			if (dev->driver->suspend)
				dev->driver->suspend(&dev->gadget);

			/* HACK to let gadget detect disconnected state */
			if (dev->driver->disconnect) {
				/* drop the lock: the callback may re-enter
				 * the UDC */
				spin_unlock_irqrestore(&dev->lock, flags);
				dev->driver->disconnect(&dev->gadget);
				spin_lock_irqsave(&dev->lock, flags);
			}
		}
	}

	if (intr_status & INT_RESUME) {
		debug_cond(DEBUG_ISR, "\tResume interrupt\n");
		writel(INT_RESUME, &reg->gintsts);

		if (dev->gadget.speed != USB_SPEED_UNKNOWN
		    && dev->driver
		    && dev->driver->resume) {
			dev->driver->resume(&dev->gadget);
		}
	}

	if (intr_status & INT_RESET) {
		usb_status = readl(&reg->gotgctl);
		debug_cond(DEBUG_ISR,
			"\tReset interrupt - (GOTGCTL):0x%x\n", usb_status);
		writel(INT_RESET, &reg->gintsts);

		/* NOTE(review): checks GOTGCTL session-valid bits (18:19);
		 * reset is only processed while a session is valid */
		if ((usb_status & 0xc0000) == (0x3 << 18)) {
			if (reset_available) {
				debug_cond(DEBUG_ISR,
					"\t\tOTG core got reset (%d)!!\n",
					reset_available);
				/* reprogram the core and re-arm EP0 */
				reconfig_usbd();
				dev->ep0state = WAIT_FOR_SETUP;
				reset_available = 0;
				s3c_udc_pre_setup();
			} else
				reset_available = 1;
		} else {
			reset_available = 1;
			debug_cond(DEBUG_ISR, "\t\tRESET handling skipped\n");
		}
	}

	if (intr_status & INT_IN_EP)
		process_ep_in_intr(dev);

	if (intr_status & INT_OUT_EP)
		process_ep_out_intr(dev);

	spin_unlock_irqrestore(&dev->lock, flags);

	return IRQ_HANDLED;
}
  473. /** Queue one request
  474. * Kickstart transfer if needed
  475. */
/*
 * usb_ep_queue() backend: validate the request and either start it
 * immediately (queue empty, endpoint not stopped) or park it on the
 * endpoint queue for the IRQ handlers to advance.
 *
 * Returns 0 on success, -EINVAL on bad arguments, -ESHUTDOWN when the
 * gadget is not bound or not enumerated.
 */
static int s3c_queue(struct usb_ep *_ep, struct usb_request *_req,
	gfp_t gfp_flags)
{
	struct s3c_request *req;
	struct s3c_ep *ep;
	struct s3c_udc *dev;
	unsigned long flags;
	u32 ep_num, gintsts;

	/* container_of() is pure pointer arithmetic - no dereference
	 * happens before the NULL check below */
	req = container_of(_req, struct s3c_request, req);
	if (unlikely(!_req || !_req->complete || !_req->buf
		     || !list_empty(&req->queue))) {
		debug("%s: bad params\n", __func__);
		return -EINVAL;
	}

	ep = container_of(_ep, struct s3c_ep, ep);
	/* EP0 has no descriptor; all other endpoints must be enabled */
	if (unlikely(!_ep || (!ep->desc && ep->ep.name != ep0name))) {
		debug("%s: bad ep: %s, %d, %p\n", __func__,
			ep->ep.name, !ep->desc, _ep);
		return -EINVAL;
	}

	ep_num = ep_index(ep);
	dev = ep->dev;
	if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
		debug("%s: bogus device state %p\n", __func__, dev->driver);
		return -ESHUTDOWN;
	}

	spin_lock_irqsave(&dev->lock, flags);

	_req->status = -EINPROGRESS;
	_req->actual = 0;

	/* kickstart this i/o queue? */
	debug("\n*** %s: %s-%s req = %p, len = %d, buf = %p"
		"Q empty = %d, stopped = %d\n",
		__func__, _ep->name, ep_is_in(ep) ? "in" : "out",
		_req, _req->length, _req->buf,
		list_empty(&ep->queue), ep->stopped);

#ifdef DEBUG_S3C_UDC
	{
		/* dump at most the first 64 payload bytes */
		int i, len = _req->length;

		printf("pkt = ");
		if (len > 64)
			len = 64;
		for (i = 0; i < len; i++) {
			printf("%02x", ((u8 *)_req->buf)[i]);
			if ((i & 7) == 7)
				printf(" ");
		}
		printf("\n");
	}
#endif

	if (list_empty(&ep->queue) && !ep->stopped) {
		if (ep_num == 0) {
			/* EP0 */
			list_add_tail(&req->queue, &ep->queue);
			s3c_ep0_kick(dev, ep);
			/* already queued - skip the add below */
			req = 0;
		} else if (ep_is_in(ep)) {
			gintsts = readl(&reg->gintsts);
			debug_cond(DEBUG_IN_EP,
				"%s: ep_is_in, S3C_UDC_OTG_GINTSTS=0x%x\n",
				__func__, gintsts);

			setdma_tx(ep, req);
		} else {
			gintsts = readl(&reg->gintsts);
			DEBUG_OUT_EP("%s:ep_is_out, S3C_UDC_OTG_GINTSTS=0x%x\n",
				__func__, gintsts);

			setdma_rx(ep, req);
		}
	}

	/* pio or dma irq handler advances the queue. */
	if (likely(req != 0))
		list_add_tail(&req->queue, &ep->queue);

	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}
  550. /****************************************************************/
  551. /* End Point 0 related functions */
  552. /****************************************************************/
  553. /* return: 0 = still running, 1 = completed, negative = errno */
  554. static int write_fifo_ep0(struct s3c_ep *ep, struct s3c_request *req)
  555. {
  556. u32 max;
  557. unsigned count;
  558. int is_last;
  559. max = ep_maxpacket(ep);
  560. DEBUG_EP0("%s: max = %d\n", __func__, max);
  561. count = setdma_tx(ep, req);
  562. /* last packet is usually short (or a zlp) */
  563. if (likely(count != max))
  564. is_last = 1;
  565. else {
  566. if (likely(req->req.length != req->req.actual + count)
  567. || req->req.zero)
  568. is_last = 0;
  569. else
  570. is_last = 1;
  571. }
  572. DEBUG_EP0("%s: wrote %s %d bytes%s %d left %p\n", __func__,
  573. ep->ep.name, count,
  574. is_last ? "/L" : "",
  575. req->req.length - req->req.actual - count, req);
  576. /* requests complete when all IN data is in the FIFO */
  577. if (is_last) {
  578. ep->dev->ep0state = WAIT_FOR_SETUP;
  579. return 1;
  580. }
  581. return 0;
  582. }
/*
 * "Read" a SETUP packet: the data was already DMA-ed into the EP0
 * buffer, so this only invalidates the cache over it and reports the
 * fixed SETUP size.  Always returns sizeof(struct usb_ctrlrequest).
 *
 * NOTE(review): parameters cp and max are unused here - the caller
 * apparently reads the data straight from dev->dma_buf; confirm
 * before relying on them.
 */
int s3c_fifo_read(struct s3c_ep *ep, u32 *cp, int max)
{
	u32 bytes;

	bytes = sizeof(struct usb_ctrlrequest);

	invalidate_dcache_range((unsigned long) ep->dev->dma_buf[ep_index(ep)],
				(unsigned long) ep->dev->dma_buf[ep_index(ep)]
				+ DMA_BUFFER_SIZE);

	DEBUG_EP0("%s: bytes=%d, ep_index=%d %p\n", __func__,
		bytes, ep_index(ep), ep->dev->dma_buf[ep_index(ep)]);

	return bytes;
}
  594. /**
  595. * udc_set_address - set the USB address for this device
  596. * @address:
  597. *
  598. * Called from control endpoint function
  599. * after it decodes a set address setup packet.
  600. */
/*
 * Program DCFG with the newly assigned USB address and acknowledge
 * the SET_ADDRESS request with a status ZLP.
 *
 * NOTE(review): the address bits are OR-ed into DCFG without masking
 * out a previous value; fine after reset (address 0) but would not
 * cope with a second SET_ADDRESS - confirm against reconfig_usbd().
 */
static void udc_set_address(struct s3c_udc *dev, unsigned char address)
{
	u32 ctrl = readl(&reg->dcfg);
	writel(DEVICE_ADDRESS(address) | ctrl, &reg->dcfg);

	s3c_udc_ep0_zlp(dev);

	DEBUG_EP0("%s: USB OTG 2.0 Device address=%d, DCFG=0x%x\n",
		__func__, address, readl(&reg->dcfg));

	dev->usb_address = address;
}
  610. static inline void s3c_udc_ep0_set_stall(struct s3c_ep *ep)
  611. {
  612. struct s3c_udc *dev;
  613. u32 ep_ctrl = 0;
  614. dev = ep->dev;
  615. ep_ctrl = readl(&reg->in_endp[EP0_CON].diepctl);
  616. /* set the disable and stall bits */
  617. if (ep_ctrl & DEPCTL_EPENA)
  618. ep_ctrl |= DEPCTL_EPDIS;
  619. ep_ctrl |= DEPCTL_STALL;
  620. writel(ep_ctrl, &reg->in_endp[EP0_CON].diepctl);
  621. DEBUG_EP0("%s: set ep%d stall, DIEPCTL0 = 0x%x\n",
  622. __func__, ep_index(ep), &reg->in_endp[EP0_CON].diepctl);
  623. /*
  624. * The application can only set this bit, and the core clears it,
  625. * when a SETUP token is received for this endpoint
  626. */
  627. dev->ep0state = WAIT_FOR_SETUP;
  628. s3c_udc_pre_setup();
  629. }
/*
 * Start the EP0 OUT data stage for the request at the head of the EP0
 * queue; a zero-length request is answered directly with a status ZLP.
 */
static void s3c_ep0_read(struct s3c_udc *dev)
{
	struct s3c_request *req;
	struct s3c_ep *ep = &dev->ep[0];

	if (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next, struct s3c_request, queue);
	} else {
		/* EP0 kicked without a queued request - driver bug */
		debug("%s: ---> BUG\n", __func__);
		BUG();
		return;
	}

	DEBUG_EP0("%s: req = %p, req.length = 0x%x, req.actual = 0x%x\n",
		__func__, req, req->req.length, req->req.actual);

	if (req->req.length == 0) {
		/* zlp for Set_configuration, Set_interface,
		 * or Bulk-Only mass storge reset */
		ep->len = 0;
		s3c_udc_ep0_zlp(dev);

		DEBUG_EP0("%s: req.length = 0, bRequest = %d\n",
			__func__, usb_ctrl->bRequest);
		return;
	}

	setdma_rx(ep, req);
}
  654. /*
  655. * DATA_STATE_XMIT
  656. */
/*
 * Start/continue the EP0 IN data stage (DATA_STATE_XMIT) for the
 * request at the head of the EP0 queue.
 * Returns 1 when a transfer was started, 0 when the queue was empty.
 */
static int s3c_ep0_write(struct s3c_udc *dev)
{
	struct s3c_request *req;
	struct s3c_ep *ep = &dev->ep[0];
	int ret, need_zlp = 0;

	if (list_empty(&ep->queue))
		req = 0;
	else
		req = list_entry(ep->queue.next, struct s3c_request, queue);

	if (!req) {
		DEBUG_EP0("%s: NULL REQ\n", __func__);
		return 0;
	}

	DEBUG_EP0("%s: req = %p, req.length = 0x%x, req.actual = 0x%x\n",
		__func__, req, req->req.length, req->req.actual);

	if (req->req.length - req->req.actual == ep0_fifo_size) {
		/* Next write will end with the packet size, */
		/* so we need Zero-length-packet */
		need_zlp = 1;
	}

	ret = write_fifo_ep0(ep, req);

	if ((ret == 1) && !need_zlp) {
		/* Last packet */
		dev->ep0state = WAIT_FOR_COMPLETE;
		DEBUG_EP0("%s: finished, waiting for status\n", __func__);
	} else {
		/* more to send (or a trailing ZLP still due) */
		dev->ep0state = DATA_STATE_XMIT;
		DEBUG_EP0("%s: not finished\n", __func__);
	}

	return 1;
}
/* GET_STATUS response payload (2 bytes, staged in the EP0 DMA buffer) */
u16 g_status;

/*
 * Answer a USB_REQ_GET_STATUS control request for device, interface or
 * endpoint recipients by DMA-ing a 2-byte status word on EP0 IN.
 * Returns 0 on success, 1 for unsupported recipients/lengths (the
 * caller is expected to stall EP0).
 */
int s3c_udc_get_status(struct s3c_udc *dev,
		struct usb_ctrlrequest *crq)
{
	u8 ep_num = crq->wIndex & 0x7F;
	u32 ep_ctrl;
	u32 *p = the_controller->dma_buf[1];

	DEBUG_SETUP("%s: *** USB_REQ_GET_STATUS\n", __func__);
	printf("crq->brequest:0x%x\n", crq->bRequestType & USB_RECIP_MASK);

	switch (crq->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_INTERFACE:
		g_status = 0;
		DEBUG_SETUP("\tGET_STATUS:USB_RECIP_INTERFACE, g_stauts = %d\n",
			g_status);
		break;

	case USB_RECIP_DEVICE:
		g_status = 0x1; /* Self powered */
		DEBUG_SETUP("\tGET_STATUS: USB_RECIP_DEVICE, g_stauts = %d\n",
			g_status);
		break;

	case USB_RECIP_ENDPOINT:
		if (crq->wLength > 2) {
			DEBUG_SETUP("\tGET_STATUS:Not support EP or wLength\n");
			return 1;
		}

		/* report the halted state of the addressed endpoint */
		g_status = dev->ep[ep_num].stopped;
		DEBUG_SETUP("\tGET_STATUS: USB_RECIP_ENDPOINT, g_stauts = %d\n",
			g_status);
		break;

	default:
		return 1;
	}

	memcpy(p, &g_status, sizeof(g_status));

	/* push the status word to memory so EP0 IN DMA reads it */
	flush_dcache_range((unsigned long) p,
			   (unsigned long) p + DMA_BUFFER_SIZE);

	writel(the_controller->dma_addr[1], &reg->in_endp[EP0_CON].diepdma);
	writel(DIEPT_SIZ_PKT_CNT(1) | DIEPT_SIZ_XFER_SIZE(2),
	       &reg->in_endp[EP0_CON].dieptsiz);

	ep_ctrl = readl(&reg->in_endp[EP0_CON].diepctl);
	writel(ep_ctrl|DEPCTL_EPENA|DEPCTL_CNAK,
	       &reg->in_endp[EP0_CON].diepctl);
	/* complete_tx() runs the status OUT stage from this state */
	dev->ep0state = WAIT_FOR_NULL_COMPLETE;

	return 0;
}
  732. static void s3c_udc_set_nak(struct s3c_ep *ep)
  733. {
  734. u8 ep_num;
  735. u32 ep_ctrl = 0;
  736. ep_num = ep_index(ep);
  737. debug("%s: ep_num = %d, ep_type = %d\n", __func__, ep_num, ep->ep_type);
  738. if (ep_is_in(ep)) {
  739. ep_ctrl = readl(&reg->in_endp[ep_num].diepctl);
  740. ep_ctrl |= DEPCTL_SNAK;
  741. writel(ep_ctrl, &reg->in_endp[ep_num].diepctl);
  742. debug("%s: set NAK, DIEPCTL%d = 0x%x\n",
  743. __func__, ep_num, readl(&reg->in_endp[ep_num].diepctl));
  744. } else {
  745. ep_ctrl = readl(&reg->out_endp[ep_num].doepctl);
  746. ep_ctrl |= DEPCTL_SNAK;
  747. writel(ep_ctrl, &reg->out_endp[ep_num].doepctl);
  748. debug("%s: set NAK, DOEPCTL%d = 0x%x\n",
  749. __func__, ep_num, readl(&reg->out_endp[ep_num].doepctl));
  750. }
  751. return;
  752. }
  753. void s3c_udc_ep_set_stall(struct s3c_ep *ep)
  754. {
  755. u8 ep_num;
  756. u32 ep_ctrl = 0;
  757. ep_num = ep_index(ep);
  758. debug("%s: ep_num = %d, ep_type = %d\n", __func__, ep_num, ep->ep_type);
  759. if (ep_is_in(ep)) {
  760. ep_ctrl = readl(&reg->in_endp[ep_num].diepctl);
  761. /* set the disable and stall bits */
  762. if (ep_ctrl & DEPCTL_EPENA)
  763. ep_ctrl |= DEPCTL_EPDIS;
  764. ep_ctrl |= DEPCTL_STALL;
  765. writel(ep_ctrl, &reg->in_endp[ep_num].diepctl);
  766. debug("%s: set stall, DIEPCTL%d = 0x%x\n",
  767. __func__, ep_num, readl(&reg->in_endp[ep_num].diepctl));
  768. } else {
  769. ep_ctrl = readl(&reg->out_endp[ep_num].doepctl);
  770. /* set the stall bit */
  771. ep_ctrl |= DEPCTL_STALL;
  772. writel(ep_ctrl, &reg->out_endp[ep_num].doepctl);
  773. debug("%s: set stall, DOEPCTL%d = 0x%x\n",
  774. __func__, ep_num, readl(&reg->out_endp[ep_num].doepctl));
  775. }
  776. return;
  777. }
  778. void s3c_udc_ep_clear_stall(struct s3c_ep *ep)
  779. {
  780. u8 ep_num;
  781. u32 ep_ctrl = 0;
  782. ep_num = ep_index(ep);
  783. debug("%s: ep_num = %d, ep_type = %d\n", __func__, ep_num, ep->ep_type);
  784. if (ep_is_in(ep)) {
  785. ep_ctrl = readl(&reg->in_endp[ep_num].diepctl);
  786. /* clear stall bit */
  787. ep_ctrl &= ~DEPCTL_STALL;
  788. /*
  789. * USB Spec 9.4.5: For endpoints using data toggle, regardless
  790. * of whether an endpoint has the Halt feature set, a
  791. * ClearFeature(ENDPOINT_HALT) request always results in the
  792. * data toggle being reinitialized to DATA0.
  793. */
  794. if (ep->bmAttributes == USB_ENDPOINT_XFER_INT
  795. || ep->bmAttributes == USB_ENDPOINT_XFER_BULK) {
  796. ep_ctrl |= DEPCTL_SETD0PID; /* DATA0 */
  797. }
  798. writel(ep_ctrl, &reg->in_endp[ep_num].diepctl);
  799. debug("%s: cleared stall, DIEPCTL%d = 0x%x\n",
  800. __func__, ep_num, readl(&reg->in_endp[ep_num].diepctl));
  801. } else {
  802. ep_ctrl = readl(&reg->out_endp[ep_num].doepctl);
  803. /* clear stall bit */
  804. ep_ctrl &= ~DEPCTL_STALL;
  805. if (ep->bmAttributes == USB_ENDPOINT_XFER_INT
  806. || ep->bmAttributes == USB_ENDPOINT_XFER_BULK) {
  807. ep_ctrl |= DEPCTL_SETD0PID; /* DATA0 */
  808. }
  809. writel(ep_ctrl, &reg->out_endp[ep_num].doepctl);
  810. debug("%s: cleared stall, DOEPCTL%d = 0x%x\n",
  811. __func__, ep_num, readl(&reg->out_endp[ep_num].doepctl));
  812. }
  813. return;
  814. }
  815. static int s3c_udc_set_halt(struct usb_ep *_ep, int value)
  816. {
  817. struct s3c_ep *ep;
  818. struct s3c_udc *dev;
  819. unsigned long flags;
  820. u8 ep_num;
  821. ep = container_of(_ep, struct s3c_ep, ep);
  822. ep_num = ep_index(ep);
  823. if (unlikely(!_ep || !ep->desc || ep_num == EP0_CON ||
  824. ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC)) {
  825. debug("%s: %s bad ep or descriptor\n", __func__, ep->ep.name);
  826. return -EINVAL;
  827. }
  828. /* Attempt to halt IN ep will fail if any transfer requests
  829. * are still queue */
  830. if (value && ep_is_in(ep) && !list_empty(&ep->queue)) {
  831. debug("%s: %s queue not empty, req = %p\n",
  832. __func__, ep->ep.name,
  833. list_entry(ep->queue.next, struct s3c_request, queue));
  834. return -EAGAIN;
  835. }
  836. dev = ep->dev;
  837. debug("%s: ep_num = %d, value = %d\n", __func__, ep_num, value);
  838. spin_lock_irqsave(&dev->lock, flags);
  839. if (value == 0) {
  840. ep->stopped = 0;
  841. s3c_udc_ep_clear_stall(ep);
  842. } else {
  843. if (ep_num == 0)
  844. dev->ep0state = WAIT_FOR_SETUP;
  845. ep->stopped = 1;
  846. s3c_udc_ep_set_stall(ep);
  847. }
  848. spin_unlock_irqrestore(&dev->lock, flags);
  849. return 0;
  850. }
  851. void s3c_udc_ep_activate(struct s3c_ep *ep)
  852. {
  853. u8 ep_num;
  854. u32 ep_ctrl = 0, daintmsk = 0;
  855. ep_num = ep_index(ep);
  856. /* Read DEPCTLn register */
  857. if (ep_is_in(ep)) {
  858. ep_ctrl = readl(&reg->in_endp[ep_num].diepctl);
  859. daintmsk = 1 << ep_num;
  860. } else {
  861. ep_ctrl = readl(&reg->out_endp[ep_num].doepctl);
  862. daintmsk = (1 << ep_num) << DAINT_OUT_BIT;
  863. }
  864. debug("%s: EPCTRL%d = 0x%x, ep_is_in = %d\n",
  865. __func__, ep_num, ep_ctrl, ep_is_in(ep));
  866. /* If the EP is already active don't change the EP Control
  867. * register. */
  868. if (!(ep_ctrl & DEPCTL_USBACTEP)) {
  869. ep_ctrl = (ep_ctrl & ~DEPCTL_TYPE_MASK) |
  870. (ep->bmAttributes << DEPCTL_TYPE_BIT);
  871. ep_ctrl = (ep_ctrl & ~DEPCTL_MPS_MASK) |
  872. (ep->ep.maxpacket << DEPCTL_MPS_BIT);
  873. ep_ctrl |= (DEPCTL_SETD0PID | DEPCTL_USBACTEP | DEPCTL_SNAK);
  874. if (ep_is_in(ep)) {
  875. writel(ep_ctrl, &reg->in_endp[ep_num].diepctl);
  876. debug("%s: USB Ative EP%d, DIEPCTRL%d = 0x%x\n",
  877. __func__, ep_num, ep_num,
  878. readl(&reg->in_endp[ep_num].diepctl));
  879. } else {
  880. writel(ep_ctrl, &reg->out_endp[ep_num].doepctl);
  881. debug("%s: USB Ative EP%d, DOEPCTRL%d = 0x%x\n",
  882. __func__, ep_num, ep_num,
  883. readl(&reg->out_endp[ep_num].doepctl));
  884. }
  885. }
  886. /* Unmask EP Interrtupt */
  887. writel(readl(&reg->daintmsk)|daintmsk, &reg->daintmsk);
  888. debug("%s: DAINTMSK = 0x%x\n", __func__, readl(&reg->daintmsk));
  889. }
  890. static int s3c_udc_clear_feature(struct usb_ep *_ep)
  891. {
  892. struct s3c_udc *dev;
  893. struct s3c_ep *ep;
  894. u8 ep_num;
  895. ep = container_of(_ep, struct s3c_ep, ep);
  896. ep_num = ep_index(ep);
  897. dev = ep->dev;
  898. DEBUG_SETUP("%s: ep_num = %d, is_in = %d, clear_feature_flag = %d\n",
  899. __func__, ep_num, ep_is_in(ep), clear_feature_flag);
  900. if (usb_ctrl->wLength != 0) {
  901. DEBUG_SETUP("\tCLEAR_FEATURE: wLength is not zero.....\n");
  902. return 1;
  903. }
  904. switch (usb_ctrl->bRequestType & USB_RECIP_MASK) {
  905. case USB_RECIP_DEVICE:
  906. switch (usb_ctrl->wValue) {
  907. case USB_DEVICE_REMOTE_WAKEUP:
  908. DEBUG_SETUP("\tOFF:USB_DEVICE_REMOTE_WAKEUP\n");
  909. break;
  910. case USB_DEVICE_TEST_MODE:
  911. DEBUG_SETUP("\tCLEAR_FEATURE: USB_DEVICE_TEST_MODE\n");
  912. /** @todo Add CLEAR_FEATURE for TEST modes. */
  913. break;
  914. }
  915. s3c_udc_ep0_zlp(dev);
  916. break;
  917. case USB_RECIP_ENDPOINT:
  918. DEBUG_SETUP("\tCLEAR_FEATURE:USB_RECIP_ENDPOINT, wValue = %d\n",
  919. usb_ctrl->wValue);
  920. if (usb_ctrl->wValue == USB_ENDPOINT_HALT) {
  921. if (ep_num == 0) {
  922. s3c_udc_ep0_set_stall(ep);
  923. return 0;
  924. }
  925. s3c_udc_ep0_zlp(dev);
  926. s3c_udc_ep_clear_stall(ep);
  927. s3c_udc_ep_activate(ep);
  928. ep->stopped = 0;
  929. clear_feature_num = ep_num;
  930. clear_feature_flag = 1;
  931. }
  932. break;
  933. }
  934. return 0;
  935. }
  936. static int s3c_udc_set_feature(struct usb_ep *_ep)
  937. {
  938. struct s3c_udc *dev;
  939. struct s3c_ep *ep;
  940. u8 ep_num;
  941. ep = container_of(_ep, struct s3c_ep, ep);
  942. ep_num = ep_index(ep);
  943. dev = ep->dev;
  944. DEBUG_SETUP("%s: *** USB_REQ_SET_FEATURE , ep_num = %d\n",
  945. __func__, ep_num);
  946. if (usb_ctrl->wLength != 0) {
  947. DEBUG_SETUP("\tSET_FEATURE: wLength is not zero.....\n");
  948. return 1;
  949. }
  950. switch (usb_ctrl->bRequestType & USB_RECIP_MASK) {
  951. case USB_RECIP_DEVICE:
  952. switch (usb_ctrl->wValue) {
  953. case USB_DEVICE_REMOTE_WAKEUP:
  954. DEBUG_SETUP("\tSET_FEATURE:USB_DEVICE_REMOTE_WAKEUP\n");
  955. break;
  956. case USB_DEVICE_B_HNP_ENABLE:
  957. DEBUG_SETUP("\tSET_FEATURE: USB_DEVICE_B_HNP_ENABLE\n");
  958. break;
  959. case USB_DEVICE_A_HNP_SUPPORT:
  960. /* RH port supports HNP */
  961. DEBUG_SETUP("\tSET_FEATURE:USB_DEVICE_A_HNP_SUPPORT\n");
  962. break;
  963. case USB_DEVICE_A_ALT_HNP_SUPPORT:
  964. /* other RH port does */
  965. DEBUG_SETUP("\tSET: USB_DEVICE_A_ALT_HNP_SUPPORT\n");
  966. break;
  967. }
  968. s3c_udc_ep0_zlp(dev);
  969. return 0;
  970. case USB_RECIP_INTERFACE:
  971. DEBUG_SETUP("\tSET_FEATURE: USB_RECIP_INTERFACE\n");
  972. break;
  973. case USB_RECIP_ENDPOINT:
  974. DEBUG_SETUP("\tSET_FEATURE: USB_RECIP_ENDPOINT\n");
  975. if (usb_ctrl->wValue == USB_ENDPOINT_HALT) {
  976. if (ep_num == 0) {
  977. s3c_udc_ep0_set_stall(ep);
  978. return 0;
  979. }
  980. ep->stopped = 1;
  981. s3c_udc_ep_set_stall(ep);
  982. }
  983. s3c_udc_ep0_zlp(dev);
  984. return 0;
  985. }
  986. return 1;
  987. }
  988. /*
  989. * WAIT_FOR_SETUP (OUT_PKT_RDY)
  990. */
/*
 * Handle one SETUP packet on EP0: read the 8-byte control request from
 * the FIFO, pre-validate class-specific mass-storage requests, handle
 * the standard requests the controller answers itself, and forward the
 * rest to the bound gadget driver's setup() callback.
 *
 * Called with dev->lock held (it is dropped around driver->setup()).
 */
void s3c_ep0_setup(struct s3c_udc *dev)
{
	struct s3c_ep *ep = &dev->ep[0];
	int i;
	u8 ep_num;

	/* Nuke all previous transfers */
	nuke(ep, -EPROTO);

	/* read control req from fifo (8 bytes) */
	s3c_fifo_read(ep, (u32 *)usb_ctrl, 8);

	DEBUG_SETUP("%s: bRequestType = 0x%x(%s), bRequest = 0x%x"
		    "\twLength = 0x%x, wValue = 0x%x, wIndex= 0x%x\n",
		    __func__, usb_ctrl->bRequestType,
		    (usb_ctrl->bRequestType & USB_DIR_IN) ? "IN" : "OUT",
		    usb_ctrl->bRequest,
		    usb_ctrl->wLength, usb_ctrl->wValue, usb_ctrl->wIndex);

#ifdef DEBUG_S3C_UDC
	{
		/* hex dump of the raw SETUP packet */
		int i, len = sizeof(*usb_ctrl);
		char *p = (char *)usb_ctrl;

		printf("pkt = ");
		for (i = 0; i < len; i++) {
			printf("%02x", ((u8 *)p)[i]);
			if ((i & 7) == 7)
				printf(" ");
		}
		printf("\n");
	}
#endif

	/* Sanity-check the two mass-storage class requests: a malformed
	 * wLength stalls EP0 and re-arms for the next SETUP. */
	if (usb_ctrl->bRequest == GET_MAX_LUN_REQUEST &&
	    usb_ctrl->wLength != 1) {
		DEBUG_SETUP("\t%s:GET_MAX_LUN_REQUEST:invalid",
			    __func__);
		DEBUG_SETUP("wLength = %d, setup returned\n",
			    usb_ctrl->wLength);

		s3c_udc_ep0_set_stall(ep);
		dev->ep0state = WAIT_FOR_SETUP;

		return;
	} else if (usb_ctrl->bRequest == BOT_RESET_REQUEST &&
		   usb_ctrl->wLength != 0) {
		/* Bulk-Only mass storage reset (class-specific request) */
		DEBUG_SETUP("%s:BOT Rest:invalid wLength =%d, setup returned\n",
			    __func__, usb_ctrl->wLength);

		s3c_udc_ep0_set_stall(ep);
		dev->ep0state = WAIT_FOR_SETUP;

		return;
	}

	/* Set direction of EP0 */
	if (likely(usb_ctrl->bRequestType & USB_DIR_IN)) {
		ep->bEndpointAddress |= USB_DIR_IN;
	} else {
		ep->bEndpointAddress &= ~USB_DIR_IN;
	}
	/* cope with automagic for some standard requests. */
	dev->req_std = (usb_ctrl->bRequestType & USB_TYPE_MASK)
		== USB_TYPE_STANDARD;
	dev->req_config = 0;
	dev->req_pending = 1;

	/* Handle some SETUP packets ourselves */
	if (dev->req_std) {
		switch (usb_ctrl->bRequest) {
		case USB_REQ_SET_ADDRESS:
			DEBUG_SETUP("%s: *** USB_REQ_SET_ADDRESS (%d)\n",
				    __func__, usb_ctrl->wValue);
			if (usb_ctrl->bRequestType
			    != (USB_TYPE_STANDARD | USB_RECIP_DEVICE))
				break;
			/* handled entirely in hardware/driver; no
			 * forwarding to the gadget driver */
			udc_set_address(dev, usb_ctrl->wValue);
			return;
		case USB_REQ_SET_CONFIGURATION:
			DEBUG_SETUP("=====================================\n");
			DEBUG_SETUP("%s: USB_REQ_SET_CONFIGURATION (%d)\n",
				    __func__, usb_ctrl->wValue);
			if (usb_ctrl->bRequestType == USB_RECIP_DEVICE) {
				reset_available = 1;
				dev->req_config = 1;
			}
			break;
		case USB_REQ_GET_DESCRIPTOR:
			DEBUG_SETUP("%s: *** USB_REQ_GET_DESCRIPTOR\n",
				    __func__);
			break;
		case USB_REQ_SET_INTERFACE:
			DEBUG_SETUP("%s: *** USB_REQ_SET_INTERFACE (%d)\n",
				    __func__, usb_ctrl->wValue);
			if (usb_ctrl->bRequestType == USB_RECIP_INTERFACE) {
				reset_available = 1;
				dev->req_config = 1;
			}
			break;
		case USB_REQ_GET_CONFIGURATION:
			DEBUG_SETUP("%s: *** USB_REQ_GET_CONFIGURATION\n",
				    __func__);
			break;
		case USB_REQ_GET_STATUS:
			/* answered locally when possible */
			if (!s3c_udc_get_status(dev, usb_ctrl))
				return;
			break;
		case USB_REQ_CLEAR_FEATURE:
			/* wIndex low bits select the endpoint number */
			ep_num = usb_ctrl->wIndex & 0x7f;
			if (!s3c_udc_clear_feature(&dev->ep[ep_num].ep))
				return;
			break;
		case USB_REQ_SET_FEATURE:
			ep_num = usb_ctrl->wIndex & 0x7f;
			if (!s3c_udc_set_feature(&dev->ep[ep_num].ep))
				return;
			break;
		default:
			DEBUG_SETUP("%s: *** Default of usb_ctrl->bRequest=0x%x"
				    "happened.\n", __func__, usb_ctrl->bRequest);
			break;
		}
	}

	if (likely(dev->driver)) {
		/* device-2-host (IN) or no data setup command,
		 * process immediately */
		DEBUG_SETUP("%s:usb_ctrlreq will be passed to fsg_setup()\n",
			    __func__);

		/* lock dropped while the gadget driver runs */
		spin_unlock(&dev->lock);
		i = dev->driver->setup(&dev->gadget, usb_ctrl);
		spin_lock(&dev->lock);

		if (i < 0) {
			if (dev->req_config) {
				DEBUG_SETUP("\tconfig change 0x%02x fail %d?\n",
					    (u32)usb_ctrl->bRequest, i);
				return;
			}

			/* setup processing failed, force stall */
			s3c_udc_ep0_set_stall(ep);
			dev->ep0state = WAIT_FOR_SETUP;

			DEBUG_SETUP("\tdev->driver->setup failed (%d),"
				    " bRequest = %d\n",
				    i, usb_ctrl->bRequest);
		} else if (dev->req_pending) {
			dev->req_pending = 0;
			DEBUG_SETUP("\tdev->req_pending...\n");
		}

		DEBUG_SETUP("\tep0state = %s\n", state_names[dev->ep0state]);
	}
}
  1131. /*
  1132. * handle ep0 interrupt
  1133. */
  1134. static void s3c_handle_ep0(struct s3c_udc *dev)
  1135. {
  1136. if (dev->ep0state == WAIT_FOR_SETUP) {
  1137. DEBUG_OUT_EP("%s: WAIT_FOR_SETUP\n", __func__);
  1138. s3c_ep0_setup(dev);
  1139. } else {
  1140. DEBUG_OUT_EP("%s: strange state!!(state = %s)\n",
  1141. __func__, state_names[dev->ep0state]);
  1142. }
  1143. }
  1144. static void s3c_ep0_kick(struct s3c_udc *dev, struct s3c_ep *ep)
  1145. {
  1146. DEBUG_EP0("%s: ep_is_in = %d\n", __func__, ep_is_in(ep));
  1147. if (ep_is_in(ep)) {
  1148. dev->ep0state = DATA_STATE_XMIT;
  1149. s3c_ep0_write(dev);
  1150. } else {
  1151. dev->ep0state = DATA_STATE_RECV;
  1152. s3c_ep0_read(dev);
  1153. }
  1154. }