ep0.c

/**
 * ep0.c - DesignWare USB3 DRD Controller Endpoint 0 Handling
 *
 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
 *
 * Authors: Felipe Balbi <balbi@ti.com>,
 *          Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2, as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/composite.h>

#include "core.h"
#include "gadget.h"
#include "io.h"

static void dwc3_ep0_do_control_status(struct dwc3 *dwc, u32 epnum);
static const char *dwc3_ep0_state_string(enum dwc3_ep0_state state)
{
        switch (state) {
        case EP0_UNCONNECTED:
                return "Unconnected";
        case EP0_SETUP_PHASE:
                return "Setup Phase";
        case EP0_DATA_PHASE:
                return "Data Phase";
        case EP0_STATUS_PHASE:
                return "Status Phase";
        default:
                return "UNKNOWN";
        }
}
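
/*
 * dwc3_ep0_start_trans - sets up a single TRB on the shared ep0 TRB and
 * issues DEPCMD_STARTTRANSFER for the given physical endpoint. Bails out
 * early if the endpoint is still marked busy from a previous transfer.
 */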
static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma,
                u32 len, u32 type)
{
        struct dwc3_gadget_ep_cmd_params params;
        struct dwc3_trb_hw *trb_hw;
        struct dwc3_trb trb;
        struct dwc3_ep *dep;
        int ret;

        dep = dwc->eps[epnum];
        if (dep->flags & DWC3_EP_BUSY) {
                dev_vdbg(dwc->dev, "%s: still busy\n", dep->name);
                return 0;
        }

        trb_hw = dwc->ep0_trb;
        memset(&trb, 0, sizeof(trb));

        trb.trbctl = type;
        trb.bplh = buf_dma;
        trb.length = len;

        trb.hwo = 1;
        trb.lst = 1;
        trb.ioc = 1;
        trb.isp_imi = 1;

        dwc3_trb_to_hw(&trb, trb_hw);

        memset(&params, 0, sizeof(params));
        params.param0 = upper_32_bits(dwc->ep0_trb_addr);
        params.param1 = lower_32_bits(dwc->ep0_trb_addr);

        ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
                        DWC3_DEPCMD_STARTTRANSFER, &params);
        if (ret < 0) {
                dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");
                return ret;
        }

        dep->flags |= DWC3_EP_BUSY;
        dep->res_trans_idx = dwc3_gadget_ep_get_transfer_index(dwc,
                        dep->number);

        dwc->ep0_next_event = DWC3_EP0_COMPLETE;

        return 0;
}
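
/*
 * __dwc3_gadget_ep0_queue - internal helper that places the request on the
 * endpoint's request_list and, if an XferNotReady event already arrived
 * (DWC3_EP_PENDING_REQUEST) or a delayed status is pending, kicks the
 * corresponding transfer immediately. Called with dwc->lock held.
 */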
static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep,
                struct dwc3_request *req)
{
        struct dwc3 *dwc = dep->dwc;
        int ret = 0;

        req->request.actual = 0;
        req->request.status = -EINPROGRESS;
        req->epnum = dep->number;

        list_add_tail(&req->list, &dep->request_list);

        /*
         * Gadget driver might not be quick enough to queue a request
         * before we get a Transfer Not Ready event on this endpoint.
         *
         * In that case, we will set DWC3_EP_PENDING_REQUEST. When that
         * flag is set, it's telling us that as soon as Gadget queues the
         * required request, we should kick the transfer here because the
         * IRQ we were waiting for is long gone.
         */
        if (dep->flags & DWC3_EP_PENDING_REQUEST) {
                unsigned direction;

                direction = !!(dep->flags & DWC3_EP0_DIR_IN);

                if (dwc->ep0state != EP0_DATA_PHASE) {
                        dev_WARN(dwc->dev, "Unexpected pending request\n");
                        return 0;
                }

                ret = dwc3_ep0_start_trans(dwc, direction,
                                req->request.dma, req->request.length,
                                DWC3_TRBCTL_CONTROL_DATA);
                dep->flags &= ~(DWC3_EP_PENDING_REQUEST |
                                DWC3_EP0_DIR_IN);
        } else if (dwc->delayed_status) {
                dwc->delayed_status = false;

                if (dwc->ep0state == EP0_STATUS_PHASE)
                        dwc3_ep0_do_control_status(dwc, 1);
                else
                        dev_dbg(dwc->dev, "too early for delayed status\n");
        }

        return ret;
}
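
/*
 * dwc3_gadget_ep0_queue - usb_ep_ops .queue implementation for ep0. Takes
 * the controller lock, rejects requests queued to a disabled endpoint and
 * allows only one outstanding request, since ep0 OUT/IN share one TRB.
 */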
int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
                gfp_t gfp_flags)
{
        struct dwc3_request *req = to_dwc3_request(request);
        struct dwc3_ep *dep = to_dwc3_ep(ep);
        struct dwc3 *dwc = dep->dwc;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&dwc->lock, flags);
        if (!dep->desc) {
                dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
                                request, dep->name);
                ret = -ESHUTDOWN;
                goto out;
        }

        /* we share one TRB for ep0/1 */
        if (!list_empty(&dep->request_list)) {
                ret = -EBUSY;
                goto out;
        }

        dev_vdbg(dwc->dev, "queueing request %p to %s length %d, state '%s'\n",
                        request, dep->name, request->length,
                        dwc3_ep0_state_string(dwc->ep0state));

        ret = __dwc3_gadget_ep0_queue(dep, req);

out:
        spin_unlock_irqrestore(&dwc->lock, flags);

        return ret;
}
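
/*
 * dwc3_ep0_stall_and_restart - protocol-stalls ep0, completes any queued
 * request with -ECONNRESET and re-arms the endpoint to receive the next
 * SETUP packet.
 */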
static void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
{
        struct dwc3_ep *dep = dwc->eps[0];

        /* stall is always issued on EP0 */
        __dwc3_gadget_ep_set_halt(dep, 1);
        dep->flags = DWC3_EP_ENABLED;
        dwc->delayed_status = false;

        if (!list_empty(&dep->request_list)) {
                struct dwc3_request *req;

                req = next_request(&dep->request_list);
                dwc3_gadget_giveback(dep, req, -ECONNRESET);
        }

        dwc->ep0state = EP0_SETUP_PHASE;
        dwc3_ep0_out_start(dwc);
}
void dwc3_ep0_out_start(struct dwc3 *dwc)
{
        int ret;

        ret = dwc3_ep0_start_trans(dwc, 0, dwc->ctrl_req_addr, 8,
                        DWC3_TRBCTL_CONTROL_SETUP);
        WARN_ON(ret < 0);
}
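
/*
 * dwc3_wIndex_to_dep - maps the endpoint address in wIndex to the driver's
 * dwc3_ep (even slots are OUT, odd slots are IN). Returns NULL if the
 * endpoint is not enabled.
 */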
static struct dwc3_ep *dwc3_wIndex_to_dep(struct dwc3 *dwc, __le16 wIndex_le)
{
        struct dwc3_ep *dep;
        u32 windex = le16_to_cpu(wIndex_le);
        u32 epnum;

        epnum = (windex & USB_ENDPOINT_NUMBER_MASK) << 1;
        if ((windex & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
                epnum |= 1;

        dep = dwc->eps[epnum];
        if (dep->flags & DWC3_EP_ENABLED)
                return dep;

        return NULL;
}
static void dwc3_ep0_status_cmpl(struct usb_ep *ep, struct usb_request *req)
{
}

/*
 * ch 9.4.5
 */
static int dwc3_ep0_handle_status(struct dwc3 *dwc,
                struct usb_ctrlrequest *ctrl)
{
        struct dwc3_ep *dep;
        u32 recip;
        u16 usb_status = 0;
        __le16 *response_pkt;

        recip = ctrl->bRequestType & USB_RECIP_MASK;
        switch (recip) {
        case USB_RECIP_DEVICE:
                /*
                 * We are self-powered. U1/U2/LTM will be set later
                 * once we handle these states. RemoteWakeup is 0 on SS.
                 */
                usb_status |= dwc->is_selfpowered << USB_DEVICE_SELF_POWERED;
                break;

        case USB_RECIP_INTERFACE:
                /*
                 * Function Remote Wake Capable  D0
                 * Function Remote Wakeup        D1
                 */
                break;

        case USB_RECIP_ENDPOINT:
                dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex);
                if (!dep)
                        return -EINVAL;

                if (dep->flags & DWC3_EP_STALL)
                        usb_status = 1 << USB_ENDPOINT_HALT;
                break;
        default:
                return -EINVAL;
        }

        response_pkt = (__le16 *) dwc->setup_buf;
        *response_pkt = cpu_to_le16(usb_status);

        dep = dwc->eps[0];
        dwc->ep0_usb_req.dep = dep;
        dwc->ep0_usb_req.request.length = sizeof(*response_pkt);
        dwc->ep0_usb_req.request.dma = dwc->setup_buf_addr;
        dwc->ep0_usb_req.request.complete = dwc3_ep0_status_cmpl;

        return __dwc3_gadget_ep0_queue(dep, &dwc->ep0_usb_req);
}
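
/*
 * dwc3_ep0_handle_feature - handles SET_FEATURE (set != 0) and
 * CLEAR_FEATURE requests for device, interface and endpoint recipients,
 * including TEST_MODE selection and endpoint halt.
 */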
static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
                struct usb_ctrlrequest *ctrl, int set)
{
        struct dwc3_ep *dep;
        u32 recip;
        u32 wValue;
        u32 wIndex;
        int ret;
        u32 mode;

        wValue = le16_to_cpu(ctrl->wValue);
        wIndex = le16_to_cpu(ctrl->wIndex);
        recip = ctrl->bRequestType & USB_RECIP_MASK;
        switch (recip) {
        case USB_RECIP_DEVICE:
                /*
                 * 9.4.1 says only for SS; in Address state only for the
                 * default control pipe.
                 */
                switch (wValue) {
                case USB_DEVICE_U1_ENABLE:
                case USB_DEVICE_U2_ENABLE:
                case USB_DEVICE_LTM_ENABLE:
                        if (dwc->dev_state != DWC3_CONFIGURED_STATE)
                                return -EINVAL;
                        if (dwc->speed != DWC3_DSTS_SUPERSPEED)
                                return -EINVAL;
                }

                /* XXX add U[12] & LTM */
                switch (wValue) {
                case USB_DEVICE_REMOTE_WAKEUP:
                        break;
                case USB_DEVICE_U1_ENABLE:
                        break;
                case USB_DEVICE_U2_ENABLE:
                        break;
                case USB_DEVICE_LTM_ENABLE:
                        break;

                case USB_DEVICE_TEST_MODE:
                        if ((wIndex & 0xff) != 0)
                                return -EINVAL;
                        if (!set)
                                return -EINVAL;

                        mode = wIndex >> 8;
                        ret = dwc3_gadget_set_test_mode(dwc, mode);
                        if (ret < 0) {
                                dev_dbg(dwc->dev, "Invalid Test #%d\n",
                                                mode);
                                return ret;
                        }
                }
                break;

        case USB_RECIP_INTERFACE:
                switch (wValue) {
                case USB_INTRF_FUNC_SUSPEND:
                        if (wIndex & USB_INTRF_FUNC_SUSPEND_LP)
                                /* XXX enable Low power suspend */
                                ;
                        if (wIndex & USB_INTRF_FUNC_SUSPEND_RW)
                                /* XXX enable remote wakeup */
                                ;
                        break;
                default:
                        return -EINVAL;
                }
                break;

        case USB_RECIP_ENDPOINT:
                switch (wValue) {
                case USB_ENDPOINT_HALT:
                        dep = dwc3_wIndex_to_dep(dwc, wIndex);
                        if (!dep)
                                return -EINVAL;
                        ret = __dwc3_gadget_ep_set_halt(dep, set);
                        if (ret)
                                return -EINVAL;
                        break;
                default:
                        return -EINVAL;
                }
                break;

        default:
                return -EINVAL;
        }

        return 0;
}
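
/*
 * dwc3_ep0_set_address - programs the new device address into DCFG and
 * moves the device between Default and Address state. SET_ADDRESS while
 * configured is rejected.
 */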
static int dwc3_ep0_set_address(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
        u32 addr;
        u32 reg;

        addr = le16_to_cpu(ctrl->wValue);
        if (addr > 127) {
                dev_dbg(dwc->dev, "invalid device address %d\n", addr);
                return -EINVAL;
        }

        if (dwc->dev_state == DWC3_CONFIGURED_STATE) {
                dev_dbg(dwc->dev, "trying to set address when configured\n");
                return -EINVAL;
        }

        reg = dwc3_readl(dwc->regs, DWC3_DCFG);
        reg &= ~(DWC3_DCFG_DEVADDR_MASK);
        reg |= DWC3_DCFG_DEVADDR(addr);
        dwc3_writel(dwc->regs, DWC3_DCFG, reg);

        if (addr)
                dwc->dev_state = DWC3_ADDRESS_STATE;
        else
                dwc->dev_state = DWC3_DEFAULT_STATE;

        return 0;
}
static int dwc3_ep0_delegate_req(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
        int ret;

        spin_unlock(&dwc->lock);
        ret = dwc->gadget_driver->setup(&dwc->gadget, ctrl);
        spin_lock(&dwc->lock);

        return ret;
}
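
/*
 * dwc3_ep0_set_config - handles SET_CONFIGURATION by delegating to the
 * gadget driver and tracking the resulting device state; on a successful
 * non-zero configuration it also schedules a TX FIFO resize.
 */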
static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
        u32 cfg;
        int ret;

        dwc->start_config_issued = false;
        cfg = le16_to_cpu(ctrl->wValue);

        switch (dwc->dev_state) {
        case DWC3_DEFAULT_STATE:
                return -EINVAL;

        case DWC3_ADDRESS_STATE:
                ret = dwc3_ep0_delegate_req(dwc, ctrl);
                /* if the cfg matches and the cfg is non zero */
                if (cfg && (!ret || (ret == USB_GADGET_DELAYED_STATUS))) {
                        dwc->dev_state = DWC3_CONFIGURED_STATE;
                        dwc->resize_fifos = true;
                        dev_dbg(dwc->dev, "resize fifos flag SET\n");
                }
                break;

        case DWC3_CONFIGURED_STATE:
                ret = dwc3_ep0_delegate_req(dwc, ctrl);
                if (!cfg)
                        dwc->dev_state = DWC3_ADDRESS_STATE;
                break;
        default:
                ret = -EINVAL;
        }
        return ret;
}
static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
        int ret;

        switch (ctrl->bRequest) {
        case USB_REQ_GET_STATUS:
                dev_vdbg(dwc->dev, "USB_REQ_GET_STATUS\n");
                ret = dwc3_ep0_handle_status(dwc, ctrl);
                break;
        case USB_REQ_CLEAR_FEATURE:
                dev_vdbg(dwc->dev, "USB_REQ_CLEAR_FEATURE\n");
                ret = dwc3_ep0_handle_feature(dwc, ctrl, 0);
                break;
        case USB_REQ_SET_FEATURE:
                dev_vdbg(dwc->dev, "USB_REQ_SET_FEATURE\n");
                ret = dwc3_ep0_handle_feature(dwc, ctrl, 1);
                break;
        case USB_REQ_SET_ADDRESS:
                dev_vdbg(dwc->dev, "USB_REQ_SET_ADDRESS\n");
                ret = dwc3_ep0_set_address(dwc, ctrl);
                break;
        case USB_REQ_SET_CONFIGURATION:
                dev_vdbg(dwc->dev, "USB_REQ_SET_CONFIGURATION\n");
                ret = dwc3_ep0_set_config(dwc, ctrl);
                break;
        default:
                dev_vdbg(dwc->dev, "Forwarding to gadget driver\n");
                ret = dwc3_ep0_delegate_req(dwc, ctrl);
                break;
        }

        return ret;
}
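
/*
 * dwc3_ep0_inspect_setup - decodes the just-received SETUP packet, decides
 * between two- and three-stage control transfers, dispatches standard
 * requests locally and everything else to the gadget driver. Any failure
 * ends in a stall-and-restart of ep0.
 */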
static void dwc3_ep0_inspect_setup(struct dwc3 *dwc,
                const struct dwc3_event_depevt *event)
{
        struct usb_ctrlrequest *ctrl = dwc->ctrl_req;
        int ret;
        u32 len;

        if (!dwc->gadget_driver)
                goto err;

        len = le16_to_cpu(ctrl->wLength);
        if (!len) {
                dwc->three_stage_setup = false;
                dwc->ep0_expect_in = false;
                dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS;
        } else {
                dwc->three_stage_setup = true;
                dwc->ep0_expect_in = !!(ctrl->bRequestType & USB_DIR_IN);
                dwc->ep0_next_event = DWC3_EP0_NRDY_DATA;
        }

        if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
                ret = dwc3_ep0_std_request(dwc, ctrl);
        else
                ret = dwc3_ep0_delegate_req(dwc, ctrl);

        if (ret == USB_GADGET_DELAYED_STATUS)
                dwc->delayed_status = true;

        if (ret >= 0)
                return;

err:
        dwc3_ep0_stall_and_restart(dwc);
}
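
/*
 * dwc3_ep0_complete_data - completion handler for the Data phase. Computes
 * the transferred length from the TRB, copies data out of the bounce
 * buffer for short OUT transfers, and gives the request back; a short IN
 * transfer is treated as an error and stalls ep0.
 */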
static void dwc3_ep0_complete_data(struct dwc3 *dwc,
                const struct dwc3_event_depevt *event)
{
        struct dwc3_request *r = NULL;
        struct usb_request *ur;
        struct dwc3_trb trb;
        struct dwc3_ep *ep0;
        u32 transferred;
        u8 epnum;

        epnum = event->endpoint_number;
        ep0 = dwc->eps[0];

        dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS;

        r = next_request(&ep0->request_list);
        ur = &r->request;

        dwc3_trb_to_nat(dwc->ep0_trb, &trb);

        if (dwc->ep0_bounced) {
                transferred = min_t(u32, ur->length,
                                ep0->endpoint.maxpacket - trb.length);
                memcpy(ur->buf, dwc->ep0_bounce, transferred);
                dwc->ep0_bounced = false;
        } else {
                transferred = ur->length - trb.length;
                ur->actual += transferred;
        }

        if ((epnum & 1) && ur->actual < ur->length) {
                /* for some reason we did not get everything out */
                dwc3_ep0_stall_and_restart(dwc);
        } else {
                /*
                 * handle the case where we have to send a zero packet. This
                 * seems to be the case when req.length > maxpacket. Could it be?
                 */
                if (r)
                        dwc3_gadget_giveback(ep0, r, 0);
        }
}
static void dwc3_ep0_complete_req(struct dwc3 *dwc,
                const struct dwc3_event_depevt *event)
{
        struct dwc3_request *r;
        struct dwc3_ep *dep;

        dep = dwc->eps[0];

        if (!list_empty(&dep->request_list)) {
                r = next_request(&dep->request_list);

                dwc3_gadget_giveback(dep, r, 0);
        }

        dwc->ep0state = EP0_SETUP_PHASE;
        dwc3_ep0_out_start(dwc);
}
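
/*
 * dwc3_ep0_xfer_complete - XferComplete dispatcher; clears the endpoint's
 * BUSY flag and routes the event to the Setup, Data or Status phase
 * handler based on the current ep0 state.
 */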
static void dwc3_ep0_xfer_complete(struct dwc3 *dwc,
                const struct dwc3_event_depevt *event)
{
        struct dwc3_ep *dep = dwc->eps[event->endpoint_number];

        dep->flags &= ~DWC3_EP_BUSY;
        dwc->setup_packet_pending = false;

        switch (dwc->ep0state) {
        case EP0_SETUP_PHASE:
                dev_vdbg(dwc->dev, "Inspecting Setup Bytes\n");
                dwc3_ep0_inspect_setup(dwc, event);
                break;

        case EP0_DATA_PHASE:
                dev_vdbg(dwc->dev, "Data Phase\n");
                dwc3_ep0_complete_data(dwc, event);
                break;

        case EP0_STATUS_PHASE:
                dev_vdbg(dwc->dev, "Status Phase\n");
                dwc3_ep0_complete_req(dwc, event);
                break;
        default:
                WARN(true, "UNKNOWN ep0state %d\n", dwc->ep0state);
        }
}
static void dwc3_ep0_do_control_setup(struct dwc3 *dwc,
                const struct dwc3_event_depevt *event)
{
        dwc3_ep0_out_start(dwc);
}
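
/*
 * dwc3_ep0_do_control_data - starts the Data phase. If the gadget driver
 * has not queued a request yet, DWC3_EP_PENDING_REQUEST is set and the
 * transfer is started later from __dwc3_gadget_ep0_queue(). OUT transfers
 * that are not a multiple of wMaxPacketSize go through the ep0 bounce
 * buffer.
 */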
static void dwc3_ep0_do_control_data(struct dwc3 *dwc,
                const struct dwc3_event_depevt *event)
{
        struct dwc3_ep *dep;
        struct dwc3_request *req;
        int ret;

        dep = dwc->eps[0];

        if (list_empty(&dep->request_list)) {
                dev_vdbg(dwc->dev, "pending request for EP0 Data phase\n");
                dep->flags |= DWC3_EP_PENDING_REQUEST;

                if (event->endpoint_number)
                        dep->flags |= DWC3_EP0_DIR_IN;
                return;
        }

        req = next_request(&dep->request_list);
        req->direction = !!event->endpoint_number;

        if (req->request.length == 0) {
                ret = dwc3_ep0_start_trans(dwc, event->endpoint_number,
                                dwc->ctrl_req_addr, 0,
                                DWC3_TRBCTL_CONTROL_DATA);
        } else if ((req->request.length % dep->endpoint.maxpacket)
                        && (event->endpoint_number == 0)) {
                dwc3_map_buffer_to_dma(req);

                WARN_ON(req->request.length > dep->endpoint.maxpacket);

                dwc->ep0_bounced = true;

                /*
                 * REVISIT in case request length is bigger than EP0
                 * wMaxPacketSize, we will need two chained TRBs to handle
                 * the transfer.
                 */
                ret = dwc3_ep0_start_trans(dwc, event->endpoint_number,
                                dwc->ep0_bounce_addr, dep->endpoint.maxpacket,
                                DWC3_TRBCTL_CONTROL_DATA);
        } else {
                dwc3_map_buffer_to_dma(req);

                ret = dwc3_ep0_start_trans(dwc, event->endpoint_number,
                                req->request.dma, req->request.length,
                                DWC3_TRBCTL_CONTROL_DATA);
        }

        WARN_ON(ret < 0);
}
static int dwc3_ep0_start_control_status(struct dwc3_ep *dep)
{
        struct dwc3 *dwc = dep->dwc;
        u32 type;

        type = dwc->three_stage_setup ? DWC3_TRBCTL_CONTROL_STATUS3
                : DWC3_TRBCTL_CONTROL_STATUS2;

        return dwc3_ep0_start_trans(dwc, dep->number,
                        dwc->ctrl_req_addr, 0, type);
}
static void dwc3_ep0_do_control_status(struct dwc3 *dwc, u32 epnum)
{
        struct dwc3_ep *dep = dwc->eps[epnum];

        if (dwc->resize_fifos) {
                dev_dbg(dwc->dev, "starting to resize fifos\n");
                dwc3_gadget_resize_tx_fifos(dwc);
                dwc->resize_fifos = 0;
        }

        WARN_ON(dwc3_ep0_start_control_status(dep));
}
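
/*
 * dwc3_ep0_xfernotready - XferNotReady dispatcher. Validates that the phase
 * the host is asking for matches what the driver expects next and either
 * starts the Setup/Data/Status phase or stalls and restarts ep0 on a
 * mismatch.
 */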
static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
                const struct dwc3_event_depevt *event)
{
        dwc->setup_packet_pending = true;

        /*
         * This part is very tricky: if we have just handled
         * XferNotReady(Setup) and we're now expecting a
         * XferComplete but, instead, we receive another
         * XferNotReady(Setup), we should STALL and restart
         * the state machine.
         *
         * In all other cases, we just continue waiting
         * for the XferComplete event.
         *
         * We are a little bit unsafe here because we're
         * not trying to ensure that the last event was, indeed,
         * XferNotReady(Setup).
         *
         * Still, we don't expect any condition where that
         * should happen and, even if it does, it would be
         * another error condition.
         */
        if (dwc->ep0_next_event == DWC3_EP0_COMPLETE) {
                switch (event->status) {
                case DEPEVT_STATUS_CONTROL_SETUP:
                        dev_vdbg(dwc->dev, "Unexpected XferNotReady(Setup)\n");
                        dwc3_ep0_stall_and_restart(dwc);
                        break;
                case DEPEVT_STATUS_CONTROL_DATA:
                        /* FALLTHROUGH */
                case DEPEVT_STATUS_CONTROL_STATUS:
                        /* FALLTHROUGH */
                default:
                        dev_vdbg(dwc->dev, "waiting for XferComplete\n");
                }

                return;
        }

        switch (event->status) {
        case DEPEVT_STATUS_CONTROL_SETUP:
                dev_vdbg(dwc->dev, "Control Setup\n");
                dwc->ep0state = EP0_SETUP_PHASE;
                dwc3_ep0_do_control_setup(dwc, event);
                break;

        case DEPEVT_STATUS_CONTROL_DATA:
                dev_vdbg(dwc->dev, "Control Data\n");
                dwc->ep0state = EP0_DATA_PHASE;

                if (dwc->ep0_next_event != DWC3_EP0_NRDY_DATA) {
                        dev_vdbg(dwc->dev, "Expected %d got %d\n",
                                        dwc->ep0_next_event,
                                        DWC3_EP0_NRDY_DATA);

                        dwc3_ep0_stall_and_restart(dwc);
                        return;
                }

                /*
                 * One of the possible error cases is when the Host _does_
                 * request a Data phase, but does so in the wrong direction.
                 *
                 * Here, we already know ep0_next_event is DATA (see above),
                 * so we only need to check for direction.
                 */
                if (dwc->ep0_expect_in != event->endpoint_number) {
                        dev_vdbg(dwc->dev, "Wrong direction for Data phase\n");
                        dwc3_ep0_stall_and_restart(dwc);
                        return;
                }

                dwc3_ep0_do_control_data(dwc, event);
                break;

        case DEPEVT_STATUS_CONTROL_STATUS:
                dev_vdbg(dwc->dev, "Control Status\n");
                dwc->ep0state = EP0_STATUS_PHASE;

                if (dwc->ep0_next_event != DWC3_EP0_NRDY_STATUS) {
                        dev_vdbg(dwc->dev, "Expected %d got %d\n",
                                        dwc->ep0_next_event,
                                        DWC3_EP0_NRDY_STATUS);

                        dwc3_ep0_stall_and_restart(dwc);
                        return;
                }

                if (dwc->delayed_status) {
                        WARN_ON_ONCE(event->endpoint_number != 1);
                        dev_vdbg(dwc->dev, "Mass Storage delayed status\n");
                        return;
                }

                dwc3_ep0_do_control_status(dwc, event->endpoint_number);
        }
}
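
/*
 * dwc3_ep0_interrupt - entry point for all endpoint events on ep0; only
 * XferComplete and XferNotReady are acted upon, the remaining events are
 * ignored here.
 */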
void dwc3_ep0_interrupt(struct dwc3 *dwc,
                const struct dwc3_event_depevt *event)
{
        u8 epnum = event->endpoint_number;

        dev_dbg(dwc->dev, "%s while ep%d%s in state '%s'\n",
                        dwc3_ep_event_string(event->endpoint_event),
                        epnum >> 1, (epnum & 1) ? "in" : "out",
                        dwc3_ep0_state_string(dwc->ep0state));

        switch (event->endpoint_event) {
        case DWC3_DEPEVT_XFERCOMPLETE:
                dwc3_ep0_xfer_complete(dwc, event);
                break;

        case DWC3_DEPEVT_XFERNOTREADY:
                dwc3_ep0_xfernotready(dwc, event);
                break;

        case DWC3_DEPEVT_XFERINPROGRESS:
        case DWC3_DEPEVT_RXTXFIFOEVT:
        case DWC3_DEPEVT_STREAMEVT:
        case DWC3_DEPEVT_EPCMDCMPLT:
                break;
        }
}