
/**
 * ep0.c - DesignWare USB3 DRD Controller Endpoint 0 Handling
 *
 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
 *
 * Authors: Felipe Balbi <balbi@ti.com>,
 *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2, as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/composite.h>

#include "core.h"
#include "gadget.h"
#include "io.h"
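
/*
 * Endpoint 0 is handled here as a three-phase control transfer state
 * machine (Setup, Data, Status; see dwc3_ep0_state_string() below).
 * XferNotReady events tell us which phase the host wants to enter next,
 * and XferComplete events tell us that the TRB prepared for the current
 * phase has finished; both are dispatched from dwc3_ep0_interrupt() at
 * the bottom of this file.
 */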

static void dwc3_ep0_do_control_status(struct dwc3 *dwc, u32 epnum);

static const char *dwc3_ep0_state_string(enum dwc3_ep0_state state)
{
	switch (state) {
	case EP0_UNCONNECTED:
		return "Unconnected";
	case EP0_SETUP_PHASE:
		return "Setup Phase";
	case EP0_DATA_PHASE:
		return "Data Phase";
	case EP0_STATUS_PHASE:
		return "Status Phase";
	default:
		return "UNKNOWN";
	}
}
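
/*
 * Program the single, shared ep0 TRB with the buffer for the current
 * phase and issue DEPCMD_STARTTRANSFER on the given physical endpoint
 * (0 = OUT, 1 = IN). The returned transfer resource index is saved so
 * the transfer can be referenced later.
 */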
static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma,
		u32 len, u32 type)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3_trb_hw *trb_hw;
	struct dwc3_trb trb;
	struct dwc3_ep *dep;
	int ret;

	dep = dwc->eps[epnum];
	if (dep->flags & DWC3_EP_BUSY) {
		dev_vdbg(dwc->dev, "%s: still busy\n", dep->name);
		return 0;
	}

	trb_hw = dwc->ep0_trb;
	memset(&trb, 0, sizeof(trb));

	trb.trbctl = type;
	trb.bplh = buf_dma;
	trb.length = len;

	trb.hwo = 1;
	trb.lst = 1;
	trb.ioc = 1;
	trb.isp_imi = 1;

	dwc3_trb_to_hw(&trb, trb_hw);

	memset(&params, 0, sizeof(params));
	params.param0 = upper_32_bits(dwc->ep0_trb_addr);
	params.param1 = lower_32_bits(dwc->ep0_trb_addr);

	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_STARTTRANSFER, &params);
	if (ret < 0) {
		dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");
		return ret;
	}

	dep->flags |= DWC3_EP_BUSY;
	dep->res_trans_idx = dwc3_gadget_ep_get_transfer_index(dwc,
			dep->number);

	dwc->ep0_next_event = DWC3_EP0_COMPLETE;

	return 0;
}
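
/*
 * Queue a request on ep0. If the matching XferNotReady(Data) has already
 * been seen (DWC3_EP_PENDING_REQUEST), or a delayed status is pending,
 * the transfer is kicked off right here; otherwise it is started later
 * from dwc3_ep0_do_control_data()/dwc3_ep0_do_control_status().
 */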
static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep,
		struct dwc3_request *req)
{
	struct dwc3 *dwc = dep->dwc;
	int ret = 0;

	req->request.actual = 0;
	req->request.status = -EINPROGRESS;
	req->epnum = dep->number;

	list_add_tail(&req->list, &dep->request_list);

	/*
	 * Gadget driver might not be quick enough to queue a request
	 * before we get a Transfer Not Ready event on this endpoint.
	 *
	 * In that case, we will set DWC3_EP_PENDING_REQUEST. When that
	 * flag is set, it's telling us that as soon as Gadget queues the
	 * required request, we should kick the transfer here because the
	 * IRQ we were waiting for is long gone.
	 */
	if (dep->flags & DWC3_EP_PENDING_REQUEST) {
		unsigned direction;

		direction = !!(dep->flags & DWC3_EP0_DIR_IN);

		if (dwc->ep0state != EP0_DATA_PHASE) {
			dev_WARN(dwc->dev, "Unexpected pending request\n");
			return 0;
		}

		ret = dwc3_ep0_start_trans(dwc, direction,
				req->request.dma, req->request.length,
				DWC3_TRBCTL_CONTROL_DATA);
		dep->flags &= ~(DWC3_EP_PENDING_REQUEST |
				DWC3_EP0_DIR_IN);
	} else if (dwc->delayed_status) {
		dwc->delayed_status = false;

		if (dwc->ep0state == EP0_STATUS_PHASE)
			dwc3_ep0_do_control_status(dwc, 1);
		else
			dev_dbg(dwc->dev, "too early for delayed status\n");
	}

	return ret;
}

int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
		gfp_t gfp_flags)
{
	struct dwc3_request *req = to_dwc3_request(request);
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dwc->lock, flags);
	if (!dep->desc) {
		dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
				request, dep->name);
		ret = -ESHUTDOWN;
		goto out;
	}

	/* we share one TRB for ep0/1 */
	if (!list_empty(&dep->request_list)) {
		ret = -EBUSY;
		goto out;
	}

	dev_vdbg(dwc->dev, "queueing request %p to %s length %d, state '%s'\n",
			request, dep->name, request->length,
			dwc3_ep0_state_string(dwc->ep0state));

	ret = __dwc3_gadget_ep0_queue(dep, req);

out:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}
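
/*
 * Protocol-stall ep0, give back any queued request with -ECONNRESET and
 * re-arm the OUT endpoint for the next Setup packet. Called whenever a
 * control request cannot be handled or arrives out of sequence.
 */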
static void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
{
	struct dwc3_ep *dep = dwc->eps[0];

	/* stall is always issued on EP0 */
	__dwc3_gadget_ep_set_halt(dep, 1);
	dep->flags = DWC3_EP_ENABLED;
	dwc->delayed_status = false;

	if (!list_empty(&dep->request_list)) {
		struct dwc3_request *req;

		req = next_request(&dep->request_list);
		dwc3_gadget_giveback(dep, req, -ECONNRESET);
	}

	dwc->ep0state = EP0_SETUP_PHASE;
	dwc3_ep0_out_start(dwc);
}

void dwc3_ep0_out_start(struct dwc3 *dwc)
{
	int ret;

	ret = dwc3_ep0_start_trans(dwc, 0, dwc->ctrl_req_addr, 8,
			DWC3_TRBCTL_CONTROL_SETUP);
	WARN_ON(ret < 0);
}
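
/*
 * Map the wIndex of an endpoint-recipient request to our internal
 * endpoint array: dwc->eps[] is laid out as OUT/IN pairs, so the index
 * is (endpoint number << 1) | direction. Returns NULL if that endpoint
 * is not enabled.
 */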
static struct dwc3_ep *dwc3_wIndex_to_dep(struct dwc3 *dwc, __le16 wIndex_le)
{
	struct dwc3_ep *dep;
	u32 windex = le16_to_cpu(wIndex_le);
	u32 epnum;

	epnum = (windex & USB_ENDPOINT_NUMBER_MASK) << 1;
	if ((windex & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
		epnum |= 1;

	dep = dwc->eps[epnum];
	if (dep->flags & DWC3_EP_ENABLED)
		return dep;

	return NULL;
}

static void dwc3_ep0_status_cmpl(struct usb_ep *ep, struct usb_request *req)
{
}

/*
 * ch 9.4.5 (GET_STATUS)
 */
static int dwc3_ep0_handle_status(struct dwc3 *dwc,
		struct usb_ctrlrequest *ctrl)
{
	struct dwc3_ep *dep;
	u32 recip;
	u16 usb_status = 0;
	__le16 *response_pkt;

	recip = ctrl->bRequestType & USB_RECIP_MASK;
	switch (recip) {
	case USB_RECIP_DEVICE:
		/*
		 * We are self-powered. U1/U2/LTM will be set later
		 * once we handle these states. RemoteWakeup is 0 on SS
		 */
		usb_status |= dwc->is_selfpowered << USB_DEVICE_SELF_POWERED;
		break;

	case USB_RECIP_INTERFACE:
		/*
		 * Function Remote Wake Capable	D0
		 * Function Remote Wakeup	D1
		 */
		break;

	case USB_RECIP_ENDPOINT:
		dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex);
		if (!dep)
			return -EINVAL;

		if (dep->flags & DWC3_EP_STALL)
			usb_status = 1 << USB_ENDPOINT_HALT;
		break;
	default:
		return -EINVAL;
	}

	response_pkt = (__le16 *) dwc->setup_buf;
	*response_pkt = cpu_to_le16(usb_status);

	dep = dwc->eps[0];
	dwc->ep0_usb_req.dep = dep;
	dwc->ep0_usb_req.request.length = sizeof(*response_pkt);
	dwc->ep0_usb_req.request.dma = dwc->setup_buf_addr;
	dwc->ep0_usb_req.request.complete = dwc3_ep0_status_cmpl;

	return __dwc3_gadget_ep0_queue(dep, &dwc->ep0_usb_req);
}
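
/*
 * ch 9.4.1 / 9.4.9 (CLEAR_FEATURE / SET_FEATURE); 'set' selects between
 * the two. Device, interface and endpoint recipients are handled here;
 * anything else makes the caller stall ep0 via the -EINVAL return.
 */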
static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
		struct usb_ctrlrequest *ctrl, int set)
{
	struct dwc3_ep *dep;
	u32 recip;
	u32 wValue;
	u32 wIndex;
	u32 reg;
	int ret;
	u32 mode;

	wValue = le16_to_cpu(ctrl->wValue);
	wIndex = le16_to_cpu(ctrl->wIndex);
	recip = ctrl->bRequestType & USB_RECIP_MASK;

	switch (recip) {
	case USB_RECIP_DEVICE:
		/*
		 * 9.4.1 says only for SS, in AddressState only for
		 * default control pipe
		 */
		switch (wValue) {
		case USB_DEVICE_U1_ENABLE:
		case USB_DEVICE_U2_ENABLE:
		case USB_DEVICE_LTM_ENABLE:
			if (dwc->dev_state != DWC3_CONFIGURED_STATE)
				return -EINVAL;
			if (dwc->speed != DWC3_DSTS_SUPERSPEED)
				return -EINVAL;
		}

		/* XXX add U[12] & LTM */
		switch (wValue) {
		case USB_DEVICE_REMOTE_WAKEUP:
			break;
		case USB_DEVICE_U1_ENABLE:
			break;
		case USB_DEVICE_U2_ENABLE:
			break;
		case USB_DEVICE_LTM_ENABLE:
			break;

		case USB_DEVICE_TEST_MODE:
			if ((wIndex & 0xff) != 0)
				return -EINVAL;
			if (!set)
				return -EINVAL;

			mode = wIndex >> 8;
			reg = dwc3_readl(dwc->regs, DWC3_DCTL);
			reg &= ~DWC3_DCTL_TSTCTRL_MASK;

			switch (mode) {
			case TEST_J:
			case TEST_K:
			case TEST_SE0_NAK:
			case TEST_PACKET:
			case TEST_FORCE_EN:
				reg |= mode << 1;
				break;
			default:
				return -EINVAL;
			}
			dwc3_writel(dwc->regs, DWC3_DCTL, reg);
			break;
		default:
			return -EINVAL;
		}
		break;

	case USB_RECIP_INTERFACE:
		switch (wValue) {
		case USB_INTRF_FUNC_SUSPEND:
			if (wIndex & USB_INTRF_FUNC_SUSPEND_LP)
				/* XXX enable Low power suspend */
				;
			if (wIndex & USB_INTRF_FUNC_SUSPEND_RW)
				/* XXX enable remote wakeup */
				;
			break;
		default:
			return -EINVAL;
		}
		break;

	case USB_RECIP_ENDPOINT:
		switch (wValue) {
		case USB_ENDPOINT_HALT:
			dep = dwc3_wIndex_to_dep(dwc, wIndex);
			if (!dep)
				return -EINVAL;
			ret = __dwc3_gadget_ep_set_halt(dep, set);
			if (ret)
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
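
/*
 * ch 9.4.6 (SET_ADDRESS): program the new device address into DCFG and
 * track the Default <-> Address state transition. Not allowed once the
 * device is configured.
 */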
static int dwc3_ep0_set_address(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	u32 addr;
	u32 reg;

	addr = le16_to_cpu(ctrl->wValue);
	if (addr > 127) {
		dev_dbg(dwc->dev, "invalid device address %d\n", addr);
		return -EINVAL;
	}

	if (dwc->dev_state == DWC3_CONFIGURED_STATE) {
		dev_dbg(dwc->dev, "trying to set address when configured\n");
		return -EINVAL;
	}

	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_DEVADDR_MASK);
	reg |= DWC3_DCFG_DEVADDR(addr);
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);

	if (addr)
		dwc->dev_state = DWC3_ADDRESS_STATE;
	else
		dwc->dev_state = DWC3_DEFAULT_STATE;

	return 0;
}
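
/*
 * Hand a control request we don't handle ourselves to the gadget
 * driver's ->setup() callback. dwc->lock is dropped across the call
 * because the gadget driver may queue a request (and thus take the
 * lock again) from within ->setup().
 */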
static int dwc3_ep0_delegate_req(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	int ret;

	spin_unlock(&dwc->lock);
	ret = dwc->gadget_driver->setup(&dwc->gadget, ctrl);
	spin_lock(&dwc->lock);

	return ret;
}
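
/*
 * ch 9.4.7 (SET_CONFIGURATION): the request itself is delegated to the
 * gadget driver; here we only track the Address <-> Configured state
 * change based on whether the selected configuration value is zero.
 */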
static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	u32 cfg;
	int ret;

	dwc->start_config_issued = false;
	cfg = le16_to_cpu(ctrl->wValue);

	switch (dwc->dev_state) {
	case DWC3_DEFAULT_STATE:
		return -EINVAL;
		break;

	case DWC3_ADDRESS_STATE:
		ret = dwc3_ep0_delegate_req(dwc, ctrl);
		/* if the cfg matches and the cfg is non zero */
		if (!ret && cfg)
			dwc->dev_state = DWC3_CONFIGURED_STATE;
		break;

	case DWC3_CONFIGURED_STATE:
		ret = dwc3_ep0_delegate_req(dwc, ctrl);
		if (!cfg)
			dwc->dev_state = DWC3_ADDRESS_STATE;
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}

static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	int ret;

	switch (ctrl->bRequest) {
	case USB_REQ_GET_STATUS:
		dev_vdbg(dwc->dev, "USB_REQ_GET_STATUS\n");
		ret = dwc3_ep0_handle_status(dwc, ctrl);
		break;
	case USB_REQ_CLEAR_FEATURE:
		dev_vdbg(dwc->dev, "USB_REQ_CLEAR_FEATURE\n");
		ret = dwc3_ep0_handle_feature(dwc, ctrl, 0);
		break;
	case USB_REQ_SET_FEATURE:
		dev_vdbg(dwc->dev, "USB_REQ_SET_FEATURE\n");
		ret = dwc3_ep0_handle_feature(dwc, ctrl, 1);
		break;
	case USB_REQ_SET_ADDRESS:
		dev_vdbg(dwc->dev, "USB_REQ_SET_ADDRESS\n");
		ret = dwc3_ep0_set_address(dwc, ctrl);
		break;
	case USB_REQ_SET_CONFIGURATION:
		dev_vdbg(dwc->dev, "USB_REQ_SET_CONFIGURATION\n");
		ret = dwc3_ep0_set_config(dwc, ctrl);
		break;
	default:
		dev_vdbg(dwc->dev, "Forwarding to gadget driver\n");
		ret = dwc3_ep0_delegate_req(dwc, ctrl);
		break;
	}

	return ret;
}
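
/*
 * Decode the Setup packet that just completed on ep0-out: decide whether
 * this is a two- or three-stage transfer (wLength), which direction the
 * Data phase will use, and then either handle the standard request here
 * or delegate it to the gadget driver. Any error stalls ep0 and re-arms
 * it for the next Setup packet.
 */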
static void dwc3_ep0_inspect_setup(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct usb_ctrlrequest *ctrl = dwc->ctrl_req;
	int ret;
	u32 len;

	if (!dwc->gadget_driver)
		goto err;

	len = le16_to_cpu(ctrl->wLength);
	if (!len) {
		dwc->three_stage_setup = false;
		dwc->ep0_expect_in = false;
		dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS;
	} else {
		dwc->three_stage_setup = true;
		dwc->ep0_expect_in = !!(ctrl->bRequestType & USB_DIR_IN);
		dwc->ep0_next_event = DWC3_EP0_NRDY_DATA;
	}

	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
		ret = dwc3_ep0_std_request(dwc, ctrl);
	else
		ret = dwc3_ep0_delegate_req(dwc, ctrl);

	if (ret == USB_GADGET_DELAYED_STATUS)
		dwc->delayed_status = true;

	if (ret >= 0)
		return;

err:
	dwc3_ep0_stall_and_restart(dwc);
}
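
/*
 * Data phase has completed. If the OUT transfer went through the
 * internal bounce buffer (request length not a multiple of
 * wMaxPacketSize), copy the received bytes back into the gadget
 * driver's buffer before giving the request back.
 */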
static void dwc3_ep0_complete_data(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_request *r = NULL;
	struct usb_request *ur;
	struct dwc3_trb trb;
	struct dwc3_ep *ep0;
	u32 transferred;
	u8 epnum;

	epnum = event->endpoint_number;
	ep0 = dwc->eps[0];

	dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS;

	r = next_request(&ep0->request_list);
	ur = &r->request;

	dwc3_trb_to_nat(dwc->ep0_trb, &trb);

	if (dwc->ep0_bounced) {
		transferred = min_t(u32, ur->length,
				ep0->endpoint.maxpacket - trb.length);
		memcpy(ur->buf, dwc->ep0_bounce, transferred);
		dwc->ep0_bounced = false;
	} else {
		transferred = ur->length - trb.length;
		ur->actual += transferred;
	}

	if ((epnum & 1) && ur->actual < ur->length) {
		/* for some reason we did not get everything out */
		dwc3_ep0_stall_and_restart(dwc);
	} else {
		/*
		 * handle the case where we have to send a zero packet. This
		 * seems to be the case when req.length > maxpacket. Could it be?
		 */
		if (r)
			dwc3_gadget_giveback(ep0, r, 0);
	}
}

static void dwc3_ep0_complete_req(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_request *r;
	struct dwc3_ep *dep;

	dep = dwc->eps[0];

	if (!list_empty(&dep->request_list)) {
		r = next_request(&dep->request_list);

		dwc3_gadget_giveback(dep, r, 0);
	}

	dwc->ep0state = EP0_SETUP_PHASE;
	dwc3_ep0_out_start(dwc);
}

static void dwc3_ep0_xfer_complete(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_ep *dep = dwc->eps[event->endpoint_number];

	dep->flags &= ~DWC3_EP_BUSY;
	dwc->setup_packet_pending = false;

	switch (dwc->ep0state) {
	case EP0_SETUP_PHASE:
		dev_vdbg(dwc->dev, "Inspecting Setup Bytes\n");
		dwc3_ep0_inspect_setup(dwc, event);
		break;

	case EP0_DATA_PHASE:
		dev_vdbg(dwc->dev, "Data Phase\n");
		dwc3_ep0_complete_data(dwc, event);
		break;

	case EP0_STATUS_PHASE:
		dev_vdbg(dwc->dev, "Status Phase\n");
		dwc3_ep0_complete_req(dwc, event);
		break;
	default:
		WARN(true, "UNKNOWN ep0state %d\n", dwc->ep0state);
	}
}

static void dwc3_ep0_do_control_setup(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	dwc3_ep0_out_start(dwc);
}

static void dwc3_ep0_do_control_data(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_ep *dep;
	struct dwc3_request *req;
	int ret;

	dep = dwc->eps[0];

	if (list_empty(&dep->request_list)) {
		dev_vdbg(dwc->dev, "pending request for EP0 Data phase\n");
		dep->flags |= DWC3_EP_PENDING_REQUEST;

		if (event->endpoint_number)
			dep->flags |= DWC3_EP0_DIR_IN;
		return;
	}

	req = next_request(&dep->request_list);
	req->direction = !!event->endpoint_number;

	if (req->request.length == 0) {
		ret = dwc3_ep0_start_trans(dwc, event->endpoint_number,
				dwc->ctrl_req_addr, 0,
				DWC3_TRBCTL_CONTROL_DATA);
	} else if ((req->request.length % dep->endpoint.maxpacket)
			&& (event->endpoint_number == 0)) {
		dwc3_map_buffer_to_dma(req);

		WARN_ON(req->request.length > dep->endpoint.maxpacket);

		dwc->ep0_bounced = true;

		/*
		 * REVISIT in case request length is bigger than EP0
		 * wMaxPacketSize, we will need two chained TRBs to handle
		 * the transfer.
		 */
		ret = dwc3_ep0_start_trans(dwc, event->endpoint_number,
				dwc->ep0_bounce_addr, dep->endpoint.maxpacket,
				DWC3_TRBCTL_CONTROL_DATA);
	} else {
		dwc3_map_buffer_to_dma(req);

		ret = dwc3_ep0_start_trans(dwc, event->endpoint_number,
				req->request.dma, req->request.length,
				DWC3_TRBCTL_CONTROL_DATA);
	}

	WARN_ON(ret < 0);
}

static int dwc3_ep0_start_control_status(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;
	u32 type;

	type = dwc->three_stage_setup ? DWC3_TRBCTL_CONTROL_STATUS3
		: DWC3_TRBCTL_CONTROL_STATUS2;

	return dwc3_ep0_start_trans(dwc, dep->number,
			dwc->ctrl_req_addr, 0, type);
}

static void dwc3_ep0_do_control_status(struct dwc3 *dwc, u32 epnum)
{
	struct dwc3_ep *dep = dwc->eps[epnum];

	WARN_ON(dwc3_ep0_start_control_status(dep));
}

static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	dwc->setup_packet_pending = true;

	/*
	 * This part is very tricky: If we have just handled
	 * XferNotReady(Setup) and we're now expecting a
	 * XferComplete but, instead, we receive another
	 * XferNotReady(Setup), we should STALL and restart
	 * the state machine.
	 *
	 * In all other cases, we just continue waiting
	 * for the XferComplete event.
	 *
	 * We are a little bit unsafe here because we're
	 * not trying to ensure that last event was, indeed,
	 * XferNotReady(Setup).
	 *
	 * Still, we don't expect any condition where that
	 * should happen and, even if it does, it would be
	 * another error condition.
	 */
	if (dwc->ep0_next_event == DWC3_EP0_COMPLETE) {
		switch (event->status) {
		case DEPEVT_STATUS_CONTROL_SETUP:
			dev_vdbg(dwc->dev, "Unexpected XferNotReady(Setup)\n");
			dwc3_ep0_stall_and_restart(dwc);
			break;
		case DEPEVT_STATUS_CONTROL_DATA:
			/* FALLTHROUGH */
		case DEPEVT_STATUS_CONTROL_STATUS:
			/* FALLTHROUGH */
		default:
			dev_vdbg(dwc->dev, "waiting for XferComplete\n");
		}

		return;
	}

	switch (event->status) {
	case DEPEVT_STATUS_CONTROL_SETUP:
		dev_vdbg(dwc->dev, "Control Setup\n");
		dwc->ep0state = EP0_SETUP_PHASE;
		dwc3_ep0_do_control_setup(dwc, event);
		break;

	case DEPEVT_STATUS_CONTROL_DATA:
		dev_vdbg(dwc->dev, "Control Data\n");
		dwc->ep0state = EP0_DATA_PHASE;

		if (dwc->ep0_next_event != DWC3_EP0_NRDY_DATA) {
			dev_vdbg(dwc->dev, "Expected %d got %d\n",
					dwc->ep0_next_event,
					DWC3_EP0_NRDY_DATA);

			dwc3_ep0_stall_and_restart(dwc);
			return;
		}

		/*
		 * One of the possible error cases is when Host _does_
		 * request for Data Phase, but it does so on the wrong
		 * direction.
		 *
		 * Here, we already know ep0_next_event is DATA (see above),
		 * so we only need to check for direction.
		 */
		if (dwc->ep0_expect_in != event->endpoint_number) {
			dev_vdbg(dwc->dev, "Wrong direction for Data phase\n");
			dwc3_ep0_stall_and_restart(dwc);
			return;
		}

		dwc3_ep0_do_control_data(dwc, event);
		break;

	case DEPEVT_STATUS_CONTROL_STATUS:
		dev_vdbg(dwc->dev, "Control Status\n");
		dwc->ep0state = EP0_STATUS_PHASE;

		if (dwc->ep0_next_event != DWC3_EP0_NRDY_STATUS) {
			dev_vdbg(dwc->dev, "Expected %d got %d\n",
					dwc->ep0_next_event,
					DWC3_EP0_NRDY_STATUS);

			dwc3_ep0_stall_and_restart(dwc);
			return;
		}

		if (dwc->delayed_status) {
			WARN_ON_ONCE(event->endpoint_number != 1);
			dev_vdbg(dwc->dev, "Mass Storage delayed status\n");
			return;
		}

		dwc3_ep0_do_control_status(dwc, event->endpoint_number);
	}
}
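
/*
 * Top-level dispatcher for ep0 endpoint events coming from the event
 * buffer: XferComplete drives the state machine forward, XferNotReady
 * tells us which phase the host is asking for next; all other endpoint
 * events are ignored on ep0.
 */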
void dwc3_ep0_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	u8 epnum = event->endpoint_number;

	dev_dbg(dwc->dev, "%s while ep%d%s in state '%s'\n",
			dwc3_ep_event_string(event->endpoint_event),
			epnum >> 1, (epnum & 1) ? "in" : "out",
			dwc3_ep0_state_string(dwc->ep0state));

	switch (event->endpoint_event) {
	case DWC3_DEPEVT_XFERCOMPLETE:
		dwc3_ep0_xfer_complete(dwc, event);
		break;

	case DWC3_DEPEVT_XFERNOTREADY:
		dwc3_ep0_xfernotready(dwc, event);
		break;

	case DWC3_DEPEVT_XFERINPROGRESS:
	case DWC3_DEPEVT_RXTXFIFOEVT:
	case DWC3_DEPEVT_STREAMEVT:
	case DWC3_DEPEVT_EPCMDCMPLT:
		break;
	}
}