/**
 * ep0.c - DesignWare USB3 DRD Controller Endpoint 0 Handling
 *
 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
 *
 * Authors: Felipe Balbi <balbi@ti.com>,
 *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2, as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/composite.h>

#include "core.h"
#include "gadget.h"
#include "io.h"

static void dwc3_ep0_do_control_status(struct dwc3 *dwc, u32 epnum);

static const char *dwc3_ep0_state_string(enum dwc3_ep0_state state)
{
	switch (state) {
	case EP0_UNCONNECTED:
		return "Unconnected";
	case EP0_SETUP_PHASE:
		return "Setup Phase";
	case EP0_DATA_PHASE:
		return "Data Phase";
	case EP0_STATUS_PHASE:
		return "Status Phase";
	default:
		return "UNKNOWN";
	}
}

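/*
 * dwc3_ep0_start_trans - set up the shared ep0 TRB and start a transfer.
 *
 * Fills dwc->ep0_trb with @buf_dma/@len/@type and issues the
 * DWC3_DEPCMD_STARTTRANSFER command on physical endpoint @epnum
 * (0 = control OUT, 1 = control IN). If the endpoint is still BUSY the
 * call is a no-op returning 0. On success the endpoint is marked
 * DWC3_EP_BUSY and ep0_next_event is set to DWC3_EP0_COMPLETE.
 */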
static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma,
		u32 len, u32 type)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3_trb_hw *trb_hw;
	struct dwc3_trb trb;
	struct dwc3_ep *dep;
	int ret;

	dep = dwc->eps[epnum];
	if (dep->flags & DWC3_EP_BUSY) {
		dev_vdbg(dwc->dev, "%s: still busy\n", dep->name);
		return 0;
	}

	trb_hw = dwc->ep0_trb;
	memset(&trb, 0, sizeof(trb));

	trb.trbctl = type;
	trb.bplh = buf_dma;
	trb.length = len;

	trb.hwo = 1;
	trb.lst = 1;
	trb.ioc = 1;
	trb.isp_imi = 1;

	dwc3_trb_to_hw(&trb, trb_hw);

	memset(&params, 0, sizeof(params));
	params.param0 = upper_32_bits(dwc->ep0_trb_addr);
	params.param1 = lower_32_bits(dwc->ep0_trb_addr);

	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_STARTTRANSFER, &params);
	if (ret < 0) {
		dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");
		return ret;
	}

	dep->flags |= DWC3_EP_BUSY;
	dep->res_trans_idx = dwc3_gadget_ep_get_transfer_index(dwc,
			dep->number);

	dwc->ep0_next_event = DWC3_EP0_COMPLETE;

	return 0;
}

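/*
 * __dwc3_gadget_ep0_queue - internal helper for queueing a request on ep0.
 *
 * Called with dwc->lock held. Adds the request to the endpoint's
 * request_list and, if a Data phase XferNotReady has already been seen
 * (DWC3_EP_PENDING_REQUEST) or a delayed status is pending, kicks the
 * corresponding transfer right away.
 */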
static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep,
		struct dwc3_request *req)
{
	struct dwc3 *dwc = dep->dwc;
	u32 type;
	int ret = 0;

	req->request.actual = 0;
	req->request.status = -EINPROGRESS;
	req->epnum = dep->number;

	list_add_tail(&req->list, &dep->request_list);

	/*
	 * Gadget driver might not be quick enough to queue a request
	 * before we get a Transfer Not Ready event on this endpoint.
	 *
	 * In that case, we will set DWC3_EP_PENDING_REQUEST. When that
	 * flag is set, it's telling us that as soon as Gadget queues the
	 * required request, we should kick the transfer here because the
	 * IRQ we were waiting for is long gone.
	 */
	if (dep->flags & DWC3_EP_PENDING_REQUEST) {
		unsigned direction;

		direction = !!(dep->flags & DWC3_EP0_DIR_IN);

		if (dwc->ep0state != EP0_DATA_PHASE) {
			dev_WARN(dwc->dev, "Unexpected pending request\n");
			return 0;
		}

		ret = dwc3_ep0_start_trans(dwc, direction,
				req->request.dma, req->request.length,
				DWC3_TRBCTL_CONTROL_DATA);
		dep->flags &= ~(DWC3_EP_PENDING_REQUEST |
				DWC3_EP0_DIR_IN);
	} else if (dwc->delayed_status) {
		dwc->delayed_status = false;

		if (dwc->ep0state == EP0_STATUS_PHASE)
			dwc3_ep0_do_control_status(dwc, 1);
		else
			dev_dbg(dwc->dev, "too early for delayed status\n");
	}

	return ret;
}

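/*
 * dwc3_gadget_ep0_queue - usb_ep_ops ->queue implementation for ep0.
 *
 * Grabs dwc->lock, rejects requests for a disabled endpoint (-ESHUTDOWN)
 * or when a request is already queued (-EBUSY, since ep0 OUT and IN share
 * a single TRB), then hands off to __dwc3_gadget_ep0_queue().
 */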
int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
		gfp_t gfp_flags)
{
	struct dwc3_request *req = to_dwc3_request(request);
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dwc->lock, flags);
	if (!dep->desc) {
		dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
				request, dep->name);
		ret = -ESHUTDOWN;
		goto out;
	}

	/* we share one TRB for ep0/1 */
	if (!list_empty(&dep->request_list)) {
		ret = -EBUSY;
		goto out;
	}

	dev_vdbg(dwc->dev, "queueing request %p to %s length %d, state '%s'\n",
			request, dep->name, request->length,
			dwc3_ep0_state_string(dwc->ep0state));

	ret = __dwc3_gadget_ep0_queue(dep, req);

out:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

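/*
 * dwc3_ep0_stall_and_restart - stall ep0 and restart the control state
 * machine. Issues a protocol STALL on physical endpoint 0, gives back any
 * queued request with -ECONNRESET and re-arms ep0 to receive the next
 * SETUP packet.
 */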
static void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
{
	struct dwc3_ep *dep = dwc->eps[0];

	/* stall is always issued on EP0 */
	__dwc3_gadget_ep_set_halt(dep, 1);
	dep->flags = DWC3_EP_ENABLED;
	dwc->delayed_status = false;

	if (!list_empty(&dep->request_list)) {
		struct dwc3_request *req;

		req = next_request(&dep->request_list);
		dwc3_gadget_giveback(dep, req, -ECONNRESET);
	}

	dwc->ep0state = EP0_SETUP_PHASE;
	dwc3_ep0_out_start(dwc);
}

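/*
 * dwc3_ep0_out_start - arm ep0 OUT with a Control Setup TRB so the
 * controller can receive the next 8-byte SETUP packet into dwc->ctrl_req.
 */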
void dwc3_ep0_out_start(struct dwc3 *dwc)
{
	int ret;

	ret = dwc3_ep0_start_trans(dwc, 0, dwc->ctrl_req_addr, 8,
			DWC3_TRBCTL_CONTROL_SETUP);
	WARN_ON(ret < 0);
}

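/*
 * dwc3_wIndex_to_dep - map a wIndex value from a control request to the
 * corresponding dwc3_ep. Returns the endpoint only if it is enabled,
 * otherwise NULL.
 */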
static struct dwc3_ep *dwc3_wIndex_to_dep(struct dwc3 *dwc, __le16 wIndex_le)
{
	struct dwc3_ep *dep;
	u32 windex = le16_to_cpu(wIndex_le);
	u32 epnum;

	epnum = (windex & USB_ENDPOINT_NUMBER_MASK) << 1;
	if ((windex & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
		epnum |= 1;

	dep = dwc->eps[epnum];
	if (dep->flags & DWC3_EP_ENABLED)
		return dep;

	return NULL;
}

static void dwc3_ep0_status_cmpl(struct usb_ep *ep, struct usb_request *req)
{
}

/*
 * ch 9.4.5: GET_STATUS
 */
static int dwc3_ep0_handle_status(struct dwc3 *dwc,
		struct usb_ctrlrequest *ctrl)
{
	struct dwc3_ep *dep;
	u32 recip;
	u16 usb_status = 0;
	__le16 *response_pkt;

	recip = ctrl->bRequestType & USB_RECIP_MASK;
	switch (recip) {
	case USB_RECIP_DEVICE:
		/*
		 * We are self-powered. U1/U2/LTM will be set later
		 * once we handle these states. RemoteWakeup is 0 on SS.
		 */
		usb_status |= dwc->is_selfpowered << USB_DEVICE_SELF_POWERED;
		break;

	case USB_RECIP_INTERFACE:
		/*
		 * Function Remote Wake Capable	D0
		 * Function Remote Wakeup	D1
		 */
		break;

	case USB_RECIP_ENDPOINT:
		dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex);
		if (!dep)
			return -EINVAL;

		if (dep->flags & DWC3_EP_STALL)
			usb_status = 1 << USB_ENDPOINT_HALT;
		break;
	default:
		return -EINVAL;
	}

	response_pkt = (__le16 *) dwc->setup_buf;
	*response_pkt = cpu_to_le16(usb_status);

	dep = dwc->eps[0];
	dwc->ep0_usb_req.dep = dep;
	dwc->ep0_usb_req.request.length = sizeof(*response_pkt);
	dwc->ep0_usb_req.request.dma = dwc->setup_buf_addr;
	dwc->ep0_usb_req.request.complete = dwc3_ep0_status_cmpl;

	return __dwc3_gadget_ep0_queue(dep, &dwc->ep0_usb_req);
}

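/*
 * dwc3_ep0_handle_feature - handle SET_FEATURE and CLEAR_FEATURE.
 * @set is 1 for SET_FEATURE, 0 for CLEAR_FEATURE. Device recipients
 * cover test mode and the (not yet implemented) U1/U2/LTM features;
 * endpoint recipients toggle ENDPOINT_HALT.
 */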
static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
		struct usb_ctrlrequest *ctrl, int set)
{
	struct dwc3_ep *dep;
	u32 recip;
	u32 wValue;
	u32 wIndex;
	u32 reg;
	int ret;
	u32 mode;

	wValue = le16_to_cpu(ctrl->wValue);
	wIndex = le16_to_cpu(ctrl->wIndex);
	recip = ctrl->bRequestType & USB_RECIP_MASK;
	switch (recip) {
	case USB_RECIP_DEVICE:
		/*
		 * 9.4.1 says these are valid only for SS, and in
		 * AddressState only for the default control pipe
		 */
		switch (wValue) {
		case USB_DEVICE_U1_ENABLE:
		case USB_DEVICE_U2_ENABLE:
		case USB_DEVICE_LTM_ENABLE:
			if (dwc->dev_state != DWC3_CONFIGURED_STATE)
				return -EINVAL;
			if (dwc->speed != DWC3_DSTS_SUPERSPEED)
				return -EINVAL;
		}

		/* XXX add U[12] & LTM */
		switch (wValue) {
		case USB_DEVICE_REMOTE_WAKEUP:
			break;
		case USB_DEVICE_U1_ENABLE:
			break;
		case USB_DEVICE_U2_ENABLE:
			break;
		case USB_DEVICE_LTM_ENABLE:
			break;

		case USB_DEVICE_TEST_MODE:
			if ((wIndex & 0xff) != 0)
				return -EINVAL;
			if (!set)
				return -EINVAL;

			mode = wIndex >> 8;
			reg = dwc3_readl(dwc->regs, DWC3_DCTL);
			reg &= ~DWC3_DCTL_TSTCTRL_MASK;

			switch (mode) {
			case TEST_J:
			case TEST_K:
			case TEST_SE0_NAK:
			case TEST_PACKET:
			case TEST_FORCE_EN:
				reg |= mode << 1;
				break;
			default:
				return -EINVAL;
			}
			dwc3_writel(dwc->regs, DWC3_DCTL, reg);
			break;
		default:
			return -EINVAL;
		}
		break;

	case USB_RECIP_INTERFACE:
		switch (wValue) {
		case USB_INTRF_FUNC_SUSPEND:
			if (wIndex & USB_INTRF_FUNC_SUSPEND_LP)
				/* XXX enable Low power suspend */
				;
			if (wIndex & USB_INTRF_FUNC_SUSPEND_RW)
				/* XXX enable remote wakeup */
				;
			break;
		default:
			return -EINVAL;
		}
		break;

	case USB_RECIP_ENDPOINT:
		switch (wValue) {
		case USB_ENDPOINT_HALT:
			dep = dwc3_wIndex_to_dep(dwc, wIndex);
			if (!dep)
				return -EINVAL;
			ret = __dwc3_gadget_ep_set_halt(dep, set);
			if (ret)
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

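/*
 * dwc3_ep0_set_address - handle SET_ADDRESS: validate the new address
 * (at most 127, and not while configured), program it into DCFG and move
 * the device to Address or Default state accordingly.
 */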
static int dwc3_ep0_set_address(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	u32 addr;
	u32 reg;

	addr = le16_to_cpu(ctrl->wValue);
	if (addr > 127) {
		dev_dbg(dwc->dev, "invalid device address %d\n", addr);
		return -EINVAL;
	}

	if (dwc->dev_state == DWC3_CONFIGURED_STATE) {
		dev_dbg(dwc->dev, "trying to set address when configured\n");
		return -EINVAL;
	}

	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_DEVADDR_MASK);
	reg |= DWC3_DCFG_DEVADDR(addr);
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);

	if (addr)
		dwc->dev_state = DWC3_ADDRESS_STATE;
	else
		dwc->dev_state = DWC3_DEFAULT_STATE;

	return 0;
}

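/*
 * dwc3_ep0_delegate_req - forward a control request to the gadget
 * driver's ->setup() callback, dropping dwc->lock around the call.
 */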
static int dwc3_ep0_delegate_req(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	int ret;

	spin_unlock(&dwc->lock);
	ret = dwc->gadget_driver->setup(&dwc->gadget, ctrl);
	spin_lock(&dwc->lock);

	return ret;
}

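/*
 * dwc3_ep0_set_config - handle SET_CONFIGURATION based on the current
 * device state, delegating to the gadget driver and tracking the
 * Address <-> Configured state transitions.
 */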
static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	u32 cfg;
	int ret;

	dwc->start_config_issued = false;
	cfg = le16_to_cpu(ctrl->wValue);

	switch (dwc->dev_state) {
	case DWC3_DEFAULT_STATE:
		return -EINVAL;

	case DWC3_ADDRESS_STATE:
		ret = dwc3_ep0_delegate_req(dwc, ctrl);
		/* if the gadget accepted the config and it is non-zero */
		if (!ret && cfg)
			dwc->dev_state = DWC3_CONFIGURED_STATE;
		break;

	case DWC3_CONFIGURED_STATE:
		ret = dwc3_ep0_delegate_req(dwc, ctrl);
		if (!cfg)
			dwc->dev_state = DWC3_ADDRESS_STATE;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

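/*
 * dwc3_ep0_std_request - dispatch standard control requests handled in the
 * driver (GET_STATUS, {SET,CLEAR}_FEATURE, SET_ADDRESS, SET_CONFIGURATION);
 * everything else is forwarded to the gadget driver.
 */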
static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	int ret;

	switch (ctrl->bRequest) {
	case USB_REQ_GET_STATUS:
		dev_vdbg(dwc->dev, "USB_REQ_GET_STATUS\n");
		ret = dwc3_ep0_handle_status(dwc, ctrl);
		break;
	case USB_REQ_CLEAR_FEATURE:
		dev_vdbg(dwc->dev, "USB_REQ_CLEAR_FEATURE\n");
		ret = dwc3_ep0_handle_feature(dwc, ctrl, 0);
		break;
	case USB_REQ_SET_FEATURE:
		dev_vdbg(dwc->dev, "USB_REQ_SET_FEATURE\n");
		ret = dwc3_ep0_handle_feature(dwc, ctrl, 1);
		break;
	case USB_REQ_SET_ADDRESS:
		dev_vdbg(dwc->dev, "USB_REQ_SET_ADDRESS\n");
		ret = dwc3_ep0_set_address(dwc, ctrl);
		break;
	case USB_REQ_SET_CONFIGURATION:
		dev_vdbg(dwc->dev, "USB_REQ_SET_CONFIGURATION\n");
		ret = dwc3_ep0_set_config(dwc, ctrl);
		break;
	default:
		dev_vdbg(dwc->dev, "Forwarding to gadget driver\n");
		ret = dwc3_ep0_delegate_req(dwc, ctrl);
		break;
	}

	return ret;
}

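/*
 * dwc3_ep0_inspect_setup - parse the SETUP packet received in
 * dwc->ctrl_req. Decides between two- and three-stage setup based on
 * wLength, records the expected data direction and next ep0 event, and
 * routes the request to dwc3_ep0_std_request() or the gadget driver.
 * Any failure ends in STALL and a restart of the state machine.
 */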
static void dwc3_ep0_inspect_setup(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct usb_ctrlrequest *ctrl = dwc->ctrl_req;
	int ret;
	u32 len;

	if (!dwc->gadget_driver)
		goto err;

	len = le16_to_cpu(ctrl->wLength);
	if (!len) {
		dwc->three_stage_setup = false;
		dwc->ep0_expect_in = false;
		dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS;
	} else {
		dwc->three_stage_setup = true;
		dwc->ep0_expect_in = !!(ctrl->bRequestType & USB_DIR_IN);
		dwc->ep0_next_event = DWC3_EP0_NRDY_DATA;
	}

	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
		ret = dwc3_ep0_std_request(dwc, ctrl);
	else
		ret = dwc3_ep0_delegate_req(dwc, ctrl);

	if (ret == USB_GADGET_DELAYED_STATUS)
		dwc->delayed_status = true;

	if (ret >= 0)
		return;

err:
	dwc3_ep0_stall_and_restart(dwc);
}

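/*
 * dwc3_ep0_complete_data - Data phase completed. Computes how much was
 * actually transferred from the residual TRB length (copying out of the
 * internal bounce buffer for short OUT transfers), and either gives the
 * request back or stalls if an IN transfer came up short.
 */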
static void dwc3_ep0_complete_data(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_request *r = NULL;
	struct usb_request *ur;
	struct dwc3_trb trb;
	struct dwc3_ep *ep0;
	u32 transferred;
	u8 epnum;

	epnum = event->endpoint_number;
	ep0 = dwc->eps[0];

	dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS;

	r = next_request(&ep0->request_list);
	ur = &r->request;

	dwc3_trb_to_nat(dwc->ep0_trb, &trb);

	if (dwc->ep0_bounced) {
		transferred = min_t(u32, ur->length,
				ep0->endpoint.maxpacket - trb.length);
		memcpy(ur->buf, dwc->ep0_bounce, transferred);
		dwc->ep0_bounced = false;
	} else {
		transferred = ur->length - trb.length;
		ur->actual += transferred;
	}

	if ((epnum & 1) && ur->actual < ur->length) {
		/* for some reason we did not get everything out */
		dwc3_ep0_stall_and_restart(dwc);
	} else {
		/*
		 * handle the case where we have to send a zero packet. This
		 * seems to be the case when req.length > maxpacket. Could it be?
		 */
		if (r)
			dwc3_gadget_giveback(ep0, r, 0);
	}
}

static void dwc3_ep0_complete_req(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_request *r;
	struct dwc3_ep *dep;

	dep = dwc->eps[0];

	if (!list_empty(&dep->request_list)) {
		r = next_request(&dep->request_list);

		dwc3_gadget_giveback(dep, r, 0);
	}

	dwc->ep0state = EP0_SETUP_PHASE;
	dwc3_ep0_out_start(dwc);
}

static void dwc3_ep0_xfer_complete(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_ep *dep = dwc->eps[event->endpoint_number];

	dep->flags &= ~DWC3_EP_BUSY;
	dwc->setup_packet_pending = false;

	switch (dwc->ep0state) {
	case EP0_SETUP_PHASE:
		dev_vdbg(dwc->dev, "Inspecting Setup Bytes\n");
		dwc3_ep0_inspect_setup(dwc, event);
		break;

	case EP0_DATA_PHASE:
		dev_vdbg(dwc->dev, "Data Phase\n");
		dwc3_ep0_complete_data(dwc, event);
		break;

	case EP0_STATUS_PHASE:
		dev_vdbg(dwc->dev, "Status Phase\n");
		dwc3_ep0_complete_req(dwc, event);
		break;
	default:
		WARN(true, "UNKNOWN ep0state %d\n", dwc->ep0state);
	}
}

static void dwc3_ep0_do_control_setup(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	dwc3_ep0_out_start(dwc);
}

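/*
 * dwc3_ep0_do_control_data - start the Data phase. If the gadget has not
 * queued a request yet, remember the direction and set
 * DWC3_EP_PENDING_REQUEST so the transfer is kicked from the queue path
 * later. OUT transfers whose length is not a multiple of wMaxPacketSize
 * go through the internal bounce buffer.
 */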
static void dwc3_ep0_do_control_data(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_ep *dep;
	struct dwc3_request *req;
	int ret;

	dep = dwc->eps[0];

	if (list_empty(&dep->request_list)) {
		dev_vdbg(dwc->dev, "pending request for EP0 Data phase\n");
		dep->flags |= DWC3_EP_PENDING_REQUEST;

		if (event->endpoint_number)
			dep->flags |= DWC3_EP0_DIR_IN;
		return;
	}

	req = next_request(&dep->request_list);
	req->direction = !!event->endpoint_number;

	if (req->request.length == 0) {
		ret = dwc3_ep0_start_trans(dwc, event->endpoint_number,
				dwc->ctrl_req_addr, 0,
				DWC3_TRBCTL_CONTROL_DATA);
	} else if ((req->request.length % dep->endpoint.maxpacket)
			&& (event->endpoint_number == 0)) {
		dwc3_map_buffer_to_dma(req);

		WARN_ON(req->request.length > dep->endpoint.maxpacket);

		dwc->ep0_bounced = true;

		/*
		 * REVISIT in case request length is bigger than EP0
		 * wMaxPacketSize, we will need two chained TRBs to handle
		 * the transfer.
		 */
		ret = dwc3_ep0_start_trans(dwc, event->endpoint_number,
				dwc->ep0_bounce_addr, dep->endpoint.maxpacket,
				DWC3_TRBCTL_CONTROL_DATA);
	} else {
		dwc3_map_buffer_to_dma(req);

		ret = dwc3_ep0_start_trans(dwc, event->endpoint_number,
				req->request.dma, req->request.length,
				DWC3_TRBCTL_CONTROL_DATA);
	}

	WARN_ON(ret < 0);
}

static int dwc3_ep0_start_control_status(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;
	u32 type;

	type = dwc->three_stage_setup ? DWC3_TRBCTL_CONTROL_STATUS3
		: DWC3_TRBCTL_CONTROL_STATUS2;

	return dwc3_ep0_start_trans(dwc, dep->number,
			dwc->ctrl_req_addr, 0, type);
}

static void dwc3_ep0_do_control_status(struct dwc3 *dwc, u32 epnum)
{
	struct dwc3_ep *dep = dwc->eps[epnum];

	WARN_ON(dwc3_ep0_start_control_status(dep));
}

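/*
 * dwc3_ep0_xfernotready - handle XferNotReady on ep0. Uses ep0_next_event
 * to detect a host that moved to the wrong phase or direction (stalling in
 * that case); otherwise starts the Setup, Data or Status phase that the
 * event announces.
 */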
static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	dwc->setup_packet_pending = true;

	/*
	 * This part is very tricky: If we have just handled
	 * XferNotReady(Setup) and we're now expecting a
	 * XferComplete but, instead, we receive another
	 * XferNotReady(Setup), we should STALL and restart
	 * the state machine.
	 *
	 * In all other cases, we just continue waiting
	 * for the XferComplete event.
	 *
	 * We are a little bit unsafe here because we're
	 * not trying to ensure that last event was, indeed,
	 * XferNotReady(Setup).
	 *
	 * Still, we don't expect any condition where that
	 * should happen and, even if it does, it would be
	 * another error condition.
	 */
	if (dwc->ep0_next_event == DWC3_EP0_COMPLETE) {
		switch (event->status) {
		case DEPEVT_STATUS_CONTROL_SETUP:
			dev_vdbg(dwc->dev, "Unexpected XferNotReady(Setup)\n");
			dwc3_ep0_stall_and_restart(dwc);
			break;
		case DEPEVT_STATUS_CONTROL_DATA:
			/* FALLTHROUGH */
		case DEPEVT_STATUS_CONTROL_STATUS:
			/* FALLTHROUGH */
		default:
			dev_vdbg(dwc->dev, "waiting for XferComplete\n");
		}

		return;
	}

	switch (event->status) {
	case DEPEVT_STATUS_CONTROL_SETUP:
		dev_vdbg(dwc->dev, "Control Setup\n");
		dwc->ep0state = EP0_SETUP_PHASE;
		dwc3_ep0_do_control_setup(dwc, event);
		break;

	case DEPEVT_STATUS_CONTROL_DATA:
		dev_vdbg(dwc->dev, "Control Data\n");
		dwc->ep0state = EP0_DATA_PHASE;

		if (dwc->ep0_next_event != DWC3_EP0_NRDY_DATA) {
			dev_vdbg(dwc->dev, "Expected %d got %d\n",
					dwc->ep0_next_event,
					DWC3_EP0_NRDY_DATA);
			dwc3_ep0_stall_and_restart(dwc);
			return;
		}

		/*
		 * One of the possible error cases is when the Host _does_
		 * request a Data phase, but does so in the wrong direction.
		 *
		 * Here, we already know ep0_next_event is DATA (see above),
		 * so we only need to check for direction.
		 */
		if (dwc->ep0_expect_in != event->endpoint_number) {
			dev_vdbg(dwc->dev, "Wrong direction for Data phase\n");
			dwc3_ep0_stall_and_restart(dwc);
			return;
		}

		dwc3_ep0_do_control_data(dwc, event);
		break;

	case DEPEVT_STATUS_CONTROL_STATUS:
		dev_vdbg(dwc->dev, "Control Status\n");
		dwc->ep0state = EP0_STATUS_PHASE;

		if (dwc->ep0_next_event != DWC3_EP0_NRDY_STATUS) {
			dev_vdbg(dwc->dev, "Expected %d got %d\n",
					dwc->ep0_next_event,
					DWC3_EP0_NRDY_STATUS);
			dwc3_ep0_stall_and_restart(dwc);
			return;
		}

		if (dwc->delayed_status) {
			WARN_ON_ONCE(event->endpoint_number != 1);
			dev_vdbg(dwc->dev, "Mass Storage delayed status\n");
			return;
		}

		dwc3_ep0_do_control_status(dwc, event->endpoint_number);
	}
}

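/*
 * dwc3_ep0_interrupt - top-level dispatcher for endpoint events on the two
 * physical control endpoints (ep0 OUT and ep0 IN). Only XferComplete and
 * XferNotReady are acted upon; the remaining events are ignored here.
 */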
void dwc3_ep0_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	u8 epnum = event->endpoint_number;

	dev_dbg(dwc->dev, "%s while ep%d%s in state '%s'\n",
			dwc3_ep_event_string(event->endpoint_event),
			epnum >> 1, (epnum & 1) ? "in" : "out",
			dwc3_ep0_state_string(dwc->ep0state));

	switch (event->endpoint_event) {
	case DWC3_DEPEVT_XFERCOMPLETE:
		dwc3_ep0_xfer_complete(dwc, event);
		break;

	case DWC3_DEPEVT_XFERNOTREADY:
		dwc3_ep0_xfernotready(dwc, event);
		break;

	case DWC3_DEPEVT_XFERINPROGRESS:
	case DWC3_DEPEVT_RXTXFIFOEVT:
	case DWC3_DEPEVT_STREAMEVT:
	case DWC3_DEPEVT_EPCMDCMPLT:
		break;
	}
}