xhci-mem.c

/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/usb.h>
#include <linux/pci.h>
#include <linux/dmapool.h>

#include "xhci.h"

/*
 * Allocates a generic ring segment from the ring pool, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 *
 * Section 4.11.1.1:
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 */
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
	struct xhci_segment *seg;
	dma_addr_t dma;

	seg = kzalloc(sizeof *seg, flags);
	if (!seg)
		return 0;
	xhci_dbg(xhci, "Allocating priv segment structure at %p\n", seg);

	seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
	if (!seg->trbs) {
		kfree(seg);
		return 0;
	}
	xhci_dbg(xhci, "// Allocating segment at %p (virtual) 0x%llx (DMA)\n",
			seg->trbs, (unsigned long long)dma);

	memset(seg->trbs, 0, SEGMENT_SIZE);
	seg->dma = dma;
	seg->next = NULL;

	return seg;
}

static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
	if (!seg)
		return;
	if (seg->trbs) {
		xhci_dbg(xhci, "Freeing DMA segment at %p (virtual) 0x%llx (DMA)\n",
				seg->trbs, (unsigned long long)seg->dma);
		dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
		seg->trbs = NULL;
	}
	xhci_dbg(xhci, "Freeing priv segment structure at %p\n", seg);
	kfree(seg);
}

/*
 * Make the prev segment point to the next segment.
 *
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * DMA address of the next segment.  The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 */
static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
		struct xhci_segment *next, bool link_trbs)
{
	u32 val;

	if (!prev || !next)
		return;
	prev->next = next;
	if (link_trbs) {
		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = next->dma;

		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
		val = prev->trbs[TRBS_PER_SEGMENT-1].link.control;
		val &= ~TRB_TYPE_BITMASK;
		val |= TRB_TYPE(TRB_LINK);
		prev->trbs[TRBS_PER_SEGMENT-1].link.control = val;
	}
	xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n",
			(unsigned long long)prev->dma,
			(unsigned long long)next->dma);
}

/* XXX: Do we need the hcd structure in all these functions? */
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	struct xhci_segment *seg;
	struct xhci_segment *first_seg;

	if (!ring || !ring->first_seg)
		return;
	first_seg = ring->first_seg;
	seg = first_seg->next;
	xhci_dbg(xhci, "Freeing ring at %p\n", ring);
	while (seg != first_seg) {
		struct xhci_segment *next = seg->next;
		xhci_segment_free(xhci, seg);
		seg = next;
	}
	xhci_segment_free(xhci, first_seg);
	ring->first_seg = NULL;
	kfree(ring);
}

/**
 * Create a new ring with zero or more segments.
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.1 and figures 15 and 16.
 */
static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
		unsigned int num_segs, bool link_trbs, gfp_t flags)
{
	struct xhci_ring *ring;
	struct xhci_segment *prev;

	ring = kzalloc(sizeof *(ring), flags);
	xhci_dbg(xhci, "Allocating ring at %p\n", ring);
	if (!ring)
		return 0;

	INIT_LIST_HEAD(&ring->td_list);
	INIT_LIST_HEAD(&ring->cancelled_td_list);
	if (num_segs == 0)
		return ring;

	ring->first_seg = xhci_segment_alloc(xhci, flags);
	if (!ring->first_seg)
		goto fail;
	num_segs--;

	prev = ring->first_seg;
	while (num_segs > 0) {
		struct xhci_segment *next;

		next = xhci_segment_alloc(xhci, flags);
		if (!next)
			goto fail;
		xhci_link_segments(xhci, prev, next, link_trbs);

		prev = next;
		num_segs--;
	}
	xhci_link_segments(xhci, prev, ring->first_seg, link_trbs);

	if (link_trbs) {
		/* See section 4.9.2.1 and 6.4.4.1 */
		prev->trbs[TRBS_PER_SEGMENT-1].link.control |= (LINK_TOGGLE);
		xhci_dbg(xhci, "Wrote link toggle flag to"
				" segment %p (virtual), 0x%llx (DMA)\n",
				prev, (unsigned long long)prev->dma);
	}
	/* The ring is empty, so the enqueue pointer == dequeue pointer */
	ring->enqueue = ring->first_seg->trbs;
	ring->enq_seg = ring->first_seg;
	ring->dequeue = ring->enqueue;
	ring->deq_seg = ring->first_seg;
	/* The ring is initialized to 0.  The producer must write 1 to the cycle
	 * bit to hand over ownership of the TRB, so PCS = 1.  The consumer must
	 * compare CCS to the cycle bit to check ownership, so CCS = 1.
	 */
	ring->cycle_state = 1;

	return ring;

fail:
	xhci_ring_free(xhci, ring);
	return 0;
}
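
/*
 * Illustrative usage sketch (not part of the driver): a one-segment command or
 * transfer ring is typically allocated with link TRBs enabled and released
 * with xhci_ring_free(), e.g.
 *
 *	ring = xhci_ring_alloc(xhci, 1, true, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	xhci_ring_free(xhci, ring);
 *
 * The event ring is the exception: it is allocated with link_trbs == false
 * because its segments are described by the event ring segment table rather
 * than chained together with Link TRBs (see xhci_mem_init() below).
 */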

#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)

struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
		int type, gfp_t flags)
{
	struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags);
	if (!ctx)
		return NULL;

	BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));
	ctx->type = type;
	ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
	if (type == XHCI_CTX_TYPE_INPUT)
		ctx->size += CTX_SIZE(xhci->hcc_params);
	ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
	memset(ctx->bytes, 0, ctx->size);
	return ctx;
}
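
/*
 * Worked example (illustrative only): a container context holds 32 entries
 * (one slot context plus 31 endpoint contexts), so ctx->size is 32 * 32 = 1024
 * bytes with 32-byte contexts or 32 * 64 = 2048 bytes with 64-byte contexts.
 * An input context carries one extra entry for the input control context,
 * giving 1024 + 32 = 1056 or 2048 + 64 = 2112 bytes.  The device_pool created
 * in xhci_mem_init() below is sized at 2112 bytes to cover the largest case.
 */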

void xhci_free_container_ctx(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx)
{
	dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
	kfree(ctx);
}

struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx)
{
	BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT);
	return (struct xhci_input_control_ctx *)ctx->bytes;
}

struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx)
{
	if (ctx->type == XHCI_CTX_TYPE_DEVICE)
		return (struct xhci_slot_ctx *)ctx->bytes;

	return (struct xhci_slot_ctx *)
		(ctx->bytes + CTX_SIZE(xhci->hcc_params));
}

struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx,
		unsigned int ep_index)
{
	/* increment ep index by offset of start of ep ctx array */
	ep_index++;
	if (ctx->type == XHCI_CTX_TYPE_INPUT)
		ep_index++;

	return (struct xhci_ep_ctx *)
		(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
}
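
/*
 * Worked example (illustrative only): in a device (output) context the slot
 * context occupies entry 0, so endpoint index 0 (the default control endpoint)
 * lives at entry 1, i.e. offset 1 * CTX_SIZE.  In an input context the input
 * control context occupies entry 0 and the slot context entry 1, so the same
 * endpoint index 0 lands at entry 2, i.e. offset 64 with 32-byte contexts or
 * offset 128 with 64-byte contexts.
 */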

/* All the xhci_tds in the ring's TD list should be freed at this point */
void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *dev;
	int i;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || !xhci->devs[slot_id])
		return;

	dev = xhci->devs[slot_id];
	xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
	if (!dev)
		return;

	for (i = 0; i < 31; ++i)
		if (dev->ep_rings[i])
			xhci_ring_free(xhci, dev->ep_rings[i]);

	if (dev->in_ctx)
		xhci_free_container_ctx(xhci, dev->in_ctx);
	if (dev->out_ctx)
		xhci_free_container_ctx(xhci, dev->out_ctx);

	kfree(xhci->devs[slot_id]);
	xhci->devs[slot_id] = 0;
}

int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
		struct usb_device *udev, gfp_t flags)
{
	struct xhci_virt_device *dev;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || xhci->devs[slot_id]) {
		xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
		return 0;
	}

	xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
	if (!xhci->devs[slot_id])
		return 0;
	dev = xhci->devs[slot_id];

	/* Allocate the (output) device context that will be used in the HC. */
	dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
	if (!dev->out_ctx)
		goto fail;
	xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->out_ctx->dma);

	/* Allocate the (input) device context for address device command */
	dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
	if (!dev->in_ctx)
		goto fail;
	xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->in_ctx->dma);

	/* Allocate endpoint 0 ring */
	dev->ep_rings[0] = xhci_ring_alloc(xhci, 1, true, flags);
	if (!dev->ep_rings[0])
		goto fail;

	init_completion(&dev->cmd_completion);

	/* Point to output device context in dcbaa. */
	xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx->dma;
	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
			slot_id,
			&xhci->dcbaa->dev_context_ptrs[slot_id],
			(unsigned long long) xhci->dcbaa->dev_context_ptrs[slot_id]);

	return 1;
fail:
	xhci_free_virt_device(xhci, slot_id);
	return 0;
}

/* Setup an xHCI virtual device for a Set Address command */
int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
{
	struct xhci_virt_device *dev;
	struct xhci_ep_ctx *ep0_ctx;
	struct usb_device *top_dev;
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;

	dev = xhci->devs[udev->slot_id];
	/* Slot ID 0 is reserved */
	if (udev->slot_id == 0 || !dev) {
		xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
				udev->slot_id);
		return -EINVAL;
	}
	ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
	ctrl_ctx = xhci_get_input_control_ctx(xhci, dev->in_ctx);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);

	/* 2) New slot context and endpoint 0 context are valid */
	ctrl_ctx->add_flags = SLOT_FLAG | EP0_FLAG;

	/* 3) Only the control endpoint is valid - one endpoint context */
	slot_ctx->dev_info |= LAST_CTX(1);

	switch (udev->speed) {
	case USB_SPEED_SUPER:
		slot_ctx->dev_info |= (u32) udev->route;
		slot_ctx->dev_info |= (u32) SLOT_SPEED_SS;
		break;
	case USB_SPEED_HIGH:
		slot_ctx->dev_info |= (u32) SLOT_SPEED_HS;
		break;
	case USB_SPEED_FULL:
		slot_ctx->dev_info |= (u32) SLOT_SPEED_FS;
		break;
	case USB_SPEED_LOW:
		slot_ctx->dev_info |= (u32) SLOT_SPEED_LS;
		break;
	case USB_SPEED_VARIABLE:
		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
		return -EINVAL;
		break;
	default:
		/* Speed was set earlier, this shouldn't happen. */
		BUG();
	}
	/* Find the root hub port this device is under */
	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
		/* Found device below root hub */;
	slot_ctx->dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum);
	xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum);

	/* Is this a LS/FS device under a HS hub? */
	/*
	 * FIXME: I don't think this is right, where does the TT info for the
	 * roothub or parent hub come from?
	 */
	if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) &&
			udev->tt) {
		slot_ctx->tt_info = udev->tt->hub->slot_id;
		slot_ctx->tt_info |= udev->ttport << 8;
	}
	xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
	xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);

	/* Step 4 - ring already allocated */
	/* Step 5 */
	ep0_ctx->ep_info2 = EP_TYPE(CTRL_EP);
	/*
	 * See section 4.3 bullet 6:
	 * The default Max Packet size for ep0 is "8 bytes for a USB2
	 * LS/FS/HS device or 512 bytes for a USB3 SS device"
	 * XXX: Not sure about wireless USB devices.
	 */
	if (udev->speed == USB_SPEED_SUPER)
		ep0_ctx->ep_info2 |= MAX_PACKET(512);
	else
		ep0_ctx->ep_info2 |= MAX_PACKET(8);
	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
	ep0_ctx->ep_info2 |= MAX_BURST(0);
	ep0_ctx->ep_info2 |= ERROR_COUNT(3);

	ep0_ctx->deq =
		dev->ep_rings[0]->first_seg->dma;
	ep0_ctx->deq |= dev->ep_rings[0]->cycle_state;

	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */

	return 0;
}
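
/*
 * Note (illustrative only): ring segments come from a dma_pool created with
 * 64-byte alignment, so the low bits of first_seg->dma are zero and bit 0 of
 * the TR Dequeue Pointer field is free to carry the Dequeue Cycle State.  A
 * freshly allocated ring has cycle_state == 1, so a segment at, say, DMA
 * address 0x3a081000 would be written into ep0_ctx->deq as 0x3a081001.
 */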

/* Return the polling or NAK interval.
 *
 * The polling interval is expressed in "microframes".  If xHCI's Interval field
 * is set to N, it will service the endpoint every 2^(Interval)*125us.
 *
 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
 * is set to 0.
 */
static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval = 0;

	switch (udev->speed) {
	case USB_SPEED_HIGH:
		/* Max NAK rate */
		if (usb_endpoint_xfer_control(&ep->desc) ||
				usb_endpoint_xfer_bulk(&ep->desc))
			interval = ep->desc.bInterval;
		/* Fall through - SS and HS isoc/int have same decoding */
	case USB_SPEED_SUPER:
		if (usb_endpoint_xfer_int(&ep->desc) ||
				usb_endpoint_xfer_isoc(&ep->desc)) {
			if (ep->desc.bInterval == 0)
				interval = 0;
			else
				interval = ep->desc.bInterval - 1;
			if (interval > 15)
				interval = 15;
			if (interval != ep->desc.bInterval + 1)
				dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
						ep->desc.bEndpointAddress, 1 << interval);
		}
		break;
	/* Convert bInterval (in 1-255 frames) to microframes and round down to
	 * nearest power of 2.
	 */
	case USB_SPEED_FULL:
	case USB_SPEED_LOW:
		if (usb_endpoint_xfer_int(&ep->desc) ||
				usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = fls(8*ep->desc.bInterval) - 1;
			if (interval > 10)
				interval = 10;
			if (interval < 3)
				interval = 3;
			if ((1 << interval) != 8*ep->desc.bInterval)
				dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
						ep->desc.bEndpointAddress, 1 << interval);
		}
		break;
	default:
		BUG();
	}
	return EP_INTERVAL(interval);
}
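
/*
 * Worked examples (illustrative only):
 *
 * - HS/SS interrupt endpoint with bInterval = 4: Interval = 4 - 1 = 3, so the
 *   HC services the endpoint every 2^3 * 125us = 1ms.
 *
 * - FS interrupt endpoint with bInterval = 32 frames: fls(8 * 32) - 1 = 8, and
 *   2^8 microframes = 32ms, so the requested period is preserved exactly.
 *
 * - FS interrupt endpoint with bInterval = 5 frames: fls(8 * 5) - 1 = 5, and
 *   2^5 microframes = 4ms, so the requested 5ms period is rounded down and the
 *   warning above fires.
 */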

static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	int in;
	u32 type;

	in = usb_endpoint_dir_in(&ep->desc);
	if (usb_endpoint_xfer_control(&ep->desc)) {
		type = EP_TYPE(CTRL_EP);
	} else if (usb_endpoint_xfer_bulk(&ep->desc)) {
		if (in)
			type = EP_TYPE(BULK_IN_EP);
		else
			type = EP_TYPE(BULK_OUT_EP);
	} else if (usb_endpoint_xfer_isoc(&ep->desc)) {
		if (in)
			type = EP_TYPE(ISOC_IN_EP);
		else
			type = EP_TYPE(ISOC_OUT_EP);
	} else if (usb_endpoint_xfer_int(&ep->desc)) {
		if (in)
			type = EP_TYPE(INT_IN_EP);
		else
			type = EP_TYPE(INT_OUT_EP);
	} else {
		BUG();
	}
	return type;
}

int xhci_endpoint_init(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_device *udev,
		struct usb_host_endpoint *ep,
		gfp_t mem_flags)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_ring *ep_ring;
	unsigned int max_packet;
	unsigned int max_burst;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	/* Set up the endpoint ring */
	virt_dev->new_ep_rings[ep_index] = xhci_ring_alloc(xhci, 1, true, mem_flags);
	if (!virt_dev->new_ep_rings[ep_index])
		return -ENOMEM;
	ep_ring = virt_dev->new_ep_rings[ep_index];
	ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;

	ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);

	/* FIXME dig Mult and streams info out of ep companion desc */

	/* Allow 3 retries for everything but isoc;
	 * error count = 0 means infinite retries.
	 */
	if (!usb_endpoint_xfer_isoc(&ep->desc))
		ep_ctx->ep_info2 = ERROR_COUNT(3);
	else
		ep_ctx->ep_info2 = ERROR_COUNT(1);

	ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep);

	/* Set the max packet size and max burst */
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		max_packet = ep->desc.wMaxPacketSize;
		ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
		/* dig out max burst from ep companion desc */
		if (!ep->ss_ep_comp) {
			xhci_warn(xhci, "WARN no SS endpoint companion descriptor.\n");
			max_packet = 0;
		} else {
			max_packet = ep->ss_ep_comp->desc.bMaxBurst;
		}
		ep_ctx->ep_info2 |= MAX_BURST(max_packet);
		break;
	case USB_SPEED_HIGH:
		/* bits 11:12 specify the number of additional transaction
		 * opportunities per microframe (USB 2.0, section 9.6.6)
		 */
		if (usb_endpoint_xfer_isoc(&ep->desc) ||
				usb_endpoint_xfer_int(&ep->desc)) {
			max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
			ep_ctx->ep_info2 |= MAX_BURST(max_burst);
		}
		/* Fall through */
	case USB_SPEED_FULL:
	case USB_SPEED_LOW:
		max_packet = ep->desc.wMaxPacketSize & 0x3ff;
		ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
		break;
	default:
		BUG();
	}
	/* FIXME Debug endpoint context */
	return 0;
}
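
/*
 * Worked example (illustrative only): a SuperSpeed bulk endpoint with
 * wMaxPacketSize = 1024 and a companion descriptor reporting bMaxBurst = 3
 * ends up with MAX_PACKET(1024) | MAX_BURST(3) in ep_info2, i.e. the HC may
 * move bursts of up to 4 packets of 1024 bytes each per service opportunity.
 */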

void xhci_endpoint_zero(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_host_endpoint *ep)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = 0;
	ep_ctx->deq = 0;
	ep_ctx->tx_info = 0;
	/* Don't free the endpoint ring until the set interface or configuration
	 * request succeeds.
	 */
}

/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
	int i;
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp);

	if (!num_sp)
		return 0;

	xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags);
	if (!xhci->scratchpad)
		goto fail_sp;

	xhci->scratchpad->sp_array =
		pci_alloc_consistent(to_pci_dev(dev),
				num_sp * sizeof(u64),
				&xhci->scratchpad->sp_dma);
	if (!xhci->scratchpad->sp_array)
		goto fail_sp2;

	xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags);
	if (!xhci->scratchpad->sp_buffers)
		goto fail_sp3;

	xhci->scratchpad->sp_dma_buffers =
		kzalloc(sizeof(dma_addr_t) * num_sp, flags);
	if (!xhci->scratchpad->sp_dma_buffers)
		goto fail_sp4;

	xhci->dcbaa->dev_context_ptrs[0] = xhci->scratchpad->sp_dma;
	for (i = 0; i < num_sp; i++) {
		dma_addr_t dma;
		void *buf = pci_alloc_consistent(to_pci_dev(dev),
				xhci->page_size, &dma);
		if (!buf)
			goto fail_sp5;

		xhci->scratchpad->sp_array[i] = dma;
		xhci->scratchpad->sp_buffers[i] = buf;
		xhci->scratchpad->sp_dma_buffers[i] = dma;
	}

	return 0;

 fail_sp5:
	for (i = i - 1; i >= 0; i--) {
		pci_free_consistent(to_pci_dev(dev), xhci->page_size,
				xhci->scratchpad->sp_buffers[i],
				xhci->scratchpad->sp_dma_buffers[i]);
	}
	kfree(xhci->scratchpad->sp_dma_buffers);

 fail_sp4:
	kfree(xhci->scratchpad->sp_buffers);

 fail_sp3:
	pci_free_consistent(to_pci_dev(dev), num_sp * sizeof(u64),
			xhci->scratchpad->sp_array,
			xhci->scratchpad->sp_dma);

 fail_sp2:
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;

 fail_sp:
	return -ENOMEM;
}
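
/*
 * Worked example (illustrative only): if HCS_MAX_SCRATCHPAD(hcs_params2)
 * reports 4 buffers and the page size is 4K, the driver allocates one 4-entry
 * array of 64-bit DMA addresses (whose own DMA address goes into
 * dcbaa->dev_context_ptrs[0]) plus four 4K buffers, and stores each buffer's
 * DMA address in the corresponding array entry.  The HC uses these pages as
 * private working memory; the driver never touches their contents.
 */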

static void scratchpad_free(struct xhci_hcd *xhci)
{
	int num_sp;
	int i;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	if (!xhci->scratchpad)
		return;

	num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	for (i = 0; i < num_sp; i++) {
		pci_free_consistent(pdev, xhci->page_size,
				xhci->scratchpad->sp_buffers[i],
				xhci->scratchpad->sp_dma_buffers[i]);
	}
	kfree(xhci->scratchpad->sp_dma_buffers);
	kfree(xhci->scratchpad->sp_buffers);
	pci_free_consistent(pdev, num_sp * sizeof(u64),
			xhci->scratchpad->sp_array,
			xhci->scratchpad->sp_dma);
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;
}

void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	int size;
	int i;

	/* Free the Event Ring Segment Table and the actual Event Ring */
	xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
	xhci_write_64(xhci, 0, &xhci->ir_set->erst_base);
	xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue);
	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
	if (xhci->erst.entries)
		pci_free_consistent(pdev, size,
				xhci->erst.entries, xhci->erst.erst_dma_addr);
	xhci->erst.entries = NULL;
	xhci_dbg(xhci, "Freed ERST\n");
	if (xhci->event_ring)
		xhci_ring_free(xhci, xhci->event_ring);
	xhci->event_ring = NULL;
	xhci_dbg(xhci, "Freed event ring\n");

	xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
	if (xhci->cmd_ring)
		xhci_ring_free(xhci, xhci->cmd_ring);
	xhci->cmd_ring = NULL;
	xhci_dbg(xhci, "Freed command ring\n");

	for (i = 1; i < MAX_HC_SLOTS; ++i)
		xhci_free_virt_device(xhci, i);

	if (xhci->segment_pool)
		dma_pool_destroy(xhci->segment_pool);
	xhci->segment_pool = NULL;
	xhci_dbg(xhci, "Freed segment pool\n");

	if (xhci->device_pool)
		dma_pool_destroy(xhci->device_pool);
	xhci->device_pool = NULL;
	xhci_dbg(xhci, "Freed device context pool\n");

	xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
	if (xhci->dcbaa)
		pci_free_consistent(pdev, sizeof(*xhci->dcbaa),
				xhci->dcbaa, xhci->dcbaa->dma);
	xhci->dcbaa = NULL;

	xhci->page_size = 0;
	xhci->page_shift = 0;
	scratchpad_free(xhci);
}

int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
	dma_addr_t dma;
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	unsigned int val, val2;
	u64 val_64;
	struct xhci_segment *seg;
	u32 page_size;
	int i;

	page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
	xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size);
	for (i = 0; i < 16; i++) {
		if ((0x1 & page_size) != 0)
			break;
		page_size = page_size >> 1;
	}
	if (i < 16)
		xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024);
	else
		xhci_warn(xhci, "WARN: no supported page size\n");
	/* Use 4K pages, since that's common and the minimum the HC supports */
	xhci->page_shift = 12;
	xhci->page_size = 1 << xhci->page_shift;
	xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024);

	/*
	 * Program the Number of Device Slots Enabled field in the CONFIG
	 * register with the max value of slots the HC can handle.
	 */
	val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1));
	xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n",
			(unsigned int) val);
	val2 = xhci_readl(xhci, &xhci->op_regs->config_reg);
	val |= (val2 & ~HCS_SLOTS_MASK);
	xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n",
			(unsigned int) val);
	xhci_writel(xhci, val, &xhci->op_regs->config_reg);

	/*
	 * Section 5.4.8 - doorbell array must be
	 * "physically contiguous and 64-byte (cache line) aligned".
	 */
	xhci->dcbaa = pci_alloc_consistent(to_pci_dev(dev),
			sizeof(*xhci->dcbaa), &dma);
	if (!xhci->dcbaa)
		goto fail;
	memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
	xhci->dcbaa->dma = dma;
	xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
			(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
	xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);

	/*
	 * Initialize the ring segment pool.  The ring must be a contiguous
	 * structure comprised of TRBs.  The TRBs must be 16 byte aligned,
	 * however, the command ring segment needs 64-byte aligned segments,
	 * so we pick the greater alignment need.
	 */
	xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
			SEGMENT_SIZE, 64, xhci->page_size);

	/* See Table 46 and Note on Figure 55 */
	xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
			2112, 64, xhci->page_size);
	if (!xhci->segment_pool || !xhci->device_pool)
		goto fail;

	/* Set up the command ring to have one segment for now. */
	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags);
	if (!xhci->cmd_ring)
		goto fail;
	xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
	xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
			(unsigned long long)xhci->cmd_ring->first_seg->dma);

	/* Set the address in the Command Ring Control register */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
			(unsigned long long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	val = xhci_readl(xhci, &xhci->cap_regs->db_off);
	val &= DBOFF_MASK;
	xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
			" from cap regs base addr\n", val);
	xhci->dba = (void *) xhci->cap_regs + val;
	xhci_dbg_regs(xhci);
	xhci_print_run_regs(xhci);
	/* Set ir_set to interrupt register set 0 */
	xhci->ir_set = (void *) xhci->run_regs->ir_set;

	/*
	 * Event ring setup: Allocate a normal ring, but also setup
	 * the event ring segment table (ERST).  Section 4.9.3.
	 */
	xhci_dbg(xhci, "// Allocating event ring\n");
	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
	if (!xhci->event_ring)
		goto fail;

	xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev),
			sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma);
	if (!xhci->erst.entries)
		goto fail;
	xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n",
			(unsigned long long)dma);

	memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
	xhci->erst.num_entries = ERST_NUM_SEGS;
	xhci->erst.erst_dma_addr = dma;
	xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n",
			xhci->erst.num_entries,
			xhci->erst.entries,
			(unsigned long long)xhci->erst.erst_dma_addr);

	/* set ring base address and size for each segment table entry */
	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
		struct xhci_erst_entry *entry = &xhci->erst.entries[val];
		entry->seg_addr = seg->dma;
		entry->seg_size = TRBS_PER_SEGMENT;
		entry->rsvd = 0;
		seg = seg->next;
	}

	/* set ERST count with the number of entries in the segment table */
	val = xhci_readl(xhci, &xhci->ir_set->erst_size);
	val &= ERST_SIZE_MASK;
	val |= ERST_NUM_SEGS;
	xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n",
			val);
	xhci_writel(xhci, val, &xhci->ir_set->erst_size);

	xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
	/* set the segment table base address */
	xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
			(unsigned long long)xhci->erst.erst_dma_addr);
	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	val_64 &= ERST_PTR_MASK;
	val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
	xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);

	/* Set the event ring dequeue address */
	xhci_set_hc_event_deq(xhci);
	xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
	xhci_print_ir_set(xhci, xhci->ir_set, 0);

	/*
	 * XXX: Might need to set the Interrupter Moderation Register to
	 * something other than the default (~1ms minimum between interrupts).
	 * See section 5.5.1.2.
	 */
	init_completion(&xhci->addr_dev);
	for (i = 0; i < MAX_HC_SLOTS; ++i)
		xhci->devs[i] = 0;

	if (scratchpad_alloc(xhci, flags))
		goto fail;

	return 0;

fail:
	xhci_warn(xhci, "Couldn't initialize memory\n");
	xhci_mem_cleanup(xhci);
	return -ENOMEM;
}
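
/*
 * Illustrative usage sketch (not part of the driver): the HCD setup path would
 * typically call xhci_mem_init(xhci, GFP_KERNEL) once the register maps are
 * available, and call xhci_mem_cleanup(xhci) when the controller is stopped or
 * removed (xhci_mem_init() already cleans up after itself on failure), e.g.
 *
 *	ret = xhci_mem_init(xhci, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	...
 *	xhci_mem_cleanup(xhci);
 */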