xhci-mem.c
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/usb.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>

#include "xhci.h"
#include "xhci-trace.h"
/*
 * Allocates a generic ring segment from the ring pool, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 *
 * Section 4.11.1.1:
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 */
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
					unsigned int cycle_state, gfp_t flags)
{
	struct xhci_segment *seg;
	dma_addr_t dma;
	int i;

	seg = kzalloc(sizeof *seg, flags);
	if (!seg)
		return NULL;

	seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
	if (!seg->trbs) {
		kfree(seg);
		return NULL;
	}
	memset(seg->trbs, 0, TRB_SEGMENT_SIZE);
	/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
	if (cycle_state == 0) {
		for (i = 0; i < TRBS_PER_SEGMENT; i++)
			seg->trbs[i].link.control |= TRB_CYCLE;
	}
	seg->dma = dma;
	seg->next = NULL;

	return seg;
}
static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
	if (seg->trbs) {
		dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
		seg->trbs = NULL;
	}
	kfree(seg);
}

static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
				struct xhci_segment *first)
{
	struct xhci_segment *seg;

	seg = first->next;
	while (seg != first) {
		struct xhci_segment *next = seg->next;
		xhci_segment_free(xhci, seg);
		seg = next;
	}
	xhci_segment_free(xhci, first);
}
/*
 * Make the prev segment point to the next segment.
 *
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * DMA address of the next segment.  The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 */
static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
		struct xhci_segment *next, enum xhci_ring_type type)
{
	u32 val;

	if (!prev || !next)
		return;
	prev->next = next;
	if (type != TYPE_EVENT) {
		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
			cpu_to_le64(next->dma);

		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
		val &= ~TRB_TYPE_BITMASK;
		val |= TRB_TYPE(TRB_LINK);
		/* Always set the chain bit with 0.95 hardware */
		/* Set chain bit for isoc rings on AMD 0.96 host */
		if (xhci_link_trb_quirk(xhci) ||
				(type == TYPE_ISOC &&
				 (xhci->quirks & XHCI_AMD_0x96_HOST)))
			val |= TRB_CHAIN;
		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
	}
}
/*
 * Link the ring to the new segments.
 * Set Toggle Cycle for the new ring if needed.
 */
static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *first, struct xhci_segment *last,
		unsigned int num_segs)
{
	struct xhci_segment *next;

	if (!ring || !first || !last)
		return;

	next = ring->enq_seg->next;
	xhci_link_segments(xhci, ring->enq_seg, first, ring->type);
	xhci_link_segments(xhci, last, next, ring->type);
	ring->num_segs += num_segs;
	ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;

	if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
		ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
			&= ~cpu_to_le32(LINK_TOGGLE);
		last->trbs[TRBS_PER_SEGMENT-1].link.control
			|= cpu_to_le32(LINK_TOGGLE);
		ring->last_seg = last;
	}
}
/* XXX: Do we need the hcd structure in all these functions? */
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	if (!ring)
		return;

	if (ring->first_seg)
		xhci_free_segments_for_ring(xhci, ring->first_seg);
	kfree(ring);
}
static void xhci_initialize_ring_info(struct xhci_ring *ring,
					unsigned int cycle_state)
{
	/* The ring is empty, so the enqueue pointer == dequeue pointer */
	ring->enqueue = ring->first_seg->trbs;
	ring->enq_seg = ring->first_seg;
	ring->dequeue = ring->enqueue;
	ring->deq_seg = ring->first_seg;
	/* The ring is initialized to 0.  The producer must write 1 to the cycle
	 * bit to hand over ownership of the TRB, so PCS = 1.  The consumer must
	 * compare CCS to the cycle bit to check ownership, so CCS = 1.
	 *
	 * New rings are initialized with cycle state equal to 1; if we are
	 * handling ring expansion, set the cycle state equal to the old ring.
	 */
	ring->cycle_state = cycle_state;
	/* Not necessary for new rings, but needed for re-initialized rings */
	ring->enq_updates = 0;
	ring->deq_updates = 0;

	/*
	 * Each segment has a link TRB, and we leave one extra TRB for SW
	 * accounting purposes.
	 */
	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
}
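
/*
 * Worked example for the accounting above (illustrative only; it assumes
 * TRBS_PER_SEGMENT == 64, a value defined in xhci.h, not in this file):
 *
 *	a freshly initialized 2-segment ring has
 *	num_trbs_free = 2 * (64 - 1) - 1 = 125
 *
 * One TRB per segment is consumed by the link TRB, and one more is held
 * back so that a completely full ring can be told apart from an empty one.
 */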

/* Allocate segments and link them for a ring */
static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
		struct xhci_segment **first, struct xhci_segment **last,
		unsigned int num_segs, unsigned int cycle_state,
		enum xhci_ring_type type, gfp_t flags)
{
	struct xhci_segment *prev;

	prev = xhci_segment_alloc(xhci, cycle_state, flags);
	if (!prev)
		return -ENOMEM;
	num_segs--;

	*first = prev;
	while (num_segs > 0) {
		struct xhci_segment *next;

		next = xhci_segment_alloc(xhci, cycle_state, flags);
		if (!next) {
			prev = *first;
			while (prev) {
				next = prev->next;
				xhci_segment_free(xhci, prev);
				prev = next;
			}
			return -ENOMEM;
		}
		xhci_link_segments(xhci, prev, next, type);

		prev = next;
		num_segs--;
	}
	xhci_link_segments(xhci, prev, *first, type);
	*last = prev;

	return 0;
}
/**
 * Create a new ring with zero or more segments.
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.1 and figures 15 and 16.
 */
static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
		unsigned int num_segs, unsigned int cycle_state,
		enum xhci_ring_type type, gfp_t flags)
{
	struct xhci_ring *ring;
	int ret;

	ring = kzalloc(sizeof *(ring), flags);
	if (!ring)
		return NULL;

	ring->num_segs = num_segs;
	INIT_LIST_HEAD(&ring->td_list);
	ring->type = type;
	if (num_segs == 0)
		return ring;

	ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
			&ring->last_seg, num_segs, cycle_state, type, flags);
	if (ret)
		goto fail;

	/* Only event ring does not use link TRB */
	if (type != TYPE_EVENT) {
		/* See section 4.9.2.1 and 6.4.4.1 */
		ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
			cpu_to_le32(LINK_TOGGLE);
	}
	xhci_initialize_ring_info(ring, cycle_state);
	return ring;

fail:
	kfree(ring);
	return NULL;
}
void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		unsigned int ep_index)
{
	int rings_cached;

	rings_cached = virt_dev->num_rings_cached;
	if (rings_cached < XHCI_MAX_RINGS_CACHED) {
		virt_dev->ring_cache[rings_cached] =
			virt_dev->eps[ep_index].ring;
		virt_dev->num_rings_cached++;
		xhci_dbg(xhci, "Cached old ring, "
				"%d ring%s cached\n",
				virt_dev->num_rings_cached,
				(virt_dev->num_rings_cached > 1) ? "s" : "");
	} else {
		xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
		xhci_dbg(xhci, "Ring cache full (%d rings), "
				"freeing ring\n",
				virt_dev->num_rings_cached);
	}
	virt_dev->eps[ep_index].ring = NULL;
}

/* Zero an endpoint ring (except for link TRBs) and move the enqueue and dequeue
 * pointers to the beginning of the ring.
 */
static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
			struct xhci_ring *ring, unsigned int cycle_state,
			enum xhci_ring_type type)
{
	struct xhci_segment *seg = ring->first_seg;
	int i;

	do {
		memset(seg->trbs, 0,
				sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
		if (cycle_state == 0) {
			for (i = 0; i < TRBS_PER_SEGMENT; i++)
				seg->trbs[i].link.control |= TRB_CYCLE;
		}
		/* All endpoint rings have link TRBs */
		xhci_link_segments(xhci, seg, seg->next, type);
		seg = seg->next;
	} while (seg != ring->first_seg);

	ring->type = type;
	xhci_initialize_ring_info(ring, cycle_state);
	/* td list should be empty since all URBs have been cancelled,
	 * but just in case...
	 */
	INIT_LIST_HEAD(&ring->td_list);
}

/*
 * Expand an existing ring.
 * Allocate a new set of segments (at least as many as are needed to hold
 * num_trbs more TRBs) and link them into the existing ring.
 */
int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
				unsigned int num_trbs, gfp_t flags)
{
	struct xhci_segment *first;
	struct xhci_segment *last;
	unsigned int num_segs;
	unsigned int num_segs_needed;
	int ret;

	num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
				(TRBS_PER_SEGMENT - 1);

	/* Allocate the number of segments we need, or double the ring size */
	num_segs = ring->num_segs > num_segs_needed ?
			ring->num_segs : num_segs_needed;

	ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
			num_segs, ring->cycle_state, ring->type, flags);
	if (ret)
		return -ENOMEM;

	xhci_link_rings(xhci, ring, first, last, num_segs);
	xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
			"ring expansion succeed, now has %d segments",
			ring->num_segs);

	return 0;
}
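
/*
 * Worked example of the segment math above (illustrative; again assumes
 * TRBS_PER_SEGMENT == 64, i.e. 63 usable TRBs per segment):
 *
 *	num_trbs = 100  =>  num_segs_needed = (100 + 63 - 1) / 63 = 2
 *
 * Because num_segs picks the larger of the two values, a ring that already
 * has more segments than needed grows by its own size, so an expansion
 * never adds less than one full ring's worth of segments.
 */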

#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)

static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
						    int type, gfp_t flags)
{
	struct xhci_container_ctx *ctx;

	if ((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT))
		return NULL;

	ctx = kzalloc(sizeof(*ctx), flags);
	if (!ctx)
		return NULL;

	ctx->type = type;
	ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
	if (type == XHCI_CTX_TYPE_INPUT)
		ctx->size += CTX_SIZE(xhci->hcc_params);

	ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
	if (!ctx->bytes) {
		kfree(ctx);
		return NULL;
	}
	memset(ctx->bytes, 0, ctx->size);
	return ctx;
}
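
/*
 * Context sizing, spelled out (this follows directly from the code above):
 * a device context holds 32 entries (1 slot + 31 endpoint contexts) of
 * CTX_SIZE bytes each -- 32 * 32 = 1024 bytes normally, or 32 * 64 = 2048
 * bytes when HCC_64BYTE_CONTEXT is set.  An input context carries one
 * extra entry (the input control context), hence the added CTX_SIZE.
 */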

static void xhci_free_container_ctx(struct xhci_hcd *xhci,
			     struct xhci_container_ctx *ctx)
{
	if (!ctx)
		return;
	dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
	kfree(ctx);
}

struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci,
					      struct xhci_container_ctx *ctx)
{
	if (ctx->type != XHCI_CTX_TYPE_INPUT)
		return NULL;

	return (struct xhci_input_control_ctx *)ctx->bytes;
}

struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
					struct xhci_container_ctx *ctx)
{
	if (ctx->type == XHCI_CTX_TYPE_DEVICE)
		return (struct xhci_slot_ctx *)ctx->bytes;

	return (struct xhci_slot_ctx *)
		(ctx->bytes + CTX_SIZE(xhci->hcc_params));
}

struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
				    struct xhci_container_ctx *ctx,
				    unsigned int ep_index)
{
	/* increment ep index by offset of start of ep ctx array */
	ep_index++;
	if (ctx->type == XHCI_CTX_TYPE_INPUT)
		ep_index++;

	return (struct xhci_ep_ctx *)
		(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
}
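
/*
 * Layout implied by the getters above (entry granularity is CTX_SIZE):
 *
 *	device ctx:  [0] slot  [1] ep0  [2] ep1 OUT  ...  [31] ep15 IN
 *	input ctx:   [0] input control  [1] slot  [2] ep0  ...
 *
 * which is why xhci_get_ep_ctx() bumps ep_index once to skip the slot
 * context, and once more for the input control context in an input ctx.
 */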

/***************** Streams structures manipulation *************************/

static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs,
		struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
		dma_free_coherent(&pdev->dev,
				sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
				stream_ctx, dma);
	else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
		dma_pool_free(xhci->small_streams_pool,
				stream_ctx, dma);
	else
		dma_pool_free(xhci->medium_streams_pool,
				stream_ctx, dma);
}

/*
 * The stream context array for each endpoint with bulk streams enabled can
 * vary in size, based on:
 *  - how many streams the endpoint supports,
 *  - the maximum primary stream array size the host controller supports,
 *  - and how many streams the device driver asks for.
 *
 * The stream context array must be a power of 2, and can be as small as
 * 64 bytes or as large as 1MB.
 */
static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs, dma_addr_t *dma,
		gfp_t mem_flags)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
		return dma_alloc_coherent(&pdev->dev,
				sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
				dma, mem_flags);
	else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
		return dma_pool_alloc(xhci->small_streams_pool,
				mem_flags, dma);
	else
		return dma_pool_alloc(xhci->medium_streams_pool,
				mem_flags, dma);
}

struct xhci_ring *xhci_dma_to_transfer_ring(
		struct xhci_virt_ep *ep,
		u64 address)
{
	if (ep->ep_state & EP_HAS_STREAMS)
		return radix_tree_lookup(&ep->stream_info->trb_address_map,
				address >> TRB_SEGMENT_SHIFT);
	return ep->ring;
}

struct xhci_ring *xhci_stream_id_to_ring(
		struct xhci_virt_device *dev,
		unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep = &dev->eps[ep_index];

	if (stream_id == 0)
		return ep->ring;
	if (!ep->stream_info)
		return NULL;

	if (stream_id > ep->stream_info->num_streams)
		return NULL;
	return ep->stream_info->stream_rings[stream_id];
}

/*
 * Change an endpoint's internal structure so it supports stream IDs.  The
 * number of requested streams includes stream 0, which cannot be used by
 * device drivers.
 *
 * The number of stream contexts in the stream context array may be bigger than
 * the number of streams the driver wants to use.  This is because the number
 * of stream context array entries must be a power of two.
 *
 * We need a radix tree for mapping physical addresses of TRBs to the stream ID
 * they belong to.  We need to do this because the host controller won't tell
 * us which stream ring the TRB came from.  We could store the stream ID in an
 * event data TRB, but that doesn't help us for the cancellation case, since the
 * endpoint may stop before it reaches that event data TRB.
 *
 * The radix tree maps the upper portion of the TRB DMA address to a ring
 * segment that has the same upper portion of DMA addresses.  For example, say I
 * have segments of size 1KB, that are always 64-byte aligned.  A segment may
 * start at 0x10c91000 and end at 0x10c913f0.  If I shift off the lower 10 bits,
 * the key for that segment is 0x43244.  I can use the DMA address of the TRB to
 * pass the radix tree a key to get the right stream ring:
 *
 *	0x10c90fff >> 10 = 0x43243
 *	0x10c912c0 >> 10 = 0x43244
 *	0x10c91400 >> 10 = 0x43245
 *
 * Obviously, only those TRBs with DMA addresses that are within the segment
 * will make the radix tree return the stream ID for that ring.
 *
 * Caveats for the radix tree:
 *
 * The radix tree uses an unsigned long as a key.  On 32-bit systems, an
 * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
 * 64-bits.  Since we only request 32-bit DMA addresses, we can use that as the
 * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
 * PCI DMA addresses on a 64-bit system).  There might be a problem on 32-bit
 * extended systems (where the DMA address can be bigger than 32-bits),
 * if we allow the PCI dma mask to be bigger than 32-bits.  So don't do that.
 */
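
/*
 * A minimal sketch of the lookup this tree enables (illustrative only; it
 * mirrors what xhci_dma_to_transfer_ring() above already does):
 *
 *	unsigned long key = (unsigned long)(trb_dma >> TRB_SEGMENT_SHIFT);
 *	ring = radix_tree_lookup(&stream_info->trb_address_map, key);
 *
 * Every TRB in a segment shares the same key, so one radix_tree_insert()
 * per segment is enough to resolve any TRB address back to its ring.
 */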
struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs,
		unsigned int num_streams, gfp_t mem_flags)
{
	struct xhci_stream_info *stream_info;
	u32 cur_stream;
	struct xhci_ring *cur_ring;
	unsigned long key;
	u64 addr;
	int ret;

	xhci_dbg(xhci, "Allocating %u streams and %u "
			"stream context array entries.\n",
			num_streams, num_stream_ctxs);
	if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) {
		xhci_dbg(xhci, "Command ring has no reserved TRBs available\n");
		return NULL;
	}
	xhci->cmd_ring_reserved_trbs++;

	stream_info = kzalloc(sizeof(struct xhci_stream_info), mem_flags);
	if (!stream_info)
		goto cleanup_trbs;

	stream_info->num_streams = num_streams;
	stream_info->num_stream_ctxs = num_stream_ctxs;

	/* Initialize the array of virtual pointers to stream rings. */
	stream_info->stream_rings = kzalloc(
			sizeof(struct xhci_ring *)*num_streams,
			mem_flags);
	if (!stream_info->stream_rings)
		goto cleanup_info;

	/* Initialize the array of DMA addresses for stream rings for the HW. */
	stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci,
			num_stream_ctxs, &stream_info->ctx_array_dma,
			mem_flags);
	if (!stream_info->stream_ctx_array)
		goto cleanup_ctx;
	memset(stream_info->stream_ctx_array, 0,
			sizeof(struct xhci_stream_ctx)*num_stream_ctxs);

	/* Allocate everything needed to free the stream rings later */
	stream_info->free_streams_command =
		xhci_alloc_command(xhci, true, true, mem_flags);
	if (!stream_info->free_streams_command)
		goto cleanup_ctx;

	INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);

	/* Allocate rings for all the streams that the driver will use,
	 * and add their segment DMA addresses to the radix tree.
	 * Stream 0 is reserved.
	 */
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		stream_info->stream_rings[cur_stream] =
			xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, mem_flags);
		cur_ring = stream_info->stream_rings[cur_stream];
		if (!cur_ring)
			goto cleanup_rings;
		cur_ring->stream_id = cur_stream;
		/* Set deq ptr, cycle bit, and stream context type */
		addr = cur_ring->first_seg->dma |
			SCT_FOR_CTX(SCT_PRI_TR) |
			cur_ring->cycle_state;
		stream_info->stream_ctx_array[cur_stream].stream_ring =
			cpu_to_le64(addr);
		xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
				cur_stream, (unsigned long long) addr);

		key = (unsigned long)
			(cur_ring->first_seg->dma >> TRB_SEGMENT_SHIFT);
		ret = radix_tree_insert(&stream_info->trb_address_map,
				key, cur_ring);
		if (ret) {
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
			goto cleanup_rings;
		}
	}
	/* Leave the other unused stream ring pointers in the stream context
	 * array initialized to zero.  This will cause the xHC to give us an
	 * error if the device asks for a stream ID we don't have set up (if it
	 * was any other way, the host controller would assume the ring is
	 * "empty" and wait forever for data to be queued to that stream ID).
	 */

	return stream_info;

cleanup_rings:
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		cur_ring = stream_info->stream_rings[cur_stream];
		if (cur_ring) {
			addr = cur_ring->first_seg->dma;
			radix_tree_delete(&stream_info->trb_address_map,
					addr >> TRB_SEGMENT_SHIFT);
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
		}
	}
	xhci_free_command(xhci, stream_info->free_streams_command);
cleanup_ctx:
	/* The stream context array may already be allocated when we get here
	 * (cleanup_rings falls through to this label); free it too, or it
	 * leaks on the error path.
	 */
	if (stream_info->stream_ctx_array)
		xhci_free_stream_ctx(xhci, num_stream_ctxs,
				stream_info->stream_ctx_array,
				stream_info->ctx_array_dma);
	kfree(stream_info->stream_rings);
cleanup_info:
	kfree(stream_info);
cleanup_trbs:
	xhci->cmd_ring_reserved_trbs--;
	return NULL;
}

/*
 * Sets the MaxPStreams field and the Linear Stream Array field.
 * Sets the dequeue pointer to the stream context array.
 */
void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		struct xhci_stream_info *stream_info)
{
	u32 max_primary_streams;
	/* MaxPStreams is the number of stream context array entries, not the
	 * number we're actually using.  Must be in 2^(MaxPstreams + 1) format.
	 * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 5, fls(0x100) = 9.
	 */
	max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
	xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
			"Setting number of stream ctx array entries to %u",
			1 << (max_primary_streams + 1));
	ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
	ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
				       | EP_HAS_LSA);
	ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma);
}
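
/*
 * Worked example for the MaxPStreams math above: with 256 stream context
 * array entries, fls(256) = 9, so max_primary_streams = 9 - 2 = 7, and
 * the xHC decodes it back as 2^(7 + 1) = 256 entries.
 */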

/*
 * Sets the MaxPStreams field and the Linear Stream Array field to 0.
 * Reinstalls the "normal" endpoint ring (at its previous dequeue mark,
 * not at the beginning of the ring).
 */
void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		struct xhci_virt_ep *ep)
{
	dma_addr_t addr;

	ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA));
	addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
	ep_ctx->deq = cpu_to_le64(addr | ep->ring->cycle_state);
}

/* Frees all stream contexts associated with the endpoint.
 *
 * Caller should fix the endpoint context streams fields.
 */
void xhci_free_stream_info(struct xhci_hcd *xhci,
		struct xhci_stream_info *stream_info)
{
	int cur_stream;
	struct xhci_ring *cur_ring;
	dma_addr_t addr;

	if (!stream_info)
		return;

	for (cur_stream = 1; cur_stream < stream_info->num_streams;
			cur_stream++) {
		cur_ring = stream_info->stream_rings[cur_stream];
		if (cur_ring) {
			addr = cur_ring->first_seg->dma;
			radix_tree_delete(&stream_info->trb_address_map,
					addr >> TRB_SEGMENT_SHIFT);
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
		}
	}
	xhci_free_command(xhci, stream_info->free_streams_command);
	xhci->cmd_ring_reserved_trbs--;
	if (stream_info->stream_ctx_array)
		xhci_free_stream_ctx(xhci,
				stream_info->num_stream_ctxs,
				stream_info->stream_ctx_array,
				stream_info->ctx_array_dma);

	kfree(stream_info->stream_rings);
	kfree(stream_info);
}

/***************** Device context manipulation *************************/

static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep)
{
	init_timer(&ep->stop_cmd_timer);
	ep->stop_cmd_timer.data = (unsigned long) ep;
	ep->stop_cmd_timer.function = xhci_stop_endpoint_command_watchdog;
	ep->xhci = xhci;
}

static void xhci_free_tt_info(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int slot_id)
{
	struct list_head *tt_list_head;
	struct xhci_tt_bw_info *tt_info, *next;
	bool slot_found = false;

	/* If the device never made it past the Set Address stage,
	 * it may not have the real_port set correctly.
	 */
	if (virt_dev->real_port == 0 ||
			virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
		xhci_dbg(xhci, "Bad real port.\n");
		return;
	}

	tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
	list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
		/* Multi-TT hubs will have more than one entry */
		if (tt_info->slot_id == slot_id) {
			slot_found = true;
			list_del(&tt_info->tt_list);
			kfree(tt_info);
		} else if (slot_found) {
			break;
		}
	}
}

int xhci_alloc_tt_info(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_device *hdev,
		struct usb_tt *tt, gfp_t mem_flags)
{
	struct xhci_tt_bw_info *tt_info;
	unsigned int num_ports;
	int i, j;

	if (!tt->multi)
		num_ports = 1;
	else
		num_ports = hdev->maxchild;

	for (i = 0; i < num_ports; i++) {
		struct xhci_interval_bw_table *bw_table;

		tt_info = kzalloc(sizeof(*tt_info), mem_flags);
		if (!tt_info)
			goto free_tts;
		INIT_LIST_HEAD(&tt_info->tt_list);
		list_add(&tt_info->tt_list,
				&xhci->rh_bw[virt_dev->real_port - 1].tts);
		tt_info->slot_id = virt_dev->udev->slot_id;
		if (tt->multi)
			tt_info->ttport = i+1;
		bw_table = &tt_info->bw_table;
		for (j = 0; j < XHCI_MAX_INTERVAL; j++)
			INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
	}
	return 0;

free_tts:
	xhci_free_tt_info(xhci, virt_dev, virt_dev->udev->slot_id);
	return -ENOMEM;
}

/* All the xhci_tds in the ring's TD list should be freed at this point.
 * Should be called with xhci->lock held if there is any chance the TT lists
 * will be manipulated by the configure endpoint, allocate device, or update
 * hub functions while this function is removing the TT entries from the list.
 */
void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *dev;
	int i;
	int old_active_eps = 0;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || !xhci->devs[slot_id])
		return;

	dev = xhci->devs[slot_id];
	xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
	if (!dev)
		return;

	if (dev->tt_info)
		old_active_eps = dev->tt_info->active_eps;

	for (i = 0; i < 31; ++i) {
		if (dev->eps[i].ring)
			xhci_ring_free(xhci, dev->eps[i].ring);
		if (dev->eps[i].stream_info)
			xhci_free_stream_info(xhci,
					dev->eps[i].stream_info);
		/* Endpoints on the TT/root port lists should have been removed
		 * when usb_disable_device() was called for the device.
		 * We can't drop them anyway, because the udev might have gone
		 * away by this point, and we can't tell what speed it was.
		 */
		if (!list_empty(&dev->eps[i].bw_endpoint_list))
			xhci_warn(xhci, "Slot %u endpoint %u "
					"not removed from BW list!\n",
					slot_id, i);
	}
	/* If this is a hub, free the TT(s) from the TT list */
	xhci_free_tt_info(xhci, dev, slot_id);
	/* If necessary, update the number of active TTs on this root port */
	xhci_update_tt_active_eps(xhci, dev, old_active_eps);

	if (dev->ring_cache) {
		for (i = 0; i < dev->num_rings_cached; i++)
			xhci_ring_free(xhci, dev->ring_cache[i]);
		kfree(dev->ring_cache);
	}

	if (dev->in_ctx)
		xhci_free_container_ctx(xhci, dev->in_ctx);
	if (dev->out_ctx)
		xhci_free_container_ctx(xhci, dev->out_ctx);

	kfree(xhci->devs[slot_id]);
	xhci->devs[slot_id] = NULL;
}

int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
		struct usb_device *udev, gfp_t flags)
{
	struct xhci_virt_device *dev;
	int i;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || xhci->devs[slot_id]) {
		xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
		return 0;
	}

	xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
	if (!xhci->devs[slot_id])
		return 0;
	dev = xhci->devs[slot_id];

	/* Allocate the (output) device context that will be used in the HC. */
	dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
	if (!dev->out_ctx)
		goto fail;

	xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->out_ctx->dma);

	/* Allocate the (input) device context for address device command */
	dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
	if (!dev->in_ctx)
		goto fail;

	xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->in_ctx->dma);

	/* Initialize the cancellation list and watchdog timers for each ep */
	for (i = 0; i < 31; i++) {
		xhci_init_endpoint_timer(xhci, &dev->eps[i]);
		INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
		INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
	}

	/* Allocate endpoint 0 ring */
	dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, flags);
	if (!dev->eps[0].ring)
		goto fail;

	/* Allocate pointers to the ring cache */
	dev->ring_cache = kzalloc(
			sizeof(struct xhci_ring *)*XHCI_MAX_RINGS_CACHED,
			flags);
	if (!dev->ring_cache)
		goto fail;
	dev->num_rings_cached = 0;

	init_completion(&dev->cmd_completion);
	INIT_LIST_HEAD(&dev->cmd_list);
	dev->udev = udev;

	/* Point to output device context in dcbaa. */
	xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma);
	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
			slot_id,
			&xhci->dcbaa->dev_context_ptrs[slot_id],
			le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));

	return 1;
fail:
	xhci_free_virt_device(xhci, slot_id);
	return 0;
}

void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
		struct usb_device *udev)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_ep_ctx *ep0_ctx;
	struct xhci_ring *ep_ring;

	virt_dev = xhci->devs[udev->slot_id];
	ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0);
	ep_ring = virt_dev->eps[0].ring;
	/*
	 * FIXME we don't keep track of the dequeue pointer very well after a
	 * Set TR dequeue pointer, so we're setting the dequeue pointer of the
	 * host to our enqueue pointer.  This should only be called after a
	 * configured device has reset, so all control transfers should have
	 * been completed or cancelled before the reset.
	 */
	ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg,
							ep_ring->enqueue)
				   | ep_ring->cycle_state);
}

/*
 * The xHCI roothub may have ports of differing speeds in any order in the port
 * status registers.  xhci->port_array provides an array of the port speed for
 * each offset into the port status registers.
 *
 * The xHCI hardware wants to know the roothub port number that the USB device
 * is attached to (or the roothub port its ancestor hub is attached to).  All we
 * know is the index of that port under either the USB 2.0 or the USB 3.0
 * roothub, but that doesn't give us the real index into the HW port status
 * registers.  Call xhci_find_raw_port_number() to get the real index.
 */
static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
		struct usb_device *udev)
{
	struct usb_device *top_dev;
	struct usb_hcd *hcd;

	if (udev->speed == USB_SPEED_SUPER)
		hcd = xhci->shared_hcd;
	else
		hcd = xhci->main_hcd;

	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
		/* Found device below root hub */;

	return xhci_find_raw_port_number(hcd, top_dev->portnum);
}

/* Set up an xHCI virtual device for a Set Address command */
int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
{
	struct xhci_virt_device *dev;
	struct xhci_ep_ctx *ep0_ctx;
	struct xhci_slot_ctx *slot_ctx;
	u32 port_num;
	u32 max_packets;
	struct usb_device *top_dev;

	dev = xhci->devs[udev->slot_id];
	/* Slot ID 0 is reserved */
	if (udev->slot_id == 0 || !dev) {
		xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
				udev->slot_id);
		return -EINVAL;
	}
	ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);

	/* 3) Only the control endpoint is valid - one endpoint context */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
		max_packets = MAX_PACKET(512);
		break;
	case USB_SPEED_HIGH:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
		max_packets = MAX_PACKET(64);
		break;
	/* USB core guesses at a 64-byte max packet first for FS devices */
	case USB_SPEED_FULL:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
		max_packets = MAX_PACKET(64);
		break;
	case USB_SPEED_LOW:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
		max_packets = MAX_PACKET(8);
		break;
	case USB_SPEED_WIRELESS:
		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
		return -EINVAL;
	default:
		/* Speed was set earlier, this shouldn't happen. */
		return -EINVAL;
	}
	/* Find the root hub port this device is under */
	port_num = xhci_find_real_port_number(xhci, udev);
	if (!port_num)
		return -EINVAL;
	slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num));
	/* Set the port number in the virtual_device to the faked port number */
	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
		/* Found device below root hub */;
	dev->fake_port = top_dev->portnum;
	dev->real_port = port_num;
	xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num);
	xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port);

	/* Find the right bandwidth table that this device will be a part of.
	 * If this is a full speed device attached directly to a root port (or a
	 * descendant of one), it counts as a primary bandwidth domain, not a
	 * secondary bandwidth domain under a TT.  An xhci_tt_info structure
	 * will never be created for the HS root hub.
	 */
	if (!udev->tt || !udev->tt->hub->parent) {
		dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table;
	} else {
		struct xhci_root_port_bw_info *rh_bw;
		struct xhci_tt_bw_info *tt_bw;

		rh_bw = &xhci->rh_bw[port_num - 1];
		/* Find the right TT. */
		list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) {
			if (tt_bw->slot_id != udev->tt->hub->slot_id)
				continue;

			if (!dev->udev->tt->multi ||
					(udev->tt->multi &&
					 tt_bw->ttport == dev->udev->ttport)) {
				dev->bw_table = &tt_bw->bw_table;
				dev->tt_info = tt_bw;
				break;
			}
		}
		if (!dev->tt_info)
			xhci_warn(xhci, "WARN: Didn't find a matching TT\n");
	}

	/* Is this a LS/FS device under an external HS hub? */
	if (udev->tt && udev->tt->hub->parent) {
		slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id |
						(udev->ttport << 8));
		if (udev->tt->multi)
			slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
	}
	xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
	xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);

	/* Step 4 - ring already allocated */
	/* Step 5 */
	ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));

	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
	ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3) |
					 max_packets);

	ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma |
				   dev->eps[0].ring->cycle_state);

	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */

	return 0;
}
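
/*
 * Illustrative composition of the slot context fields set above, for a
 * SuperSpeed device attached directly to root hub port 2 (the values are
 * hypothetical, chosen only to make the bit-OR sequence concrete):
 *
 *	dev_info  = LAST_CTX(1) | route (0) | SLOT_SPEED_SS
 *	dev_info2 = ROOT_HUB_PORT(2)
 *	ep0 deq   = first segment DMA address | cycle state (1)
 */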

/*
 * Convert interval expressed as 2^(bInterval - 1) == interval into
 * straight exponent value 2^n == interval.
 */
static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval;

	interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
	if (interval != ep->desc.bInterval - 1)
		dev_warn(&udev->dev,
			 "ep %#x - rounding interval to %d %sframes\n",
			 ep->desc.bEndpointAddress,
			 1 << interval,
			 udev->speed == USB_SPEED_FULL ? "" : "micro");

	if (udev->speed == USB_SPEED_FULL) {
		/*
		 * Full speed isoc endpoints specify interval in frames,
		 * not microframes.  We are using microframes everywhere,
		 * so adjust accordingly.
		 */
		interval += 3;	/* 1 frame = 2^3 uframes */
	}

	return interval;
}
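
/*
 * Worked example of the conversion above: a full speed isoc endpoint with
 * bInterval = 4 yields interval = 4 - 1 = 3, then the frames-to-microframes
 * shift adds 3, giving 6.  That is a service interval of 2^6 * 125us = 8ms,
 * matching the descriptor's 2^(4-1) = 8 frames.
 */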

/*
 * Convert bInterval expressed in microframes (in 1-255 range) to exponent of
 * microframes, rounded down to the nearest power of 2.
 */
static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
		struct usb_host_endpoint *ep, unsigned int desc_interval,
		unsigned int min_exponent, unsigned int max_exponent)
{
	unsigned int interval;

	interval = fls(desc_interval) - 1;
	interval = clamp_val(interval, min_exponent, max_exponent);
	if ((1 << interval) != desc_interval)
		dev_warn(&udev->dev,
			 "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
			 ep->desc.bEndpointAddress,
			 1 << interval,
			 desc_interval);

	return interval;
}

static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	if (ep->desc.bInterval == 0)
		return 0;

	return xhci_microframes_to_exponent(udev, ep,
			ep->desc.bInterval, 0, 15);
}

static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	return xhci_microframes_to_exponent(udev, ep,
			ep->desc.bInterval * 8, 3, 10);
}
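
/*
 * Worked example of the rounding above: a low speed interrupt endpoint with
 * bInterval = 10 (frames) becomes 10 * 8 = 80 microframes; fls(80) - 1 = 6,
 * which stays 6 after clamping to [3, 10], so the endpoint is serviced every
 * 2^6 = 64 microframes (8ms), rounded down from the requested 10ms, and the
 * dev_warn() above fires to note the adjustment.
 */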

/* Return the polling or NAK interval.
 *
 * The polling interval is expressed in "microframes".  If xHCI's Interval field
 * is set to N, it will service the endpoint every 2^(Interval)*125us.
 *
 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
 * is set to 0.
 */
static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval = 0;

	switch (udev->speed) {
	case USB_SPEED_HIGH:
		/* Max NAK rate */
		if (usb_endpoint_xfer_control(&ep->desc) ||
		    usb_endpoint_xfer_bulk(&ep->desc)) {
			interval = xhci_parse_microframe_interval(udev, ep);
			break;
		}
		/* Fall through - SS and HS isoc/int have same decoding */

	case USB_SPEED_SUPER:
		if (usb_endpoint_xfer_int(&ep->desc) ||
		    usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_exponent_interval(udev, ep);
		}
		break;

	case USB_SPEED_FULL:
		if (usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_exponent_interval(udev, ep);
			break;
		}
		/*
		 * Fall through for interrupt endpoint interval decoding
		 * since it uses the same rules as low speed interrupt
		 * endpoints.
		 */

	case USB_SPEED_LOW:
		if (usb_endpoint_xfer_int(&ep->desc) ||
		    usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_frame_interval(udev, ep);
		}
		break;

	default:
		BUG();
	}
	return EP_INTERVAL(interval);
}

/* The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
 * High speed endpoint descriptors can define "the number of additional
 * transaction opportunities per microframe", but that goes in the Max Burst
 * endpoint context field.
 */
static u32 xhci_get_endpoint_mult(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	if (udev->speed != USB_SPEED_SUPER ||
			!usb_endpoint_xfer_isoc(&ep->desc))
		return 0;
	return ep->ss_ep_comp.bmAttributes;
}

static u32 xhci_get_endpoint_type(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	int in;
	u32 type;

	in = usb_endpoint_dir_in(&ep->desc);
	if (usb_endpoint_xfer_control(&ep->desc)) {
		type = EP_TYPE(CTRL_EP);
	} else if (usb_endpoint_xfer_bulk(&ep->desc)) {
		if (in)
			type = EP_TYPE(BULK_IN_EP);
		else
			type = EP_TYPE(BULK_OUT_EP);
	} else if (usb_endpoint_xfer_isoc(&ep->desc)) {
		if (in)
			type = EP_TYPE(ISOC_IN_EP);
		else
			type = EP_TYPE(ISOC_OUT_EP);
	} else if (usb_endpoint_xfer_int(&ep->desc)) {
		if (in)
			type = EP_TYPE(INT_IN_EP);
		else
			type = EP_TYPE(INT_OUT_EP);
	} else {
		type = 0;
	}
	return type;
}

/* Return the maximum endpoint service interval time (ESIT) payload.
 * Basically, this is the maxpacket size, multiplied by the burst size
 * and mult size.
 */
static u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	int max_burst;
	int max_packet;

	/* Only applies for interrupt or isochronous endpoints */
	if (usb_endpoint_xfer_control(&ep->desc) ||
			usb_endpoint_xfer_bulk(&ep->desc))
		return 0;

	if (udev->speed == USB_SPEED_SUPER)
		return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);

	max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
	max_burst = (usb_endpoint_maxp(&ep->desc) & 0x1800) >> 11;
	/* A 0 in max burst means 1 transfer per ESIT */
	return max_packet * (max_burst + 1);
}
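
/*
 * Worked example of the ESIT payload math above: a high speed isoc endpoint
 * with a max packet size of 1024 and 2 additional transaction opportunities
 * per microframe (wMaxPacketSize bits 12:11 == 2) yields
 * max_packet * (max_burst + 1) = 1024 * 3 = 3072 bytes per interval.
 */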

/* Set up an endpoint with one ring segment.  Do not allocate stream rings.
 * Drivers will have to call usb_alloc_streams() to do that.
 */
int xhci_endpoint_init(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_device *udev,
		struct usb_host_endpoint *ep,
		gfp_t mem_flags)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_ring *ep_ring;
	unsigned int max_packet;
	unsigned int max_burst;
	enum xhci_ring_type type;
	u32 max_esit_payload;
	u32 endpoint_type;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	endpoint_type = xhci_get_endpoint_type(udev, ep);
	if (!endpoint_type)
		return -EINVAL;
	ep_ctx->ep_info2 = cpu_to_le32(endpoint_type);

	type = usb_endpoint_type(&ep->desc);
	/* Set up the endpoint ring */
	virt_dev->eps[ep_index].new_ring =
		xhci_ring_alloc(xhci, 2, 1, type, mem_flags);
	if (!virt_dev->eps[ep_index].new_ring) {
		/* Attempt to use the ring cache */
		if (virt_dev->num_rings_cached == 0)
			return -ENOMEM;
		virt_dev->eps[ep_index].new_ring =
			virt_dev->ring_cache[virt_dev->num_rings_cached];
		virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
		virt_dev->num_rings_cached--;
		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
					1, type);
	}
	virt_dev->eps[ep_index].skip = false;
	ep_ring = virt_dev->eps[ep_index].new_ring;
	ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma | ep_ring->cycle_state);

	ep_ctx->ep_info = cpu_to_le32(xhci_get_endpoint_interval(udev, ep)
			| EP_MULT(xhci_get_endpoint_mult(udev, ep)));

	/* FIXME dig Mult and streams info out of ep companion desc */

	/* Allow 3 retries for everything but isoc;
	 * CErr shall be set to 0 for Isoch endpoints.
	 */
	if (!usb_endpoint_xfer_isoc(&ep->desc))
		ep_ctx->ep_info2 |= cpu_to_le32(ERROR_COUNT(3));
	else
		ep_ctx->ep_info2 |= cpu_to_le32(ERROR_COUNT(0));

	/* Set the max packet size and max burst */
	max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
	max_burst = 0;
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		/* dig out max burst from ep companion desc */
		max_burst = ep->ss_ep_comp.bMaxBurst;
		break;
	case USB_SPEED_HIGH:
		/* Some devices get this wrong */
		if (usb_endpoint_xfer_bulk(&ep->desc))
			max_packet = 512;
		/* bits 12:11 specify the number of additional transaction
		 * opportunities per microframe (USB 2.0, section 9.6.6)
		 */
		if (usb_endpoint_xfer_isoc(&ep->desc) ||
				usb_endpoint_xfer_int(&ep->desc)) {
			max_burst = (usb_endpoint_maxp(&ep->desc)
					& 0x1800) >> 11;
		}
		break;
	case USB_SPEED_FULL:
	case USB_SPEED_LOW:
		break;
	default:
		BUG();
	}
	ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet) |
			MAX_BURST(max_burst));
	max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep);
	ep_ctx->tx_info = cpu_to_le32(MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload));

	/*
	 * XXX no idea how to calculate the average TRB buffer length for bulk
	 * endpoints, as the driver gives us no clue how big each scatter gather
	 * list entry (or buffer) is going to be.
	 *
	 * For isochronous and interrupt endpoints, we set it to the max
	 * available, until we have new API in the USB core to allow drivers to
	 * declare how much bandwidth they actually need.
	 *
	 * Normally, it would be calculated by taking the total of the buffer
	 * lengths in the TD and then dividing by the number of TRBs in a TD,
	 * including link TRBs, No-op TRBs, and Event data TRBs.  Since we don't
	 * use Event Data TRBs, and we don't chain in a link TRB on short
	 * transfers, we're basically dividing by 1.
	 *
	 * xHCI 1.0 specification indicates that the Average TRB Length should
	 * be set to 8 for control endpoints.
	 */
	if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version == 0x100)
		ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(8));
	else
		ep_ctx->tx_info |=
			cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(max_esit_payload));

	/* FIXME Debug endpoint context */
	return 0;
}
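
/*
 * For illustration, the ep_info2 dword assembled above for a hypothetical
 * high-speed bulk OUT endpoint (a sketch; see xhci.h for the field macros):
 *
 *	ep_info2 = EP_TYPE(BULK_OUT_EP)	- endpoint type from the descriptor
 *		 | ERROR_COUNT(3)	- CErr = 3 retries (non-isoc)
 *		 | MAX_PACKET(512)	- HS bulk is forced to 512 bytes
 *		 | MAX_BURST(0)		- no bursting below SuperSpeed
 */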

void xhci_endpoint_zero(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_host_endpoint *ep)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = 0;
	ep_ctx->deq = 0;
	ep_ctx->tx_info = 0;
	/* Don't free the endpoint ring until the set interface or configuration
	 * request succeeds.
	 */
}

void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info)
{
	bw_info->ep_interval = 0;
	bw_info->mult = 0;
	bw_info->num_packets = 0;
	bw_info->max_packet_size = 0;
	bw_info->type = 0;
	bw_info->max_esit_payload = 0;
}

void xhci_update_bw_info(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_input_control_ctx *ctrl_ctx,
		struct xhci_virt_device *virt_dev)
{
	struct xhci_bw_info *bw_info;
	struct xhci_ep_ctx *ep_ctx;
	unsigned int ep_type;
	int i;

	for (i = 1; i < 31; ++i) {
		bw_info = &virt_dev->eps[i].bw_info;

		/* We can't tell what endpoint type is being dropped, but
		 * unconditionally clearing the bandwidth info for non-periodic
		 * endpoints should be harmless because the info will never be
		 * set in the first place.
		 */
		if (!EP_IS_ADDED(ctrl_ctx, i) && EP_IS_DROPPED(ctrl_ctx, i)) {
			/* Dropped endpoint */
			xhci_clear_endpoint_bw_info(bw_info);
			continue;
		}

		if (EP_IS_ADDED(ctrl_ctx, i)) {
			ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, i);
			ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));

			/* Ignore non-periodic endpoints */
			if (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
					ep_type != ISOC_IN_EP &&
					ep_type != INT_IN_EP)
				continue;

			/* Added or changed endpoint */
			bw_info->ep_interval = CTX_TO_EP_INTERVAL(
					le32_to_cpu(ep_ctx->ep_info));
			/* Number of packets and mult are zero-based in the
			 * input context, but we want one-based for the
			 * interval table.
			 */
			bw_info->mult = CTX_TO_EP_MULT(
					le32_to_cpu(ep_ctx->ep_info)) + 1;
			bw_info->num_packets = CTX_TO_MAX_BURST(
					le32_to_cpu(ep_ctx->ep_info2)) + 1;
			bw_info->max_packet_size = MAX_PACKET_DECODED(
					le32_to_cpu(ep_ctx->ep_info2));
			bw_info->type = ep_type;
			bw_info->max_esit_payload = CTX_TO_MAX_ESIT_PAYLOAD(
					le32_to_cpu(ep_ctx->tx_info));
		}
	}
}
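
/*
 * Example of the zero-based to one-based conversion above: an added isoc
 * endpoint whose input context encodes Mult = 1 and Max Burst = 2 is
 * recorded as bw_info->mult = 2 and bw_info->num_packets = 3.
 */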

/* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
 * Useful when you want to change one particular aspect of the endpoint and then
 * issue a configure endpoint command.
 */
void xhci_endpoint_copy(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx,
		unsigned int ep_index)
{
	struct xhci_ep_ctx *out_ep_ctx;
	struct xhci_ep_ctx *in_ep_ctx;

	out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);

	in_ep_ctx->ep_info = out_ep_ctx->ep_info;
	in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
	in_ep_ctx->deq = out_ep_ctx->deq;
	in_ep_ctx->tx_info = out_ep_ctx->tx_info;
}
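
/*
 * Typical calling pattern (sketch only; error handling and locking elided):
 *
 *	xhci_endpoint_copy(xhci, virt_dev->in_ctx, virt_dev->out_ctx, ep_index);
 *	... modify one field of the input endpoint context ...
 *	... issue a Configure Endpoint command ...
 */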

/* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
 * Useful when you want to change one particular aspect of the endpoint and then
 * issue a configure endpoint command.  Only the context entries field matters,
 * but we'll copy the whole thing anyway.
 */
void xhci_slot_copy(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx)
{
	struct xhci_slot_ctx *in_slot_ctx;
	struct xhci_slot_ctx *out_slot_ctx;

	in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);

	in_slot_ctx->dev_info = out_slot_ctx->dev_info;
	in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
	in_slot_ctx->tt_info = out_slot_ctx->tt_info;
	in_slot_ctx->dev_state = out_slot_ctx->dev_state;
}

/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
	int i;
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Allocating %d scratchpad buffers", num_sp);

	if (!num_sp)
		return 0;

	xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags);
	if (!xhci->scratchpad)
		goto fail_sp;

	xhci->scratchpad->sp_array = dma_alloc_coherent(dev,
			num_sp * sizeof(u64),
			&xhci->scratchpad->sp_dma, flags);
	if (!xhci->scratchpad->sp_array)
		goto fail_sp2;

	xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags);
	if (!xhci->scratchpad->sp_buffers)
		goto fail_sp3;

	xhci->scratchpad->sp_dma_buffers =
		kzalloc(sizeof(dma_addr_t) * num_sp, flags);
	if (!xhci->scratchpad->sp_dma_buffers)
		goto fail_sp4;

	/* DCBAA entry 0 points at the scratchpad buffer array */
	xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
	for (i = 0; i < num_sp; i++) {
		dma_addr_t dma;
		void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
				flags);
		if (!buf)
			goto fail_sp5;

		xhci->scratchpad->sp_array[i] = dma;
		xhci->scratchpad->sp_buffers[i] = buf;
		xhci->scratchpad->sp_dma_buffers[i] = dma;
	}

	return 0;

 fail_sp5:
	/* unwind only the page buffers allocated so far */
	for (i = i - 1; i >= 0; i--) {
		dma_free_coherent(dev, xhci->page_size,
				  xhci->scratchpad->sp_buffers[i],
				  xhci->scratchpad->sp_dma_buffers[i]);
	}
	kfree(xhci->scratchpad->sp_dma_buffers);

 fail_sp4:
	kfree(xhci->scratchpad->sp_buffers);

 fail_sp3:
	dma_free_coherent(dev, num_sp * sizeof(u64),
			  xhci->scratchpad->sp_array,
			  xhci->scratchpad->sp_dma);

 fail_sp2:
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;

 fail_sp:
	return -ENOMEM;
}

static void scratchpad_free(struct xhci_hcd *xhci)
{
	int num_sp;
	int i;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	if (!xhci->scratchpad)
		return;

	num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	for (i = 0; i < num_sp; i++) {
		dma_free_coherent(&pdev->dev, xhci->page_size,
				  xhci->scratchpad->sp_buffers[i],
				  xhci->scratchpad->sp_dma_buffers[i]);
	}
	kfree(xhci->scratchpad->sp_dma_buffers);
	kfree(xhci->scratchpad->sp_buffers);
	dma_free_coherent(&pdev->dev, num_sp * sizeof(u64),
			  xhci->scratchpad->sp_array,
			  xhci->scratchpad->sp_dma);
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;
}

struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
		bool allocate_in_ctx, bool allocate_completion,
		gfp_t mem_flags)
{
	struct xhci_command *command;

	command = kzalloc(sizeof(*command), mem_flags);
	if (!command)
		return NULL;

	if (allocate_in_ctx) {
		command->in_ctx =
			xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
					mem_flags);
		if (!command->in_ctx) {
			kfree(command);
			return NULL;
		}
	}

	if (allocate_completion) {
		command->completion =
			kzalloc(sizeof(struct completion), mem_flags);
		if (!command->completion) {
			xhci_free_container_ctx(xhci, command->in_ctx);
			kfree(command);
			return NULL;
		}
		init_completion(command->completion);
	}

	command->status = 0;
	INIT_LIST_HEAD(&command->cmd_list);
	return command;
}
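
/*
 * Typical usage of the command allocator (sketch only; queueing and error
 * handling elided):
 *
 *	command = xhci_alloc_command(xhci, true, true, GFP_KERNEL);
 *	... submit the command and wait on command->completion ...
 *	xhci_free_command(xhci, command);
 */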

void xhci_urb_free_priv(struct xhci_hcd *xhci, struct urb_priv *urb_priv)
{
	if (urb_priv) {
		/* The TDs were allocated as one contiguous block, so freeing
		 * td[0] releases all of them.
		 */
		kfree(urb_priv->td[0]);
		kfree(urb_priv);
	}
}

void xhci_free_command(struct xhci_hcd *xhci,
		struct xhci_command *command)
{
	xhci_free_container_ctx(xhci,
			command->in_ctx);
	kfree(command->completion);
	kfree(command);
}

void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	struct dev_info *dev_info, *next;
	struct xhci_cd *cur_cd, *next_cd;
	unsigned long flags;
	int size;
	int i, j, num_ports;

	/* Free the Event Ring Segment Table and the actual Event Ring */
	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
	if (xhci->erst.entries)
		dma_free_coherent(&pdev->dev, size,
				xhci->erst.entries, xhci->erst.erst_dma_addr);
	xhci->erst.entries = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed ERST");
	if (xhci->event_ring)
		xhci_ring_free(xhci, xhci->event_ring);
	xhci->event_ring = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed event ring");

	if (xhci->lpm_command)
		xhci_free_command(xhci, xhci->lpm_command);
	xhci->cmd_ring_reserved_trbs = 0;
	if (xhci->cmd_ring)
		xhci_ring_free(xhci, xhci->cmd_ring);
	xhci->cmd_ring = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed command ring");
	list_for_each_entry_safe(cur_cd, next_cd,
			&xhci->cancel_cmd_list, cancel_cmd_list) {
		list_del(&cur_cd->cancel_cmd_list);
		kfree(cur_cd);
	}

	for (i = 1; i < MAX_HC_SLOTS; ++i)
		xhci_free_virt_device(xhci, i);

	if (xhci->segment_pool)
		dma_pool_destroy(xhci->segment_pool);
	xhci->segment_pool = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed segment pool");

	if (xhci->device_pool)
		dma_pool_destroy(xhci->device_pool);
	xhci->device_pool = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed device context pool");

	if (xhci->small_streams_pool)
		dma_pool_destroy(xhci->small_streams_pool);
	xhci->small_streams_pool = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Freed small stream array pool");

	if (xhci->medium_streams_pool)
		dma_pool_destroy(xhci->medium_streams_pool);
	xhci->medium_streams_pool = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Freed medium stream array pool");

	if (xhci->dcbaa)
		dma_free_coherent(&pdev->dev, sizeof(*xhci->dcbaa),
				xhci->dcbaa, xhci->dcbaa->dma);
	xhci->dcbaa = NULL;

	scratchpad_free(xhci);

	spin_lock_irqsave(&xhci->lock, flags);
	list_for_each_entry_safe(dev_info, next, &xhci->lpm_failed_devs, list) {
		list_del(&dev_info->list);
		kfree(dev_info);
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (!xhci->rh_bw)
		goto no_bw;

	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	for (i = 0; i < num_ports; i++) {
		struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
		for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
			struct list_head *ep = &bwt->interval_bw[j].endpoints;
			while (!list_empty(ep))
				list_del_init(ep->next);
		}
	}

	for (i = 0; i < num_ports; i++) {
		struct xhci_tt_bw_info *tt, *n;
		list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
			list_del(&tt->tt_list);
			kfree(tt);
		}
	}

no_bw:
	xhci->num_usb2_ports = 0;
	xhci->num_usb3_ports = 0;
	xhci->num_active_eps = 0;
	kfree(xhci->usb2_ports);
	kfree(xhci->usb3_ports);
	kfree(xhci->port_array);
	kfree(xhci->rh_bw);
	kfree(xhci->ext_caps);

	xhci->page_size = 0;
	xhci->page_shift = 0;
	xhci->bus_state[0].bus_suspended = 0;
	xhci->bus_state[1].bus_suspended = 0;
}

static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
		struct xhci_segment *input_seg,
		union xhci_trb *start_trb,
		union xhci_trb *end_trb,
		dma_addr_t input_dma,
		struct xhci_segment *result_seg,
		char *test_name, int test_number)
{
	unsigned long long start_dma;
	unsigned long long end_dma;
	struct xhci_segment *seg;

	start_dma = xhci_trb_virt_to_dma(input_seg, start_trb);
	end_dma = xhci_trb_virt_to_dma(input_seg, end_trb);

	seg = trb_in_td(input_seg, start_trb, end_trb, input_dma);
	if (seg != result_seg) {
		xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
				test_name, test_number);
		xhci_warn(xhci, "Tested TRB math w/ seg %p and "
				"input DMA 0x%llx\n",
				input_seg,
				(unsigned long long) input_dma);
		xhci_warn(xhci, "starting TRB %p (0x%llx DMA), "
				"ending TRB %p (0x%llx DMA)\n",
				start_trb, start_dma,
				end_trb, end_dma);
		xhci_warn(xhci, "Expected seg %p, got seg %p\n",
				result_seg, seg);
		return -1;
	}
	return 0;
}

/* TRB math checks for xhci_trb_in_td(), using the command and event rings. */
static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
{
	struct {
		dma_addr_t		input_dma;
		struct xhci_segment	*result_seg;
	} simple_test_vector[] = {
		/* A zeroed DMA field should fail */
		{ 0, NULL },
		/* One TRB before the ring start should fail */
		{ xhci->event_ring->first_seg->dma - 16, NULL },
		/* One byte before the ring start should fail */
		{ xhci->event_ring->first_seg->dma - 1, NULL },
		/* Starting TRB should succeed */
		{ xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
		/* Ending TRB should succeed */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
			xhci->event_ring->first_seg },
		/* One byte after the ring end should fail */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
		/* One TRB after the ring end should fail */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
		/* An address of all ones should fail */
		{ (dma_addr_t) (~0), NULL },
	};
	struct {
		struct xhci_segment	*input_seg;
		union xhci_trb		*start_trb;
		union xhci_trb		*end_trb;
		dma_addr_t		input_dma;
		struct xhci_segment	*result_seg;
	} complex_test_vector[] = {
		/* Test feeding a valid DMA address from a different ring */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->event_ring->first_seg->trbs,
			.end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* Test feeding a valid end TRB from a different ring */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->event_ring->first_seg->trbs,
			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* Test feeding a valid start and end TRB from a different ring */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->cmd_ring->first_seg->trbs,
			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* TRB in this ring, but after this TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[0],
			.end_trb = &xhci->event_ring->first_seg->trbs[3],
			.input_dma = xhci->event_ring->first_seg->dma + 4*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but before this TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[3],
			.end_trb = &xhci->event_ring->first_seg->trbs[6],
			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but after this wrapped TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but before this wrapped TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16,
			.result_seg = NULL,
		},
		/* TRB not in this ring, and we have a wrapped TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
	};

	unsigned int num_tests;
	int i, ret;

	num_tests = ARRAY_SIZE(simple_test_vector);
	for (i = 0; i < num_tests; i++) {
		ret = xhci_test_trb_in_td(xhci,
				xhci->event_ring->first_seg,
				xhci->event_ring->first_seg->trbs,
				&xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
				simple_test_vector[i].input_dma,
				simple_test_vector[i].result_seg,
				"Simple", i);
		if (ret < 0)
			return ret;
	}

	num_tests = ARRAY_SIZE(complex_test_vector);
	for (i = 0; i < num_tests; i++) {
		ret = xhci_test_trb_in_td(xhci,
				complex_test_vector[i].input_seg,
				complex_test_vector[i].start_trb,
				complex_test_vector[i].end_trb,
				complex_test_vector[i].input_dma,
				complex_test_vector[i].result_seg,
				"Complex", i);
		if (ret < 0)
			return ret;
	}
	xhci_dbg(xhci, "TRB math tests passed.\n");
	return 0;
}

static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
{
	u64 temp;
	dma_addr_t deq;

	deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
			xhci->event_ring->dequeue);
	if (deq == 0 && !in_interrupt())
		xhci_warn(xhci, "WARN something wrong with SW event ring "
				"dequeue ptr.\n");
	/* Update HC event ring dequeue pointer */
	temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp &= ERST_PTR_MASK;
	/* EHB is RW1C: writing 1 would clear it, writing 0 leaves it alone.
	 * Mask it out of the value we write back, so we don't acknowledge
	 * the Event Handler Busy flag here; there might be more events to
	 * service.
	 */
	temp &= ~ERST_EHB;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Write event ring dequeue pointer, "
			"preserving EHB bit");
	xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
			&xhci->ir_set->erst_dequeue);
}

static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
		__le32 __iomem *addr, u8 major_revision, int max_caps)
{
	u32 temp, port_offset, port_count;
	int i;

	if (major_revision > 0x03) {
		xhci_warn(xhci, "Ignoring unknown port speed, "
				"Ext Cap %p, revision = 0x%x\n",
				addr, major_revision);
		/* Ignoring port protocol we can't understand. FIXME */
		return;
	}

	/* Port offset and count in the third dword, see section 7.2 */
	temp = xhci_readl(xhci, addr + 2);
	port_offset = XHCI_EXT_PORT_OFF(temp);
	port_count = XHCI_EXT_PORT_COUNT(temp);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Ext Cap %p, port offset = %u, "
			"count = %u, revision = 0x%x",
			addr, port_offset, port_count, major_revision);
	/* Port count includes the current port offset */
	if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
		/* WTF? "Valid values are ‘1’ to MaxPorts" */
		return;

	/* cache usb2 port capabilities */
	if (major_revision < 0x03 && xhci->num_ext_caps < max_caps)
		xhci->ext_caps[xhci->num_ext_caps++] = temp;

	/* Check the host's USB2 LPM capability */
	if ((xhci->hci_version == 0x96) && (major_revision != 0x03) &&
			(temp & XHCI_L1C)) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"xHCI 0.96: support USB2 software lpm");
		xhci->sw_lpm_support = 1;
	}

	if ((xhci->hci_version >= 0x100) && (major_revision != 0x03)) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"xHCI 1.0: support USB2 software lpm");
		xhci->sw_lpm_support = 1;
		if (temp & XHCI_HLC) {
			xhci_dbg_trace(xhci, trace_xhci_dbg_init,
					"xHCI 1.0: support USB2 hardware lpm");
			xhci->hw_lpm_support = 1;
		}
	}

	port_offset--;
	for (i = port_offset; i < (port_offset + port_count); i++) {
		/* Duplicate entry.  Ignore the port if the revisions differ. */
		if (xhci->port_array[i] != 0) {
			xhci_warn(xhci, "Duplicate port entry, Ext Cap %p,"
					" port %u\n", addr, i);
			xhci_warn(xhci, "Port was marked as USB %u, "
					"duplicated as USB %u\n",
					xhci->port_array[i], major_revision);
			/* Only adjust the roothub port counts if we haven't
			 * found a similar duplicate.
			 */
			if (xhci->port_array[i] != major_revision &&
					xhci->port_array[i] != DUPLICATE_ENTRY) {
				if (xhci->port_array[i] == 0x03)
					xhci->num_usb3_ports--;
				else
					xhci->num_usb2_ports--;
				xhci->port_array[i] = DUPLICATE_ENTRY;
			}
			/* FIXME: Should we disable the port? */
			continue;
		}
		xhci->port_array[i] = major_revision;
		if (major_revision == 0x03)
			xhci->num_usb3_ports++;
		else
			xhci->num_usb2_ports++;
	}
	/* FIXME: Should we disable ports not in the Extended Capabilities? */
}
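
/*
 * Example of the offset bookkeeping above: a USB 3.0 protocol capability
 * with Compatible Port Offset 5 and Compatible Port Count 2 claims roothub
 * ports 5 and 6, recorded at port_array[4] and port_array[5].
 */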

/*
 * Scan the Extended Capabilities for the "Supported Protocol Capabilities" that
 * specify what speeds each port is supposed to be.  We can't count on the port
 * speed bits in the PORTSC register being correct until a device is connected,
 * but we need to set up the two fake roothubs with the correct number of USB
 * 3.0 and USB 2.0 ports at host controller initialization time.
 */
static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
{
	__le32 __iomem *addr, *tmp_addr;
	u32 offset, tmp_offset;
	unsigned int num_ports;
	int i, j, port_index;
	int cap_count = 0;

	addr = &xhci->cap_regs->hcc_params;
	offset = XHCI_HCC_EXT_CAPS(xhci_readl(xhci, addr));
	if (offset == 0) {
		xhci_err(xhci, "No Extended Capability registers, "
				"unable to set up roothub.\n");
		return -ENODEV;
	}

	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	xhci->port_array = kzalloc(sizeof(*xhci->port_array)*num_ports, flags);
	if (!xhci->port_array)
		return -ENOMEM;

	xhci->rh_bw = kzalloc(sizeof(*xhci->rh_bw)*num_ports, flags);
	if (!xhci->rh_bw)
		return -ENOMEM;
	for (i = 0; i < num_ports; i++) {
		struct xhci_interval_bw_table *bw_table;

		INIT_LIST_HEAD(&xhci->rh_bw[i].tts);
		bw_table = &xhci->rh_bw[i].bw_table;
		for (j = 0; j < XHCI_MAX_INTERVAL; j++)
			INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
	}

	/*
	 * For whatever reason, the first capability offset is from the
	 * capability register base, not from the HCCPARAMS register.
	 * See section 5.3.6 for offset calculation.
	 */
	addr = &xhci->cap_regs->hc_capbase + offset;

	tmp_addr = addr;
	tmp_offset = offset;

	/* count extended protocol capability entries for later caching */
	do {
		u32 cap_id;
		cap_id = xhci_readl(xhci, tmp_addr);
		if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL)
			cap_count++;
		tmp_offset = XHCI_EXT_CAPS_NEXT(cap_id);
		tmp_addr += tmp_offset;
	} while (tmp_offset);
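
	/*
	 * Layout of the extended capability register decoded above and below
	 * (xHCI section 7):
	 *	bits  7:0	capability ID (2 == Supported Protocol)
	 *	bits 15:8	next capability pointer, in 32-bit words
	 *	bits 31:24	major revision, for protocol capabilities
	 *			(0x02 = USB 2.0, 0x03 = USB 3.0)
	 */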
	xhci->ext_caps = kzalloc(sizeof(*xhci->ext_caps) * cap_count, flags);
	if (!xhci->ext_caps)
		return -ENOMEM;

	while (1) {
		u32 cap_id;

		cap_id = xhci_readl(xhci, addr);
		if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL)
			xhci_add_in_port(xhci, num_ports, addr,
					(u8) XHCI_EXT_PORT_MAJOR(cap_id),
					cap_count);
		offset = XHCI_EXT_CAPS_NEXT(cap_id);
		if (!offset || (xhci->num_usb2_ports + xhci->num_usb3_ports)
				== num_ports)
			break;
		/*
		 * Once you're into the Extended Capabilities, the offset is
		 * always relative to the register holding the offset.
		 */
		addr += offset;
	}

	if (xhci->num_usb2_ports == 0 && xhci->num_usb3_ports == 0) {
		xhci_warn(xhci, "No ports on the roothubs?\n");
		return -ENODEV;
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Found %u USB 2.0 ports and %u USB 3.0 ports.",
			xhci->num_usb2_ports, xhci->num_usb3_ports);

	/* Place limits on the number of roothub ports so that the hub
	 * descriptors aren't longer than the USB core will allocate.
	 */
	if (xhci->num_usb3_ports > 15) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"Limiting USB 3.0 roothub ports to 15.");
		xhci->num_usb3_ports = 15;
	}
	if (xhci->num_usb2_ports > USB_MAXCHILDREN) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"Limiting USB 2.0 roothub ports to %u.",
				USB_MAXCHILDREN);
		xhci->num_usb2_ports = USB_MAXCHILDREN;
	}

	/*
	 * Note we could have all USB 3.0 ports, or all USB 2.0 ports.
	 * Not sure how the USB core will handle a hub with no ports...
	 */
	if (xhci->num_usb2_ports) {
		xhci->usb2_ports = kmalloc(sizeof(*xhci->usb2_ports)*
				xhci->num_usb2_ports, flags);
		if (!xhci->usb2_ports)
			return -ENOMEM;

		port_index = 0;
		for (i = 0; i < num_ports; i++) {
			if (xhci->port_array[i] == 0x03 ||
					xhci->port_array[i] == 0 ||
					xhci->port_array[i] == DUPLICATE_ENTRY)
				continue;

			xhci->usb2_ports[port_index] =
				&xhci->op_regs->port_status_base +
				NUM_PORT_REGS*i;
			xhci_dbg_trace(xhci, trace_xhci_dbg_init,
					"USB 2.0 port at index %u, "
					"addr = %p", i,
					xhci->usb2_ports[port_index]);
			port_index++;
			if (port_index == xhci->num_usb2_ports)
				break;
		}
	}
	if (xhci->num_usb3_ports) {
		xhci->usb3_ports = kmalloc(sizeof(*xhci->usb3_ports)*
				xhci->num_usb3_ports, flags);
		if (!xhci->usb3_ports)
			return -ENOMEM;

		port_index = 0;
		for (i = 0; i < num_ports; i++)
			if (xhci->port_array[i] == 0x03) {
				xhci->usb3_ports[port_index] =
					&xhci->op_regs->port_status_base +
					NUM_PORT_REGS*i;
				xhci_dbg_trace(xhci, trace_xhci_dbg_init,
						"USB 3.0 port at index %u, "
						"addr = %p", i,
						xhci->usb3_ports[port_index]);
				port_index++;
				if (port_index == xhci->num_usb3_ports)
					break;
			}
	}
	return 0;
}

int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
	dma_addr_t dma;
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	unsigned int val, val2;
	u64 val_64;
	struct xhci_segment *seg;
	u32 page_size, temp;
	int i;

	INIT_LIST_HEAD(&xhci->lpm_failed_devs);
	INIT_LIST_HEAD(&xhci->cancel_cmd_list);

	page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Supported page size register = 0x%x", page_size);
	for (i = 0; i < 16; i++) {
		if ((0x1 & page_size) != 0)
			break;
		page_size = page_size >> 1;
	}
	if (i < 16)
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Supported page size of %iK", (1 << (i+12)) / 1024);
	else
		xhci_warn(xhci, "WARN: no supported page size\n");
	/* Use 4K pages, since that's common and the minimum the HC supports */
	xhci->page_shift = 12;
	xhci->page_size = 1 << xhci->page_shift;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"HCD page size set to %iK", xhci->page_size / 1024);

	/*
	 * Program the Number of Device Slots Enabled field in the CONFIG
	 * register with the max value of slots the HC can handle.
	 */
	val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1));
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// xHC can handle at most %d device slots.", val);
	val2 = xhci_readl(xhci, &xhci->op_regs->config_reg);
	val |= (val2 & ~HCS_SLOTS_MASK);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Setting Max device slots reg = 0x%x.", val);
	xhci_writel(xhci, val, &xhci->op_regs->config_reg);

	/*
	 * Section 5.4.8 - doorbell array must be
	 * "physically contiguous and 64-byte (cache line) aligned".
	 */
	xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
			flags);
	if (!xhci->dcbaa)
		goto fail;
	memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
	xhci->dcbaa->dma = dma;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Device context base array address = 0x%llx (DMA), %p (virt)",
			(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
	xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);

	/*
	 * Initialize the ring segment pool.  The ring must be a contiguous
	 * structure comprised of TRBs.  The TRBs must be 16 byte aligned,
	 * however, the command ring segment needs 64-byte aligned segments,
	 * so we pick the greater alignment need.
	 */
	xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
			TRB_SEGMENT_SIZE, 64, xhci->page_size);

	/* See Table 46 and Note on Figure 55 */
	xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
			2112, 64, xhci->page_size);
	if (!xhci->segment_pool || !xhci->device_pool)
		goto fail;

	/* Linear stream context arrays don't have any boundary restrictions,
	 * and only need to be 16-byte aligned.
	 */
	xhci->small_streams_pool =
		dma_pool_create("xHCI 256 byte stream ctx arrays",
			dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
	xhci->medium_streams_pool =
		dma_pool_create("xHCI 1KB stream ctx arrays",
			dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
	/* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
	 * will be allocated with dma_alloc_coherent()
	 */

	if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
		goto fail;

	/* Set up the command ring to have one segment for now. */
	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, flags);
	if (!xhci->cmd_ring)
		goto fail;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Allocated command ring at %p", xhci->cmd_ring);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%llx",
			(unsigned long long)xhci->cmd_ring->first_seg->dma);

	/* Set the address in the Command Ring Control register */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Setting command ring address to 0x%016llx",
			(unsigned long long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
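
	/*
	 * For reference, the CRCR layout handled above (xHCI 5.4.5): bits 63:6
	 * hold the command ring pointer, bit 0 is the Ring Cycle State, and
	 * the remaining low bits (Command Stop/Abort/Ring Running) are the
	 * reserved bits preserved by CMD_RING_RSVD_BITS.
	 */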
	xhci_dbg_cmd_ptrs(xhci);

	xhci->lpm_command = xhci_alloc_command(xhci, true, true, flags);
	if (!xhci->lpm_command)
		goto fail;

	/* Reserve one command ring TRB for disabling LPM.
	 * Since the USB core grabs the shared usb_bus bandwidth mutex before
	 * disabling LPM, we only need to reserve one TRB for all devices.
	 */
	xhci->cmd_ring_reserved_trbs++;

	val = xhci_readl(xhci, &xhci->cap_regs->db_off);
	val &= DBOFF_MASK;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Doorbell array is located at offset 0x%x"
			" from cap regs base addr", val);
	xhci->dba = (void __iomem *) xhci->cap_regs + val;
	xhci_dbg_regs(xhci);
	xhci_print_run_regs(xhci);
	/* Set ir_set to interrupt register set 0 */
	xhci->ir_set = &xhci->run_regs->ir_set[0];

	/*
	 * Event ring setup: Allocate a normal ring, but also setup
	 * the event ring segment table (ERST).  Section 4.9.3.
	 */
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
						flags);
	if (!xhci->event_ring)
		goto fail;
	if (xhci_check_trb_in_td_math(xhci, flags) < 0)
		goto fail;

	xhci->erst.entries = dma_alloc_coherent(dev,
			sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma,
			flags);
	if (!xhci->erst.entries)
		goto fail;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Allocated event ring segment table at 0x%llx",
			(unsigned long long)dma);
	memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
	xhci->erst.num_entries = ERST_NUM_SEGS;
	xhci->erst.erst_dma_addr = dma;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx",
			xhci->erst.num_entries,
			xhci->erst.entries,
			(unsigned long long)xhci->erst.erst_dma_addr);

	/* set ring base address and size for each segment table entry */
	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
		struct xhci_erst_entry *entry = &xhci->erst.entries[val];
		entry->seg_addr = cpu_to_le64(seg->dma);
		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
		entry->rsvd = 0;
		seg = seg->next;
	}

	/* set ERST count with the number of entries in the segment table */
	val = xhci_readl(xhci, &xhci->ir_set->erst_size);
	val &= ERST_SIZE_MASK;
	val |= ERST_NUM_SEGS;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Write ERST size = %i to ir_set 0 (some bits preserved)",
			val);
	xhci_writel(xhci, val, &xhci->ir_set->erst_size);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Set ERST entries to point to event ring.");
	/* set the segment table base address */
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Set ERST base address for ir_set 0 = 0x%llx",
			(unsigned long long)xhci->erst.erst_dma_addr);
	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	val_64 &= ERST_PTR_MASK;
	val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
	xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);

	/* Set the event ring dequeue address */
	xhci_set_hc_event_deq(xhci);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Wrote ERST address to ir_set 0.");
	xhci_print_ir_set(xhci, 0);

	/*
	 * XXX: Might need to set the Interrupter Moderation Register to
	 * something other than the default (~1ms minimum between interrupts).
	 * See section 5.5.1.2.
	 */
	init_completion(&xhci->addr_dev);
	for (i = 0; i < MAX_HC_SLOTS; ++i)
		xhci->devs[i] = NULL;
	for (i = 0; i < USB_MAXCHILDREN; ++i) {
		xhci->bus_state[0].resume_done[i] = 0;
		xhci->bus_state[1].resume_done[i] = 0;
	}

	if (scratchpad_alloc(xhci, flags))
		goto fail;
	if (xhci_setup_port_arrays(xhci, flags))
		goto fail;

	/* Enable USB 3.0 device notifications for function remote wake, which
	 * is necessary for allowing USB 3.0 devices to do remote wakeup from
	 * U3 (device suspend).
	 */
	temp = xhci_readl(xhci, &xhci->op_regs->dev_notification);
	temp &= ~DEV_NOTE_MASK;
	temp |= DEV_NOTE_FWAKE;
	xhci_writel(xhci, temp, &xhci->op_regs->dev_notification);

	return 0;

fail:
	xhci_warn(xhci, "Couldn't initialize memory\n");
	xhci_halt(xhci);
	xhci_reset(xhci);
	xhci_mem_cleanup(xhci);
	return -ENOMEM;
}