xhci-mem.c

/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/usb.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/dmapool.h>

#include "xhci.h"
#include "xhci-trace.h"

/*
 * Allocates a generic ring segment from the ring pool, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 *
 * Section 4.11.1.1:
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 */
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
		unsigned int cycle_state, gfp_t flags)
{
	struct xhci_segment *seg;
	dma_addr_t dma;
	int i;

	seg = kzalloc(sizeof *seg, flags);
	if (!seg)
		return NULL;

	seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
	if (!seg->trbs) {
		kfree(seg);
		return NULL;
	}

	memset(seg->trbs, 0, TRB_SEGMENT_SIZE);
	/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
	if (cycle_state == 0) {
		for (i = 0; i < TRBS_PER_SEGMENT; i++)
			seg->trbs[i].link.control |= TRB_CYCLE;
	}
	seg->dma = dma;
	seg->next = NULL;
	return seg;
}

static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
	if (seg->trbs) {
		dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
		seg->trbs = NULL;
	}
	kfree(seg);
}
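
/*
 * Free every segment of a ring.  The segment list is circular, so walk from
 * first->next back around to @first, then free @first itself.
 */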
static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
		struct xhci_segment *first)
{
	struct xhci_segment *seg;

	seg = first->next;
	while (seg != first) {
		struct xhci_segment *next = seg->next;
		xhci_segment_free(xhci, seg);
		seg = next;
	}
	xhci_segment_free(xhci, first);
}

/*
 * Make the prev segment point to the next segment.
 *
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * DMA address of the next segment.  The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 */
static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
		struct xhci_segment *next, enum xhci_ring_type type)
{
	u32 val;

	if (!prev || !next)
		return;
	prev->next = next;
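	/* Event ring segments are chained through the ERST, not by link TRBs */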
	if (type != TYPE_EVENT) {
		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
			cpu_to_le64(next->dma);

		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
		val &= ~TRB_TYPE_BITMASK;
		val |= TRB_TYPE(TRB_LINK);
		/* Always set the chain bit with 0.95 hardware */
		/* Set chain bit for isoc rings on AMD 0.96 host */
		if (xhci_link_trb_quirk(xhci) ||
				(type == TYPE_ISOC &&
				 (xhci->quirks & XHCI_AMD_0x96_HOST)))
			val |= TRB_CHAIN;
		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
	}
}

/*
 * Link the ring to the new segments.
 * Set Toggle Cycle for the new ring if needed.
 */
static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *first, struct xhci_segment *last,
		unsigned int num_segs)
{
	struct xhci_segment *next;

	if (!ring || !first || !last)
		return;

	next = ring->enq_seg->next;
	xhci_link_segments(xhci, ring->enq_seg, first, ring->type);
	xhci_link_segments(xhci, last, next, ring->type);
	ring->num_segs += num_segs;
	ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;
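
	/*
	 * If the new segments were spliced in right after the old last
	 * segment, the link TRB carrying the Toggle Cycle bit is no longer
	 * at the end of the ring; move the bit to the new last segment.
	 */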
	if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
		ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
			&= ~cpu_to_le32(LINK_TOGGLE);
		last->trbs[TRBS_PER_SEGMENT-1].link.control
			|= cpu_to_le32(LINK_TOGGLE);
		ring->last_seg = last;
	}
}

/* XXX: Do we need the hcd structure in all these functions? */
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	if (!ring)
		return;
	if (ring->first_seg)
		xhci_free_segments_for_ring(xhci, ring->first_seg);
	kfree(ring);
}

static void xhci_initialize_ring_info(struct xhci_ring *ring,
		unsigned int cycle_state)
{
	/* The ring is empty, so the enqueue pointer == dequeue pointer */
	ring->enqueue = ring->first_seg->trbs;
	ring->enq_seg = ring->first_seg;
	ring->dequeue = ring->enqueue;
	ring->deq_seg = ring->first_seg;
	/* The ring is initialized to 0. The producer must write 1 to the cycle
	 * bit to handover ownership of the TRB, so PCS = 1.  The consumer must
	 * compare CCS to the cycle bit to check ownership, so CCS = 1.
	 *
	 * New rings are initialized with cycle state equal to 1; if we are
	 * handling ring expansion, set the cycle state equal to the old ring.
	 */
	ring->cycle_state = cycle_state;
	/* Not necessary for new rings, but needed for re-initialized rings */
	ring->enq_updates = 0;
	ring->deq_updates = 0;

	/*
	 * Each segment has a link TRB, and leave an extra TRB for SW
	 * accounting purpose
	 */
	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
}

/* Allocate segments and link them for a ring */
static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
		struct xhci_segment **first, struct xhci_segment **last,
		unsigned int num_segs, unsigned int cycle_state,
		enum xhci_ring_type type, gfp_t flags)
{
	struct xhci_segment *prev;

	prev = xhci_segment_alloc(xhci, cycle_state, flags);
	if (!prev)
		return -ENOMEM;
	num_segs--;

	*first = prev;
	while (num_segs > 0) {
		struct xhci_segment *next;

		next = xhci_segment_alloc(xhci, cycle_state, flags);
		if (!next) {
			prev = *first;
			while (prev) {
				next = prev->next;
				xhci_segment_free(xhci, prev);
				prev = next;
			}
			return -ENOMEM;
		}
		xhci_link_segments(xhci, prev, next, type);

		prev = next;
		num_segs--;
	}
	xhci_link_segments(xhci, prev, *first, type);
	*last = prev;

	return 0;
}

/**
 * Create a new ring with zero or more segments.
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.1 and figures 15 and 16.
 */
static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
		unsigned int num_segs, unsigned int cycle_state,
		enum xhci_ring_type type, gfp_t flags)
{
	struct xhci_ring *ring;
	int ret;

	ring = kzalloc(sizeof *(ring), flags);
	if (!ring)
		return NULL;

	ring->num_segs = num_segs;
	INIT_LIST_HEAD(&ring->td_list);
	ring->type = type;
	if (num_segs == 0)
		return ring;

	ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
			&ring->last_seg, num_segs, cycle_state, type, flags);
	if (ret)
		goto fail;

	/* Only event ring does not use link TRB */
	if (type != TYPE_EVENT) {
		/* See section 4.9.2.1 and 6.4.4.1 */
		ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
			cpu_to_le32(LINK_TOGGLE);
	}
	xhci_initialize_ring_info(ring, cycle_state);
	return ring;

fail:
	kfree(ring);
	return NULL;
}

void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		unsigned int ep_index)
{
	int rings_cached;

	rings_cached = virt_dev->num_rings_cached;
	if (rings_cached < XHCI_MAX_RINGS_CACHED) {
		virt_dev->ring_cache[rings_cached] =
			virt_dev->eps[ep_index].ring;
		virt_dev->num_rings_cached++;
		xhci_dbg(xhci, "Cached old ring, "
				"%d ring%s cached\n",
				virt_dev->num_rings_cached,
				(virt_dev->num_rings_cached > 1) ? "s" : "");
	} else {
		xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
		xhci_dbg(xhci, "Ring cache full (%d rings), "
				"freeing ring\n",
				virt_dev->num_rings_cached);
	}
	virt_dev->eps[ep_index].ring = NULL;
}

/* Zero an endpoint ring (except for link TRBs) and move the enqueue and dequeue
 * pointers to the beginning of the ring.
 */
static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
		struct xhci_ring *ring, unsigned int cycle_state,
		enum xhci_ring_type type)
{
	struct xhci_segment *seg = ring->first_seg;
	int i;

	do {
		memset(seg->trbs, 0,
				sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
		if (cycle_state == 0) {
			for (i = 0; i < TRBS_PER_SEGMENT; i++)
				seg->trbs[i].link.control |= TRB_CYCLE;
		}
		/* All endpoint rings have link TRBs */
		xhci_link_segments(xhci, seg, seg->next, type);
		seg = seg->next;
	} while (seg != ring->first_seg);
	ring->type = type;
	xhci_initialize_ring_info(ring, cycle_state);
	/* td list should be empty since all URBs have been cancelled,
	 * but just in case...
	 */
	INIT_LIST_HEAD(&ring->td_list);
}

/*
 * Expand an existing ring.
 * Look for a cached ring or allocate a new ring which has same segment numbers
 * and link the two rings.
 */
int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
		unsigned int num_trbs, gfp_t flags)
{
	struct xhci_segment *first;
	struct xhci_segment *last;
	unsigned int num_segs;
	unsigned int num_segs_needed;
	int ret;
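
	/*
	 * Each segment gives us TRBS_PER_SEGMENT - 1 usable TRBs (one slot is
	 * taken by the link TRB), so round num_trbs up to whole segments:
	 * e.g. with 64 TRBs per segment, 64 new TRBs need
	 * (64 + 62) / 63 = 2 segments.
	 */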
	num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
				(TRBS_PER_SEGMENT - 1);

	/* Allocate the number of segments we need, or double the ring size */
	num_segs = ring->num_segs > num_segs_needed ?
			ring->num_segs : num_segs_needed;

	ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
			num_segs, ring->cycle_state, ring->type, flags);
	if (ret)
		return -ENOMEM;

	xhci_link_rings(xhci, ring, first, last, num_segs);
	xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
			"ring expansion succeeded, now has %d segments",
			ring->num_segs);

	return 0;
}
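
/*
 * Contexts are arrays of 32-byte entries, or 64-byte entries if the
 * controller sets the 64-byte context size capability bit in HCC_PARAMS.
 */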
#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)

static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
		int type, gfp_t flags)
{
	struct xhci_container_ctx *ctx;

	if ((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT))
		return NULL;

	ctx = kzalloc(sizeof(*ctx), flags);
	if (!ctx)
		return NULL;

	ctx->type = type;
	ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
	if (type == XHCI_CTX_TYPE_INPUT)
		ctx->size += CTX_SIZE(xhci->hcc_params);

	ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
	if (!ctx->bytes) {
		kfree(ctx);
		return NULL;
	}
	memset(ctx->bytes, 0, ctx->size);
	return ctx;
}

static void xhci_free_container_ctx(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx)
{
	if (!ctx)
		return;
	dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
	kfree(ctx);
}

struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx)
{
	if (ctx->type != XHCI_CTX_TYPE_INPUT)
		return NULL;

	return (struct xhci_input_control_ctx *)ctx->bytes;
}

struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx)
{
	if (ctx->type == XHCI_CTX_TYPE_DEVICE)
		return (struct xhci_slot_ctx *)ctx->bytes;

	return (struct xhci_slot_ctx *)
		(ctx->bytes + CTX_SIZE(xhci->hcc_params));
}
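
/*
 * In a device context, entry 0 is the slot context and endpoint contexts
 * start at entry 1.  An input context is prefixed by the input control
 * context, so every entry shifts down by one more; that is what the index
 * math below accounts for.
 */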
struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx,
		unsigned int ep_index)
{
	/* increment ep index by offset of start of ep ctx array */
	ep_index++;
	if (ctx->type == XHCI_CTX_TYPE_INPUT)
		ep_index++;

	return (struct xhci_ep_ctx *)
		(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
}


/***************** Streams structures manipulation *************************/

static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs,
		struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
		dma_free_coherent(&pdev->dev,
				sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
				stream_ctx, dma);
	else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
		dma_pool_free(xhci->small_streams_pool,
				stream_ctx, dma);
	else
		dma_pool_free(xhci->medium_streams_pool,
				stream_ctx, dma);
}

/*
 * The stream context array for each endpoint with bulk streams enabled can
 * vary in size, based on:
 *  - how many streams the endpoint supports,
 *  - the maximum primary stream array size the host controller supports,
 *  - and how many streams the device driver asks for.
 *
 * The stream context array must be a power of 2, and can be as small as
 * 64 bytes or as large as 1MB.
 */
static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs, dma_addr_t *dma,
		gfp_t mem_flags)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
		return dma_alloc_coherent(&pdev->dev,
				sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
				dma, mem_flags);
	else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
		return dma_pool_alloc(xhci->small_streams_pool,
				mem_flags, dma);
	else
		return dma_pool_alloc(xhci->medium_streams_pool,
				mem_flags, dma);
}
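
/*
 * A TRB's DMA address, shifted down to its containing segment, keys the radix
 * tree that maps segments back to the stream ring they belong to (see the
 * long comment before xhci_alloc_stream_info() below).
 */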
struct xhci_ring *xhci_dma_to_transfer_ring(
		struct xhci_virt_ep *ep,
		u64 address)
{
	if (ep->ep_state & EP_HAS_STREAMS)
		return radix_tree_lookup(&ep->stream_info->trb_address_map,
				address >> TRB_SEGMENT_SHIFT);
	return ep->ring;
}

struct xhci_ring *xhci_stream_id_to_ring(
		struct xhci_virt_device *dev,
		unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep = &dev->eps[ep_index];

	if (stream_id == 0)
		return ep->ring;
	if (!ep->stream_info)
		return NULL;

	if (stream_id > ep->stream_info->num_streams)
		return NULL;
	return ep->stream_info->stream_rings[stream_id];
}

/*
 * Change an endpoint's internal structure so it supports stream IDs.  The
 * number of requested streams includes stream 0, which cannot be used by
 * device drivers.
 *
 * The number of stream contexts in the stream context array may be bigger than
 * the number of streams the driver wants to use.  This is because the number of
 * stream context array entries must be a power of two.
 *
 * We need a radix tree for mapping physical addresses of TRBs to which stream
 * ID they belong to.  We need to do this because the host controller won't tell
 * us which stream ring the TRB came from.  We could store the stream ID in an
 * event data TRB, but that doesn't help us for the cancellation case, since the
 * endpoint may stop before it reaches that event data TRB.
 *
 * The radix tree maps the upper portion of the TRB DMA address to a ring
 * segment that has the same upper portion of DMA addresses.  For example, say I
 * have segments of size 1KB, that are always 64-byte aligned.  A segment may
 * start at 0x10c91000 and end at 0x10c913f0.  If I use the upper 10 bits, the
 * key to the stream ID is 0x43244.  I can use the DMA address of the TRB to
 * pass the radix tree a key to get the right stream ID:
 *
 *	0x10c90fff >> 10 = 0x43243
 *	0x10c912c0 >> 10 = 0x43244
 *	0x10c91400 >> 10 = 0x43245
 *
 * Obviously, only those TRBs with DMA addresses that are within the segment
 * will make the radix tree return the stream ID for that ring.
 *
 * Caveats for the radix tree:
 *
 * The radix tree uses an unsigned long as a key pair.  On 32-bit systems, an
 * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
 * 64-bits.  Since we only request 32-bit DMA addresses, we can use that as the
 * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
 * PCI DMA addresses on a 64-bit system).  There might be a problem on 32-bit
 * extended systems (where the DMA address can be bigger than 32-bits),
 * if we allow the PCI dma mask to be bigger than 32-bits.  So don't do that.
 */
struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs,
		unsigned int num_streams, gfp_t mem_flags)
{
	struct xhci_stream_info *stream_info;
	u32 cur_stream;
	struct xhci_ring *cur_ring;
	unsigned long key;
	u64 addr;
	int ret;

	xhci_dbg(xhci, "Allocating %u streams and %u "
			"stream context array entries.\n",
			num_streams, num_stream_ctxs);
	if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) {
		xhci_dbg(xhci, "Command ring has no reserved TRBs available\n");
		return NULL;
	}
	xhci->cmd_ring_reserved_trbs++;

	stream_info = kzalloc(sizeof(struct xhci_stream_info), mem_flags);
	if (!stream_info)
		goto cleanup_trbs;

	stream_info->num_streams = num_streams;
	stream_info->num_stream_ctxs = num_stream_ctxs;

	/* Initialize the array of virtual pointers to stream rings. */
	stream_info->stream_rings = kzalloc(
			sizeof(struct xhci_ring *)*num_streams,
			mem_flags);
	if (!stream_info->stream_rings)
		goto cleanup_info;

	/* Initialize the array of DMA addresses for stream rings for the HW. */
	stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci,
			num_stream_ctxs, &stream_info->ctx_array_dma,
			mem_flags);
	if (!stream_info->stream_ctx_array)
		goto cleanup_ctx;
	memset(stream_info->stream_ctx_array, 0,
			sizeof(struct xhci_stream_ctx)*num_stream_ctxs);

	/* Allocate everything needed to free the stream rings later */
	stream_info->free_streams_command =
		xhci_alloc_command(xhci, true, true, mem_flags);
	if (!stream_info->free_streams_command)
		goto cleanup_ctx;

	INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);

	/* Allocate rings for all the streams that the driver will use,
	 * and add their segment DMA addresses to the radix tree.
	 * Stream 0 is reserved.
	 */
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		stream_info->stream_rings[cur_stream] =
			xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, mem_flags);
		cur_ring = stream_info->stream_rings[cur_stream];
		if (!cur_ring)
			goto cleanup_rings;
		cur_ring->stream_id = cur_stream;
		/* Set deq ptr, cycle bit, and stream context type */
		addr = cur_ring->first_seg->dma |
			SCT_FOR_CTX(SCT_PRI_TR) |
			cur_ring->cycle_state;
		stream_info->stream_ctx_array[cur_stream].stream_ring =
			cpu_to_le64(addr);
		xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
				cur_stream, (unsigned long long) addr);

		key = (unsigned long)
			(cur_ring->first_seg->dma >> TRB_SEGMENT_SHIFT);
		ret = radix_tree_insert(&stream_info->trb_address_map,
				key, cur_ring);
		if (ret) {
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
			goto cleanup_rings;
		}
	}
	/* Leave the other unused stream ring pointers in the stream context
	 * array initialized to zero.  This will cause the xHC to give us an
	 * error if the device asks for a stream ID we don't have setup (if it
	 * was any other way, the host controller would assume the ring is
	 * "empty" and wait forever for data to be queued to that stream ID).
	 */

	return stream_info;

cleanup_rings:
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		cur_ring = stream_info->stream_rings[cur_stream];
		if (cur_ring) {
			addr = cur_ring->first_seg->dma;
			radix_tree_delete(&stream_info->trb_address_map,
					addr >> TRB_SEGMENT_SHIFT);
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
		}
	}
	xhci_free_command(xhci, stream_info->free_streams_command);
cleanup_ctx:
	kfree(stream_info->stream_rings);
cleanup_info:
	kfree(stream_info);
cleanup_trbs:
	xhci->cmd_ring_reserved_trbs--;
	return NULL;
}
/*
 * Sets the MaxPStreams field and the Linear Stream Array field.
 * Sets the dequeue pointer to the stream context array.
 */
void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		struct xhci_stream_info *stream_info)
{
	u32 max_primary_streams;

	/* MaxPStreams is the number of stream context array entries, not the
	 * number we're actually using.  Must be in 2^(MaxPstreams + 1) format.
	 * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 5, fls(0x100) = 9.
	 */
	max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
	xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
			"Setting number of stream ctx array entries to %u",
			1 << (max_primary_streams + 1));
	ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
	ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
			| EP_HAS_LSA);
	ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma);
}
/*
 * Sets the MaxPStreams field and the Linear Stream Array field to 0.
 * Reinstalls the "normal" endpoint ring (at its previous dequeue mark,
 * not at the beginning of the ring).
 */
void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		struct xhci_virt_ep *ep)
{
	dma_addr_t addr;

	ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA));
	addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
	ep_ctx->deq = cpu_to_le64(addr | ep->ring->cycle_state);
}

/* Frees all stream contexts associated with the endpoint,
 *
 * Caller should fix the endpoint context streams fields.
 */
void xhci_free_stream_info(struct xhci_hcd *xhci,
		struct xhci_stream_info *stream_info)
{
	int cur_stream;
	struct xhci_ring *cur_ring;
	dma_addr_t addr;

	if (!stream_info)
		return;

	for (cur_stream = 1; cur_stream < stream_info->num_streams;
			cur_stream++) {
		cur_ring = stream_info->stream_rings[cur_stream];
		if (cur_ring) {
			addr = cur_ring->first_seg->dma;
			radix_tree_delete(&stream_info->trb_address_map,
					addr >> TRB_SEGMENT_SHIFT);
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
		}
	}
	xhci_free_command(xhci, stream_info->free_streams_command);
	xhci->cmd_ring_reserved_trbs--;
	if (stream_info->stream_ctx_array)
		xhci_free_stream_ctx(xhci,
				stream_info->num_stream_ctxs,
				stream_info->stream_ctx_array,
				stream_info->ctx_array_dma);

	if (stream_info)
		kfree(stream_info->stream_rings);
	kfree(stream_info);
}


/***************** Device context manipulation *************************/

static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep)
{
	init_timer(&ep->stop_cmd_timer);
	ep->stop_cmd_timer.data = (unsigned long) ep;
	ep->stop_cmd_timer.function = xhci_stop_endpoint_command_watchdog;
	ep->xhci = xhci;
}

static void xhci_free_tt_info(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int slot_id)
{
	struct list_head *tt_list_head;
	struct xhci_tt_bw_info *tt_info, *next;
	bool slot_found = false;

	/* If the device never made it past the Set Address stage,
	 * it may not have the real_port set correctly.
	 */
	if (virt_dev->real_port == 0 ||
			virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
		xhci_dbg(xhci, "Bad real port.\n");
		return;
	}

	tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
	list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
		/* Multi-TT hubs will have more than one entry */
		if (tt_info->slot_id == slot_id) {
			slot_found = true;
			list_del(&tt_info->tt_list);
			kfree(tt_info);
		} else if (slot_found) {
			break;
		}
	}
}

int xhci_alloc_tt_info(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_device *hdev,
		struct usb_tt *tt, gfp_t mem_flags)
{
	struct xhci_tt_bw_info *tt_info;
	unsigned int num_ports;
	int i, j;

	if (!tt->multi)
		num_ports = 1;
	else
		num_ports = hdev->maxchild;

	for (i = 0; i < num_ports; i++, tt_info++) {
		struct xhci_interval_bw_table *bw_table;

		tt_info = kzalloc(sizeof(*tt_info), mem_flags);
		if (!tt_info)
			goto free_tts;
		INIT_LIST_HEAD(&tt_info->tt_list);
		list_add(&tt_info->tt_list,
				&xhci->rh_bw[virt_dev->real_port - 1].tts);
		tt_info->slot_id = virt_dev->udev->slot_id;
		if (tt->multi)
			tt_info->ttport = i+1;
		bw_table = &tt_info->bw_table;
		for (j = 0; j < XHCI_MAX_INTERVAL; j++)
			INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
	}
	return 0;

free_tts:
	xhci_free_tt_info(xhci, virt_dev, virt_dev->udev->slot_id);
	return -ENOMEM;
}

/* All the xhci_tds in the ring's TD list should be freed at this point.
 * Should be called with xhci->lock held if there is any chance the TT lists
 * will be manipulated by the configure endpoint, allocate device, or update
 * hub functions while this function is removing the TT entries from the list.
 */
void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *dev;
	int i;
	int old_active_eps = 0;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || !xhci->devs[slot_id])
		return;

	dev = xhci->devs[slot_id];
	xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
	if (!dev)
		return;

	if (dev->tt_info)
		old_active_eps = dev->tt_info->active_eps;

	for (i = 0; i < 31; ++i) {
		if (dev->eps[i].ring)
			xhci_ring_free(xhci, dev->eps[i].ring);
		if (dev->eps[i].stream_info)
			xhci_free_stream_info(xhci,
					dev->eps[i].stream_info);
		/* Endpoints on the TT/root port lists should have been removed
		 * when usb_disable_device() was called for the device.
		 * We can't drop them anyway, because the udev might have gone
		 * away by this point, and we can't tell what speed it was.
		 */
		if (!list_empty(&dev->eps[i].bw_endpoint_list))
			xhci_warn(xhci, "Slot %u endpoint %u "
					"not removed from BW list!\n",
					slot_id, i);
	}
	/* If this is a hub, free the TT(s) from the TT list */
	xhci_free_tt_info(xhci, dev, slot_id);
	/* If necessary, update the number of active TTs on this root port */
	xhci_update_tt_active_eps(xhci, dev, old_active_eps);

	if (dev->ring_cache) {
		for (i = 0; i < dev->num_rings_cached; i++)
			xhci_ring_free(xhci, dev->ring_cache[i]);
		kfree(dev->ring_cache);
	}

	if (dev->in_ctx)
		xhci_free_container_ctx(xhci, dev->in_ctx);
	if (dev->out_ctx)
		xhci_free_container_ctx(xhci, dev->out_ctx);

	kfree(xhci->devs[slot_id]);
	xhci->devs[slot_id] = NULL;
}

int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
		struct usb_device *udev, gfp_t flags)
{
	struct xhci_virt_device *dev;
	int i;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || xhci->devs[slot_id]) {
		xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
		return 0;
	}

	xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
	if (!xhci->devs[slot_id])
		return 0;
	dev = xhci->devs[slot_id];

	/* Allocate the (output) device context that will be used in the HC. */
	dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
	if (!dev->out_ctx)
		goto fail;

	xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->out_ctx->dma);

	/* Allocate the (input) device context for address device command */
	dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
	if (!dev->in_ctx)
		goto fail;

	xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->in_ctx->dma);

	/* Initialize the cancellation list and watchdog timers for each ep */
	for (i = 0; i < 31; i++) {
		xhci_init_endpoint_timer(xhci, &dev->eps[i]);
		INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
		INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
	}

	/* Allocate endpoint 0 ring */
	dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, flags);
	if (!dev->eps[0].ring)
		goto fail;

	/* Allocate pointers to the ring cache */
	dev->ring_cache = kzalloc(
			sizeof(struct xhci_ring *)*XHCI_MAX_RINGS_CACHED,
			flags);
	if (!dev->ring_cache)
		goto fail;
	dev->num_rings_cached = 0;

	init_completion(&dev->cmd_completion);
	INIT_LIST_HEAD(&dev->cmd_list);
	dev->udev = udev;

	/* Point to output device context in dcbaa. */
	xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma);
	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
			slot_id,
			&xhci->dcbaa->dev_context_ptrs[slot_id],
			le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));

	return 1;
fail:
	xhci_free_virt_device(xhci, slot_id);
	return 0;
}

void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
		struct usb_device *udev)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_ep_ctx *ep0_ctx;
	struct xhci_ring *ep_ring;

	virt_dev = xhci->devs[udev->slot_id];
	ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0);
	ep_ring = virt_dev->eps[0].ring;
	/*
	 * FIXME we don't keep track of the dequeue pointer very well after a
	 * Set TR dequeue pointer, so we're setting the dequeue pointer of the
	 * host to our enqueue pointer.  This should only be called after a
	 * configured device has reset, so all control transfers should have
	 * been completed or cancelled before the reset.
	 */
	ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg,
							ep_ring->enqueue)
				   | ep_ring->cycle_state);
}

/*
 * The xHCI roothub may have ports of differing speeds in any order in the port
 * status registers.  xhci->port_array provides an array of the port speed for
 * each offset into the port status registers.
 *
 * The xHCI hardware wants to know the roothub port number that the USB device
 * is attached to (or the roothub port its ancestor hub is attached to).  All we
 * know is the index of that port under either the USB 2.0 or the USB 3.0
 * roothub, but that doesn't give us the real index into the HW port status
 * registers.  Call xhci_find_raw_port_number() to get real index.
 */
static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
		struct usb_device *udev)
{
	struct usb_device *top_dev;
	struct usb_hcd *hcd;

	if (udev->speed == USB_SPEED_SUPER)
		hcd = xhci->shared_hcd;
	else
		hcd = xhci->main_hcd;

	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
		/* Found device below root hub */;

	return xhci_find_raw_port_number(hcd, top_dev->portnum);
}

/* Setup an xHCI virtual device for a Set Address command */
int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
{
	struct xhci_virt_device *dev;
	struct xhci_ep_ctx *ep0_ctx;
	struct xhci_slot_ctx *slot_ctx;
	u32 port_num;
	u32 max_packets;
	struct usb_device *top_dev;

	dev = xhci->devs[udev->slot_id];
	/* Slot ID 0 is reserved */
	if (udev->slot_id == 0 || !dev) {
		xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
				udev->slot_id);
		return -EINVAL;
	}
	ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);

	/* 3) Only the control endpoint is valid - one endpoint context */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
		max_packets = MAX_PACKET(512);
		break;
	case USB_SPEED_HIGH:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
		max_packets = MAX_PACKET(64);
		break;
	/* USB core guesses at a 64-byte max packet first for FS devices */
	case USB_SPEED_FULL:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
		max_packets = MAX_PACKET(64);
		break;
	case USB_SPEED_LOW:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
		max_packets = MAX_PACKET(8);
		break;
	case USB_SPEED_WIRELESS:
		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
		return -EINVAL;
	default:
		/* Speed was set earlier, this shouldn't happen. */
		return -EINVAL;
	}
	/* Find the root hub port this device is under */
	port_num = xhci_find_real_port_number(xhci, udev);
	if (!port_num)
		return -EINVAL;
	slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num));
	/* Set the port number in the virtual_device to the faked port number */
	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
		/* Found device below root hub */;
	dev->fake_port = top_dev->portnum;
	dev->real_port = port_num;
	xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num);
	xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port);
	/* Find the right bandwidth table that this device will be a part of.
	 * If this is a full speed device attached directly to a root port (or a
	 * descendant of one), it counts as a primary bandwidth domain, not a
	 * secondary bandwidth domain under a TT.  An xhci_tt_info structure
	 * will never be created for the HS root hub.
	 */
	if (!udev->tt || !udev->tt->hub->parent) {
		dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table;
	} else {
		struct xhci_root_port_bw_info *rh_bw;
		struct xhci_tt_bw_info *tt_bw;

		rh_bw = &xhci->rh_bw[port_num - 1];
		/* Find the right TT. */
		list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) {
			if (tt_bw->slot_id != udev->tt->hub->slot_id)
				continue;

			if (!dev->udev->tt->multi ||
					(udev->tt->multi &&
					 tt_bw->ttport == dev->udev->ttport)) {
				dev->bw_table = &tt_bw->bw_table;
				dev->tt_info = tt_bw;
				break;
			}
		}
		if (!dev->tt_info)
			xhci_warn(xhci, "WARN: Didn't find a matching TT\n");
	}

	/* Is this a LS/FS device under an external HS hub? */
	if (udev->tt && udev->tt->hub->parent) {
		slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id |
				(udev->ttport << 8));
		if (udev->tt->multi)
			slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
	}
	xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
	xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);

	/* Step 4 - ring already allocated */
	/* Step 5 */
	ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));

	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
	ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3) |
			max_packets);

	ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma |
			dev->eps[0].ring->cycle_state);

	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */

	return 0;
}

/*
 * Convert interval expressed as 2^(bInterval - 1) == interval into
 * straight exponent value 2^n == interval.
 */
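/*
 * For example, a high speed interrupt endpoint with bInterval = 4 is polled
 * every 2^(4 - 1) = 8 microframes, so the exponent stored here is 3.
 */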
static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval;

	interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
	if (interval != ep->desc.bInterval - 1)
		dev_warn(&udev->dev,
			 "ep %#x - rounding interval to %d %sframes\n",
			 ep->desc.bEndpointAddress,
			 1 << interval,
			 udev->speed == USB_SPEED_FULL ? "" : "micro");

	if (udev->speed == USB_SPEED_FULL) {
		/*
		 * Full speed isoc endpoints specify interval in frames,
		 * not microframes.  We are using microframes everywhere,
		 * so adjust accordingly.
		 */
		interval += 3;	/* 1 frame = 2^3 uframes */
	}

	return interval;
}

/*
 * Convert bInterval expressed in microframes (in 1-255 range) to exponent of
 * microframes, rounded down to nearest power of 2.
 */
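/*
 * For example, an interval of 9 microframes is not a power of two, so it is
 * rounded down to 2^3 = 8 microframes and the warning below fires.
 */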
static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
		struct usb_host_endpoint *ep, unsigned int desc_interval,
		unsigned int min_exponent, unsigned int max_exponent)
{
	unsigned int interval;

	interval = fls(desc_interval) - 1;
	interval = clamp_val(interval, min_exponent, max_exponent);
	if ((1 << interval) != desc_interval)
		dev_warn(&udev->dev,
			 "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
			 ep->desc.bEndpointAddress,
			 1 << interval,
			 desc_interval);

	return interval;
}

static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	if (ep->desc.bInterval == 0)
		return 0;
	return xhci_microframes_to_exponent(udev, ep,
			ep->desc.bInterval, 0, 15);
}


static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	return xhci_microframes_to_exponent(udev, ep,
			ep->desc.bInterval * 8, 3, 10);
}

/* Return the polling or NAK interval.
 *
 * The polling interval is expressed in "microframes".  If xHCI's Interval field
 * is set to N, it will service the endpoint every 2^(Interval)*125us.
 *
 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
 * is set to 0.
 */
static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval = 0;

	switch (udev->speed) {
	case USB_SPEED_HIGH:
		/* Max NAK rate */
		if (usb_endpoint_xfer_control(&ep->desc) ||
		    usb_endpoint_xfer_bulk(&ep->desc)) {
			interval = xhci_parse_microframe_interval(udev, ep);
			break;
		}
		/* Fall through - SS and HS isoc/int have same decoding */

	case USB_SPEED_SUPER:
		if (usb_endpoint_xfer_int(&ep->desc) ||
		    usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_exponent_interval(udev, ep);
		}
		break;

	case USB_SPEED_FULL:
		if (usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_exponent_interval(udev, ep);
			break;
		}
		/*
		 * Fall through for interrupt endpoint interval decoding
		 * since it uses the same rules as low speed interrupt
		 * endpoints.
		 */

	case USB_SPEED_LOW:
		if (usb_endpoint_xfer_int(&ep->desc) ||
		    usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_frame_interval(udev, ep);
		}
		break;

	default:
		BUG();
	}
	return EP_INTERVAL(interval);
}

/* The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
 * High speed endpoint descriptors can define "the number of additional
 * transaction opportunities per microframe", but that goes in the Max Burst
 * endpoint context field.
 */
static u32 xhci_get_endpoint_mult(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	if (udev->speed != USB_SPEED_SUPER ||
			!usb_endpoint_xfer_isoc(&ep->desc))
		return 0;
	return ep->ss_ep_comp.bmAttributes;
}

static u32 xhci_get_endpoint_type(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	int in;
	u32 type;

	in = usb_endpoint_dir_in(&ep->desc);
	if (usb_endpoint_xfer_control(&ep->desc)) {
		type = EP_TYPE(CTRL_EP);
	} else if (usb_endpoint_xfer_bulk(&ep->desc)) {
		if (in)
			type = EP_TYPE(BULK_IN_EP);
		else
			type = EP_TYPE(BULK_OUT_EP);
	} else if (usb_endpoint_xfer_isoc(&ep->desc)) {
		if (in)
			type = EP_TYPE(ISOC_IN_EP);
		else
			type = EP_TYPE(ISOC_OUT_EP);
	} else if (usb_endpoint_xfer_int(&ep->desc)) {
		if (in)
			type = EP_TYPE(INT_IN_EP);
		else
			type = EP_TYPE(INT_OUT_EP);
	} else {
		type = 0;
	}
	return type;
}

/* Return the maximum endpoint service interval time (ESIT) payload.
 * Basically, this is the maxpacket size, multiplied by the burst size
 * and mult size.
 */
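/*
 * For example, a high speed interrupt endpoint with wMaxPacketSize = 1024 and
 * two additional transaction opportunities per microframe (bits 12:11 = 2)
 * can move up to 1024 * (2 + 1) = 3072 bytes per ESIT.
 */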
  1122. static u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
  1123. struct usb_device *udev,
  1124. struct usb_host_endpoint *ep)
  1125. {
  1126. int max_burst;
  1127. int max_packet;
  1128. /* Only applies for interrupt or isochronous endpoints */
  1129. if (usb_endpoint_xfer_control(&ep->desc) ||
  1130. usb_endpoint_xfer_bulk(&ep->desc))
  1131. return 0;
  1132. if (udev->speed == USB_SPEED_SUPER)
  1133. return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);
  1134. max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
  1135. max_burst = (usb_endpoint_maxp(&ep->desc) & 0x1800) >> 11;
  1136. /* A 0 in max burst means 1 transfer per ESIT */
  1137. return max_packet * (max_burst + 1);
  1138. }
  1139. /* Set up an endpoint with one ring segment. Do not allocate stream rings.
  1140. * Drivers will have to call usb_alloc_streams() to do that.
  1141. */
  1142. int xhci_endpoint_init(struct xhci_hcd *xhci,
  1143. struct xhci_virt_device *virt_dev,
  1144. struct usb_device *udev,
  1145. struct usb_host_endpoint *ep,
  1146. gfp_t mem_flags)
  1147. {
  1148. unsigned int ep_index;
  1149. struct xhci_ep_ctx *ep_ctx;
  1150. struct xhci_ring *ep_ring;
  1151. unsigned int max_packet;
  1152. unsigned int max_burst;
  1153. enum xhci_ring_type type;
  1154. u32 max_esit_payload;
  1155. u32 endpoint_type;
  1156. ep_index = xhci_get_endpoint_index(&ep->desc);
  1157. ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
  1158. endpoint_type = xhci_get_endpoint_type(udev, ep);
  1159. if (!endpoint_type)
  1160. return -EINVAL;
  1161. ep_ctx->ep_info2 = cpu_to_le32(endpoint_type);
  1162. type = usb_endpoint_type(&ep->desc);
  1163. /* Set up the endpoint ring */
  1164. virt_dev->eps[ep_index].new_ring =
  1165. xhci_ring_alloc(xhci, 2, 1, type, mem_flags);
  1166. if (!virt_dev->eps[ep_index].new_ring) {
  1167. /* Attempt to use the ring cache */
  1168. if (virt_dev->num_rings_cached == 0)
  1169. return -ENOMEM;
  1170. virt_dev->eps[ep_index].new_ring =
  1171. virt_dev->ring_cache[virt_dev->num_rings_cached];
  1172. virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
  1173. virt_dev->num_rings_cached--;
  1174. xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
  1175. 1, type);
  1176. }
  1177. virt_dev->eps[ep_index].skip = false;
  1178. ep_ring = virt_dev->eps[ep_index].new_ring;
  1179. ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma | ep_ring->cycle_state);
  1180. ep_ctx->ep_info = cpu_to_le32(xhci_get_endpoint_interval(udev, ep)
  1181. | EP_MULT(xhci_get_endpoint_mult(udev, ep)));
  1182. /* FIXME dig Mult and streams info out of ep companion desc */
  1183. /* Allow 3 retries for everything but isoc;
  1184. * CErr shall be set to 0 for Isoch endpoints.
  1185. */
  1186. if (!usb_endpoint_xfer_isoc(&ep->desc))
  1187. ep_ctx->ep_info2 |= cpu_to_le32(ERROR_COUNT(3));
  1188. else
  1189. ep_ctx->ep_info2 |= cpu_to_le32(ERROR_COUNT(0));
  1190. /* Set the max packet size and max burst */
  1191. max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
  1192. max_burst = 0;
  1193. switch (udev->speed) {
  1194. case USB_SPEED_SUPER:
  1195. /* dig out max burst from ep companion desc */
  1196. max_burst = ep->ss_ep_comp.bMaxBurst;
  1197. break;
  1198. case USB_SPEED_HIGH:
  1199. /* Some devices get this wrong */
  1200. if (usb_endpoint_xfer_bulk(&ep->desc))
  1201. max_packet = 512;
  1202. /* bits 11:12 specify the number of additional transaction
  1203. * opportunities per microframe (USB 2.0, section 9.6.6)
  1204. */
  1205. if (usb_endpoint_xfer_isoc(&ep->desc) ||
  1206. usb_endpoint_xfer_int(&ep->desc)) {
  1207. max_burst = (usb_endpoint_maxp(&ep->desc)
  1208. & 0x1800) >> 11;
  1209. }
  1210. break;
  1211. case USB_SPEED_FULL:
  1212. case USB_SPEED_LOW:
  1213. break;
  1214. default:
  1215. BUG();
  1216. }
	ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet) |
			MAX_BURST(max_burst));
	max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep);
	ep_ctx->tx_info = cpu_to_le32(MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload));

	/*
	 * XXX no idea how to calculate the average TRB buffer length for bulk
	 * endpoints, as the driver gives us no clue how big each scatter gather
	 * list entry (or buffer) is going to be.
	 *
	 * For isochronous and interrupt endpoints, we set it to the max
	 * available, until we have new API in the USB core to allow drivers to
	 * declare how much bandwidth they actually need.
	 *
	 * Normally, it would be calculated by taking the total of the buffer
	 * lengths in the TD and then dividing by the number of TRBs in a TD,
	 * including link TRBs, No-op TRBs, and Event data TRBs.  Since we don't
	 * use Event Data TRBs, and we don't chain in a link TRB on short
	 * transfers, we're basically dividing by 1.
	 *
	 * xHCI 1.0 specification indicates that the Average TRB Length should
	 * be set to 8 for control endpoints.
	 */
	if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version == 0x100)
		ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(8));
	else
		ep_ctx->tx_info |=
			cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(max_esit_payload));
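	/* Illustrative example: the periodic endpoint above with a max ESIT
	 * payload of 3072 bytes gets both the Max ESIT Payload and Average
	 * TRB Length fields of tx_info set to 3072.
	 */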
	/* FIXME Debug endpoint context */
	return 0;
}

void xhci_endpoint_zero(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_host_endpoint *ep)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = 0;
	ep_ctx->deq = 0;
	ep_ctx->tx_info = 0;
	/* Don't free the endpoint ring until the set interface or configuration
	 * request succeeds.
	 */
}

void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info)
{
	bw_info->ep_interval = 0;
	bw_info->mult = 0;
	bw_info->num_packets = 0;
	bw_info->max_packet_size = 0;
	bw_info->type = 0;
	bw_info->max_esit_payload = 0;
}

void xhci_update_bw_info(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_input_control_ctx *ctrl_ctx,
		struct xhci_virt_device *virt_dev)
{
	struct xhci_bw_info *bw_info;
	struct xhci_ep_ctx *ep_ctx;
	unsigned int ep_type;
	int i;

	for (i = 1; i < 31; ++i) {
		bw_info = &virt_dev->eps[i].bw_info;

		/* We can't tell what endpoint type is being dropped, but
		 * unconditionally clearing the bandwidth info for non-periodic
		 * endpoints should be harmless because the info will never be
		 * set in the first place.
		 */
		if (!EP_IS_ADDED(ctrl_ctx, i) && EP_IS_DROPPED(ctrl_ctx, i)) {
			/* Dropped endpoint */
			xhci_clear_endpoint_bw_info(bw_info);
			continue;
		}

		if (EP_IS_ADDED(ctrl_ctx, i)) {
			ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, i);
			ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));

			/* Ignore non-periodic endpoints */
			if (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
					ep_type != ISOC_IN_EP &&
					ep_type != INT_IN_EP)
				continue;

			/* Added or changed endpoint */
			bw_info->ep_interval = CTX_TO_EP_INTERVAL(
					le32_to_cpu(ep_ctx->ep_info));
			/* Number of packets and mult are zero-based in the
			 * input context, but we want one-based for the
			 * interval table.
			 */
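			/* Illustrative example: a context Mult field of 0 and
			 * a Max Burst of 2 are stored below as mult = 1 and
			 * num_packets = 3.
			 */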
			bw_info->mult = CTX_TO_EP_MULT(
					le32_to_cpu(ep_ctx->ep_info)) + 1;
			bw_info->num_packets = CTX_TO_MAX_BURST(
					le32_to_cpu(ep_ctx->ep_info2)) + 1;
			bw_info->max_packet_size = MAX_PACKET_DECODED(
					le32_to_cpu(ep_ctx->ep_info2));
			bw_info->type = ep_type;
			bw_info->max_esit_payload = CTX_TO_MAX_ESIT_PAYLOAD(
					le32_to_cpu(ep_ctx->tx_info));
		}
	}
}

/* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
 * Useful when you want to change one particular aspect of the endpoint and then
 * issue a configure endpoint command.
 */
void xhci_endpoint_copy(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx,
		unsigned int ep_index)
{
	struct xhci_ep_ctx *out_ep_ctx;
	struct xhci_ep_ctx *in_ep_ctx;

	out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);

	in_ep_ctx->ep_info = out_ep_ctx->ep_info;
	in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
	in_ep_ctx->deq = out_ep_ctx->deq;
	in_ep_ctx->tx_info = out_ep_ctx->tx_info;
}

/* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
 * Useful when you want to change one particular aspect of the slot and then
 * issue a configure endpoint command.  Only the context entries field matters,
 * but we'll copy the whole thing anyway.
 */
void xhci_slot_copy(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx)
{
	struct xhci_slot_ctx *in_slot_ctx;
	struct xhci_slot_ctx *out_slot_ctx;

	in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);

	in_slot_ctx->dev_info = out_slot_ctx->dev_info;
	in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
	in_slot_ctx->tt_info = out_slot_ctx->tt_info;
	in_slot_ctx->dev_state = out_slot_ctx->dev_state;
}

/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
	int i;
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Allocating %d scratchpad buffers", num_sp);

	if (!num_sp)
		return 0;

	xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags);
	if (!xhci->scratchpad)
		goto fail_sp;

	xhci->scratchpad->sp_array = dma_alloc_coherent(dev,
			num_sp * sizeof(u64),
			&xhci->scratchpad->sp_dma, flags);
	if (!xhci->scratchpad->sp_array)
		goto fail_sp2;

	xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags);
	if (!xhci->scratchpad->sp_buffers)
		goto fail_sp3;

	xhci->scratchpad->sp_dma_buffers =
		kzalloc(sizeof(dma_addr_t) * num_sp, flags);
	if (!xhci->scratchpad->sp_dma_buffers)
		goto fail_sp4;

	xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
	for (i = 0; i < num_sp; i++) {
		dma_addr_t dma;
		void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
				flags);
		if (!buf)
			goto fail_sp5;

		xhci->scratchpad->sp_array[i] = dma;
		xhci->scratchpad->sp_buffers[i] = buf;
		xhci->scratchpad->sp_dma_buffers[i] = dma;
	}

	return 0;
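
	/* Unwind in the reverse order of allocation.  At the fail_sp5 label,
	 * i indexes the first scratchpad buffer that failed to allocate, so
	 * only buffers 0..i-1 need to be freed.
	 */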
 fail_sp5:
	for (i = i - 1; i >= 0; i--) {
		dma_free_coherent(dev, xhci->page_size,
				xhci->scratchpad->sp_buffers[i],
				xhci->scratchpad->sp_dma_buffers[i]);
	}
	kfree(xhci->scratchpad->sp_dma_buffers);

 fail_sp4:
	kfree(xhci->scratchpad->sp_buffers);

 fail_sp3:
	dma_free_coherent(dev, num_sp * sizeof(u64),
			xhci->scratchpad->sp_array,
			xhci->scratchpad->sp_dma);

 fail_sp2:
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;

 fail_sp:
	return -ENOMEM;
}

static void scratchpad_free(struct xhci_hcd *xhci)
{
	int num_sp;
	int i;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	if (!xhci->scratchpad)
		return;

	num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	for (i = 0; i < num_sp; i++) {
		dma_free_coherent(&pdev->dev, xhci->page_size,
				xhci->scratchpad->sp_buffers[i],
				xhci->scratchpad->sp_dma_buffers[i]);
	}
	kfree(xhci->scratchpad->sp_dma_buffers);
	kfree(xhci->scratchpad->sp_buffers);
	dma_free_coherent(&pdev->dev, num_sp * sizeof(u64),
			xhci->scratchpad->sp_array,
			xhci->scratchpad->sp_dma);
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;
}

struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
		bool allocate_in_ctx, bool allocate_completion,
		gfp_t mem_flags)
{
	struct xhci_command *command;

	command = kzalloc(sizeof(*command), mem_flags);
	if (!command)
		return NULL;

	if (allocate_in_ctx) {
		command->in_ctx =
			xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
					mem_flags);
		if (!command->in_ctx) {
			kfree(command);
			return NULL;
		}
	}

	if (allocate_completion) {
		command->completion =
			kzalloc(sizeof(struct completion), mem_flags);
		if (!command->completion) {
			xhci_free_container_ctx(xhci, command->in_ctx);
			kfree(command);
			return NULL;
		}
		init_completion(command->completion);
	}

	command->status = 0;
	INIT_LIST_HEAD(&command->cmd_list);
	return command;
}
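
/* Descriptive note: the enqueue path allocates all of an URB's TDs as one
 * block and points urb_priv->td[0] at its start, so freeing td[0] below
 * releases every TD (see the urb_priv allocation in xhci_urb_enqueue).
 */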
void xhci_urb_free_priv(struct xhci_hcd *xhci, struct urb_priv *urb_priv)
{
	if (urb_priv) {
		kfree(urb_priv->td[0]);
		kfree(urb_priv);
	}
}

void xhci_free_command(struct xhci_hcd *xhci,
		struct xhci_command *command)
{
	xhci_free_container_ctx(xhci,
			command->in_ctx);
	kfree(command->completion);
	kfree(command);
}

void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	struct dev_info *dev_info, *next;
	struct xhci_cd *cur_cd, *next_cd;
	unsigned long flags;
	int size;
	int i, j, num_ports;
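
	/* Tear everything down in roughly the reverse order of
	 * xhci_mem_init(), so later allocations never outlive the
	 * structures they reference.
	 */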
	/* Free the Event Ring Segment Table and the actual Event Ring */
	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
	if (xhci->erst.entries)
		dma_free_coherent(&pdev->dev, size,
				xhci->erst.entries, xhci->erst.erst_dma_addr);
	xhci->erst.entries = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed ERST");
	if (xhci->event_ring)
		xhci_ring_free(xhci, xhci->event_ring);
	xhci->event_ring = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed event ring");

	if (xhci->lpm_command)
		xhci_free_command(xhci, xhci->lpm_command);
	xhci->cmd_ring_reserved_trbs = 0;
	if (xhci->cmd_ring)
		xhci_ring_free(xhci, xhci->cmd_ring);
	xhci->cmd_ring = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed command ring");
	list_for_each_entry_safe(cur_cd, next_cd,
			&xhci->cancel_cmd_list, cancel_cmd_list) {
		list_del(&cur_cd->cancel_cmd_list);
		kfree(cur_cd);
	}

	for (i = 1; i < MAX_HC_SLOTS; ++i)
		xhci_free_virt_device(xhci, i);

	if (xhci->segment_pool)
		dma_pool_destroy(xhci->segment_pool);
	xhci->segment_pool = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed segment pool");

	if (xhci->device_pool)
		dma_pool_destroy(xhci->device_pool);
	xhci->device_pool = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed device context pool");

	if (xhci->small_streams_pool)
		dma_pool_destroy(xhci->small_streams_pool);
	xhci->small_streams_pool = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Freed small stream array pool");

	if (xhci->medium_streams_pool)
		dma_pool_destroy(xhci->medium_streams_pool);
	xhci->medium_streams_pool = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Freed medium stream array pool");

	if (xhci->dcbaa)
		dma_free_coherent(&pdev->dev, sizeof(*xhci->dcbaa),
				xhci->dcbaa, xhci->dcbaa->dma);
	xhci->dcbaa = NULL;

	scratchpad_free(xhci);

	spin_lock_irqsave(&xhci->lock, flags);
	list_for_each_entry_safe(dev_info, next, &xhci->lpm_failed_devs, list) {
		list_del(&dev_info->list);
		kfree(dev_info);
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (!xhci->rh_bw)
		goto no_bw;

	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	for (i = 0; i < num_ports; i++) {
		struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
		for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
			struct list_head *ep = &bwt->interval_bw[j].endpoints;
			while (!list_empty(ep))
				list_del_init(ep->next);
		}
	}

	for (i = 0; i < num_ports; i++) {
		struct xhci_tt_bw_info *tt, *n;
		list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
			list_del(&tt->tt_list);
			kfree(tt);
		}
	}

no_bw:
	xhci->num_usb2_ports = 0;
	xhci->num_usb3_ports = 0;
	xhci->num_active_eps = 0;
	kfree(xhci->usb2_ports);
	kfree(xhci->usb3_ports);
	kfree(xhci->port_array);
	kfree(xhci->rh_bw);
	kfree(xhci->ext_caps);

	xhci->page_size = 0;
	xhci->page_shift = 0;
	xhci->bus_state[0].bus_suspended = 0;
	xhci->bus_state[1].bus_suspended = 0;
}

static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
		struct xhci_segment *input_seg,
		union xhci_trb *start_trb,
		union xhci_trb *end_trb,
		dma_addr_t input_dma,
		struct xhci_segment *result_seg,
		char *test_name, int test_number)
{
	unsigned long long start_dma;
	unsigned long long end_dma;
	struct xhci_segment *seg;

	start_dma = xhci_trb_virt_to_dma(input_seg, start_trb);
	end_dma = xhci_trb_virt_to_dma(input_seg, end_trb);

	seg = trb_in_td(input_seg, start_trb, end_trb, input_dma);
	if (seg != result_seg) {
		xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
				test_name, test_number);
		xhci_warn(xhci, "Tested TRB math w/ seg %p and input DMA 0x%llx\n",
				input_seg,
				(unsigned long long) input_dma);
		xhci_warn(xhci, "starting TRB %p (0x%llx DMA), ending TRB %p (0x%llx DMA)\n",
				start_trb, start_dma,
				end_trb, end_dma);
		xhci_warn(xhci, "Expected seg %p, got seg %p\n",
				result_seg, seg);
		return -1;
	}
	return 0;
}

/* TRB math checks for xhci_trb_in_td(), using the command and event rings. */
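/* Each TRB is 16 bytes, so an offset of +/-16 from a segment's DMA base
 * steps exactly one TRB; the test vectors below rely on that to probe the
 * segment boundaries.
 */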
static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
{
	struct {
		dma_addr_t input_dma;
		struct xhci_segment *result_seg;
	} simple_test_vector[] = {
		/* A zeroed DMA field should fail */
		{ 0, NULL },
		/* One TRB before the ring start should fail */
		{ xhci->event_ring->first_seg->dma - 16, NULL },
		/* One byte before the ring start should fail */
		{ xhci->event_ring->first_seg->dma - 1, NULL },
		/* Starting TRB should succeed */
		{ xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
		/* Ending TRB should succeed */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
			xhci->event_ring->first_seg },
		/* One byte after the ring end should fail */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
		/* One TRB after the ring end should fail */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
		/* An address of all ones should fail */
		{ (dma_addr_t) (~0), NULL },
	};
	struct {
		struct xhci_segment *input_seg;
		union xhci_trb *start_trb;
		union xhci_trb *end_trb;
		dma_addr_t input_dma;
		struct xhci_segment *result_seg;
	} complex_test_vector[] = {
		/* Test feeding a valid DMA address from a different ring */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->event_ring->first_seg->trbs,
			.end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* Test feeding a valid end TRB from a different ring */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->event_ring->first_seg->trbs,
			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* Test feeding a valid start and end TRB from a different ring */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->cmd_ring->first_seg->trbs,
			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* TRB in this ring, but after this TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[0],
			.end_trb = &xhci->event_ring->first_seg->trbs[3],
			.input_dma = xhci->event_ring->first_seg->dma + 4*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but before this TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[3],
			.end_trb = &xhci->event_ring->first_seg->trbs[6],
			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but after this wrapped TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but before this wrapped TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16,
			.result_seg = NULL,
		},
		/* TRB not in this ring, and we have a wrapped TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
	};

	unsigned int num_tests;
	int i, ret;

	num_tests = ARRAY_SIZE(simple_test_vector);
	for (i = 0; i < num_tests; i++) {
		ret = xhci_test_trb_in_td(xhci,
				xhci->event_ring->first_seg,
				xhci->event_ring->first_seg->trbs,
				&xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
				simple_test_vector[i].input_dma,
				simple_test_vector[i].result_seg,
				"Simple", i);
		if (ret < 0)
			return ret;
	}

	num_tests = ARRAY_SIZE(complex_test_vector);
	for (i = 0; i < num_tests; i++) {
		ret = xhci_test_trb_in_td(xhci,
				complex_test_vector[i].input_seg,
				complex_test_vector[i].start_trb,
				complex_test_vector[i].end_trb,
				complex_test_vector[i].input_dma,
				complex_test_vector[i].result_seg,
				"Complex", i);
		if (ret < 0)
			return ret;
	}
	xhci_dbg(xhci, "TRB math tests passed.\n");
	return 0;
}

static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
{
	u64 temp;
	dma_addr_t deq;

	deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
			xhci->event_ring->dequeue);
	if (deq == 0 && !in_interrupt())
		xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr.\n");
	/* Update HC event ring dequeue pointer */
	temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp &= ERST_PTR_MASK;
	/* Don't clear the EHB bit (which is RW1C) because
	 * there might be more events to service.
	 */
	temp &= ~ERST_EHB;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Write event ring dequeue pointer, preserving EHB bit");
	xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
			&xhci->ir_set->erst_dequeue);
}

static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
		__le32 __iomem *addr, u8 major_revision, int max_caps)
{
	u32 temp, port_offset, port_count;
	int i;

	if (major_revision > 0x03) {
		xhci_warn(xhci, "Ignoring unknown port speed, Ext Cap %p, revision = 0x%x\n",
				addr, major_revision);
		/* Ignoring port protocol we can't understand. FIXME */
		return;
	}

	/* Port offset and count in the third dword, see section 7.2 */
	temp = xhci_readl(xhci, addr + 2);
	port_offset = XHCI_EXT_PORT_OFF(temp);
	port_count = XHCI_EXT_PORT_COUNT(temp);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Ext Cap %p, port offset = %u, count = %u, revision = 0x%x",
			addr, port_offset, port_count, major_revision);
	/* Port count includes the current port offset */
	if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
		/* WTF? "Valid values are ‘1’ to MaxPorts" */
		return;

	/* cache usb2 port capabilities */
	if (major_revision < 0x03 && xhci->num_ext_caps < max_caps)
		xhci->ext_caps[xhci->num_ext_caps++] = temp;

	/* Check the host's USB2 LPM capability */
	if ((xhci->hci_version == 0x96) && (major_revision != 0x03) &&
			(temp & XHCI_L1C)) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"xHCI 0.96: support USB2 software lpm");
		xhci->sw_lpm_support = 1;
	}

	if ((xhci->hci_version >= 0x100) && (major_revision != 0x03)) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"xHCI 1.0: support USB2 software lpm");
		xhci->sw_lpm_support = 1;
		if (temp & XHCI_HLC) {
			xhci_dbg_trace(xhci, trace_xhci_dbg_init,
					"xHCI 1.0: support USB2 hardware lpm");
			xhci->hw_lpm_support = 1;
		}
	}
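
	/* port_offset is 1-based in the Supported Protocol Capability
	 * (valid values are 1..MaxPorts); convert it to a 0-based index
	 * into port_array.
	 */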
	port_offset--;
	for (i = port_offset; i < (port_offset + port_count); i++) {
		/* Duplicate entry.  Ignore the port if the revisions differ. */
		if (xhci->port_array[i] != 0) {
			xhci_warn(xhci, "Duplicate port entry, Ext Cap %p, port %u\n",
					addr, i);
			xhci_warn(xhci, "Port was marked as USB %u, duplicated as USB %u\n",
					xhci->port_array[i], major_revision);
			/* Only adjust the roothub port counts if we haven't
			 * found a similar duplicate.
			 */
			if (xhci->port_array[i] != major_revision &&
					xhci->port_array[i] != DUPLICATE_ENTRY) {
				if (xhci->port_array[i] == 0x03)
					xhci->num_usb3_ports--;
				else
					xhci->num_usb2_ports--;
				xhci->port_array[i] = DUPLICATE_ENTRY;
			}
			/* FIXME: Should we disable the port? */
			continue;
		}
		xhci->port_array[i] = major_revision;
		if (major_revision == 0x03)
			xhci->num_usb3_ports++;
		else
			xhci->num_usb2_ports++;
	}
	/* FIXME: Should we disable ports not in the Extended Capabilities? */
}

/*
 * Scan the Extended Capabilities for the "Supported Protocol Capabilities" that
 * specify what speeds each port is supposed to be.  We can't count on the port
 * speed bits in the PORTSC register being correct until a device is connected,
 * but we need to set up the two fake roothubs with the correct number of USB
 * 3.0 and USB 2.0 ports at host controller initialization time.
 */
static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
{
	__le32 __iomem *addr, *tmp_addr;
	u32 offset, tmp_offset;
	unsigned int num_ports;
	int i, j, port_index;
	int cap_count = 0;

	addr = &xhci->cap_regs->hcc_params;
	offset = XHCI_HCC_EXT_CAPS(xhci_readl(xhci, addr));
	if (offset == 0) {
		xhci_err(xhci, "No Extended Capability registers, unable to set up roothub\n");
		return -ENODEV;
	}

	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	xhci->port_array = kzalloc(sizeof(*xhci->port_array)*num_ports, flags);
	if (!xhci->port_array)
		return -ENOMEM;

	xhci->rh_bw = kzalloc(sizeof(*xhci->rh_bw)*num_ports, flags);
	if (!xhci->rh_bw)
		return -ENOMEM;
	for (i = 0; i < num_ports; i++) {
		struct xhci_interval_bw_table *bw_table;

		INIT_LIST_HEAD(&xhci->rh_bw[i].tts);
		bw_table = &xhci->rh_bw[i].bw_table;
		for (j = 0; j < XHCI_MAX_INTERVAL; j++)
			INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
	}

	/*
	 * For whatever reason, the first capability offset is from the
	 * capability register base, not from the HCCPARAMS register.
	 * See section 5.3.6 for offset calculation.
	 */
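	/* Note: the extended capability offsets are in 32-bit words.  Since
	 * addr is a __le32 pointer, the pointer arithmetic below scales them
	 * to byte offsets automatically.
	 */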
	addr = &xhci->cap_regs->hc_capbase + offset;

	tmp_addr = addr;
	tmp_offset = offset;

	/* count extended protocol capability entries for later caching */
	do {
		u32 cap_id;

		cap_id = xhci_readl(xhci, tmp_addr);
		if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL)
			cap_count++;
		tmp_offset = XHCI_EXT_CAPS_NEXT(cap_id);
		tmp_addr += tmp_offset;
	} while (tmp_offset);

	xhci->ext_caps = kzalloc(sizeof(*xhci->ext_caps) * cap_count, flags);
	if (!xhci->ext_caps)
		return -ENOMEM;

	while (1) {
		u32 cap_id;

		cap_id = xhci_readl(xhci, addr);
		if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL)
			xhci_add_in_port(xhci, num_ports, addr,
					(u8) XHCI_EXT_PORT_MAJOR(cap_id),
					cap_count);
		offset = XHCI_EXT_CAPS_NEXT(cap_id);
		if (!offset || (xhci->num_usb2_ports + xhci->num_usb3_ports)
				== num_ports)
			break;
		/*
		 * Once you're into the Extended Capabilities, the offset is
		 * always relative to the register holding the offset.
		 */
		addr += offset;
	}

	if (xhci->num_usb2_ports == 0 && xhci->num_usb3_ports == 0) {
		xhci_warn(xhci, "No ports on the roothubs?\n");
		return -ENODEV;
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Found %u USB 2.0 ports and %u USB 3.0 ports.",
			xhci->num_usb2_ports, xhci->num_usb3_ports);

	/* Place limits on the number of roothub ports so that the hub
	 * descriptors aren't longer than the USB core will allocate.
	 */
	if (xhci->num_usb3_ports > 15) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"Limiting USB 3.0 roothub ports to 15.");
		xhci->num_usb3_ports = 15;
	}
	if (xhci->num_usb2_ports > USB_MAXCHILDREN) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"Limiting USB 2.0 roothub ports to %u.",
				USB_MAXCHILDREN);
		xhci->num_usb2_ports = USB_MAXCHILDREN;
	}

	/*
	 * Note we could have all USB 3.0 ports, or all USB 2.0 ports.
	 * Not sure how the USB core will handle a hub with no ports...
	 */
	if (xhci->num_usb2_ports) {
		xhci->usb2_ports = kmalloc(sizeof(*xhci->usb2_ports)*
				xhci->num_usb2_ports, flags);
		if (!xhci->usb2_ports)
			return -ENOMEM;

		port_index = 0;
		for (i = 0; i < num_ports; i++) {
			if (xhci->port_array[i] == 0x03 ||
					xhci->port_array[i] == 0 ||
					xhci->port_array[i] == DUPLICATE_ENTRY)
				continue;

			xhci->usb2_ports[port_index] =
				&xhci->op_regs->port_status_base +
				NUM_PORT_REGS*i;
			xhci_dbg_trace(xhci, trace_xhci_dbg_init,
					"USB 2.0 port at index %u, addr = %p",
					i, xhci->usb2_ports[port_index]);
			port_index++;
			if (port_index == xhci->num_usb2_ports)
				break;
		}
	}
	if (xhci->num_usb3_ports) {
		xhci->usb3_ports = kmalloc(sizeof(*xhci->usb3_ports)*
				xhci->num_usb3_ports, flags);
		if (!xhci->usb3_ports)
			return -ENOMEM;

		port_index = 0;
		for (i = 0; i < num_ports; i++)
			if (xhci->port_array[i] == 0x03) {
				xhci->usb3_ports[port_index] =
					&xhci->op_regs->port_status_base +
					NUM_PORT_REGS*i;
				xhci_dbg_trace(xhci, trace_xhci_dbg_init,
						"USB 3.0 port at index %u, addr = %p",
						i, xhci->usb3_ports[port_index]);
				port_index++;
				if (port_index == xhci->num_usb3_ports)
					break;
			}
	}
	return 0;
}

int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
	dma_addr_t dma;
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	unsigned int val, val2;
	u64 val_64;
	struct xhci_segment *seg;
	u32 page_size, temp;
	int i;

	INIT_LIST_HEAD(&xhci->lpm_failed_devs);
	INIT_LIST_HEAD(&xhci->cancel_cmd_list);

	page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Supported page size register = 0x%x", page_size);
	for (i = 0; i < 16; i++) {
		if ((0x1 & page_size) != 0)
			break;
		page_size = page_size >> 1;
	}
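	/* Bit n of the PAGESIZE register selects a 2^(n+12) byte page,
	 * which is what the (1 << (i+12)) computation below decodes;
	 * e.g. a register value of 0x1 means only 4K pages are supported.
	 */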
	if (i < 16)
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Supported page size of %iK", (1 << (i+12)) / 1024);
	else
		xhci_warn(xhci, "WARN: no supported page size\n");
	/* Use 4K pages, since that's common and the minimum the HC supports */
	xhci->page_shift = 12;
	xhci->page_size = 1 << xhci->page_shift;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"HCD page size set to %iK", xhci->page_size / 1024);

	/*
	 * Program the Number of Device Slots Enabled field in the CONFIG
	 * register with the max value of slots the HC can handle.
	 */
	val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1));
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// xHC can handle at most %d device slots.", val);
	val2 = xhci_readl(xhci, &xhci->op_regs->config_reg);
	val |= (val2 & ~HCS_SLOTS_MASK);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Setting Max device slots reg = 0x%x.", val);
	xhci_writel(xhci, val, &xhci->op_regs->config_reg);

	/*
	 * Section 5.4.6 - the Device Context Base Address Array must be
	 * "physically contiguous and 64-byte (cache line) aligned".
	 */
	xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
			flags);
	if (!xhci->dcbaa)
		goto fail;
	memset(xhci->dcbaa, 0, sizeof(*xhci->dcbaa));
	xhci->dcbaa->dma = dma;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Device context base array address = 0x%llx (DMA), %p (virt)",
			(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
	xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);

	/*
	 * Initialize the ring segment pool.  The ring must be a contiguous
	 * structure comprised of TRBs.  The TRBs must be 16 byte aligned,
	 * however, the command ring segment needs 64-byte aligned segments,
	 * so we pick the greater alignment need.
	 */
	xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
			TRB_SEGMENT_SIZE, 64, xhci->page_size);

	/* See Table 46 and Note on Figure 55 */
	xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
			2112, 64, xhci->page_size);
	if (!xhci->segment_pool || !xhci->device_pool)
		goto fail;

	/* Linear stream context arrays don't have any boundary restrictions,
	 * and only need to be 16-byte aligned.
	 */
	xhci->small_streams_pool =
		dma_pool_create("xHCI 256 byte stream ctx arrays",
			dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
	xhci->medium_streams_pool =
		dma_pool_create("xHCI 1KB stream ctx arrays",
			dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
	/* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
	 * will be allocated with dma_alloc_coherent()
	 */

	if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
		goto fail;

	/* Set up the command ring to have one segment for now. */
	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, flags);
	if (!xhci->cmd_ring)
		goto fail;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Allocated command ring at %p", xhci->cmd_ring);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%llx",
			(unsigned long long)xhci->cmd_ring->first_seg->dma);

	/* Set the address in the Command Ring Control register */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
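	/* The composition above keeps the reserved/control bits (5:0) read
	 * back from CRCR, installs the 64-byte aligned address of the first
	 * segment, and seeds the Ring Cycle State bit with our producer
	 * cycle state.
	 */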
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Setting command ring address to 0x%016llx",
			(unsigned long long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	xhci->lpm_command = xhci_alloc_command(xhci, true, true, flags);
	if (!xhci->lpm_command)
		goto fail;

	/* Reserve one command ring TRB for disabling LPM.
	 * Since the USB core grabs the shared usb_bus bandwidth mutex before
	 * disabling LPM, we only need to reserve one TRB for all devices.
	 */
	xhci->cmd_ring_reserved_trbs++;

	val = xhci_readl(xhci, &xhci->cap_regs->db_off);
	val &= DBOFF_MASK;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Doorbell array is located at offset 0x%x from cap regs base addr",
			val);
	xhci->dba = (void __iomem *) xhci->cap_regs + val;
	xhci_dbg_regs(xhci);
	xhci_print_run_regs(xhci);
	/* Set ir_set to interrupt register set 0 */
	xhci->ir_set = &xhci->run_regs->ir_set[0];

	/*
	 * Event ring setup: Allocate a normal ring, but also setup
	 * the event ring segment table (ERST).  Section 4.9.3.
	 */
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
			flags);
	if (!xhci->event_ring)
		goto fail;
	if (xhci_check_trb_in_td_math(xhci, flags) < 0)
		goto fail;

	xhci->erst.entries = dma_alloc_coherent(dev,
			sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma,
			flags);
	if (!xhci->erst.entries)
		goto fail;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Allocated event ring segment table at 0x%llx",
			(unsigned long long)dma);
	memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
	xhci->erst.num_entries = ERST_NUM_SEGS;
	xhci->erst.erst_dma_addr = dma;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx",
			xhci->erst.num_entries,
			xhci->erst.entries,
			(unsigned long long)xhci->erst.erst_dma_addr);

	/* set ring base address and size for each segment table entry */
	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
		struct xhci_erst_entry *entry = &xhci->erst.entries[val];

		entry->seg_addr = cpu_to_le64(seg->dma);
		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
		entry->rsvd = 0;
		seg = seg->next;
	}
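	/* With ERST_NUM_SEGS entries, the walk above visits every segment of
	 * the event ring exactly once; the segment list is circular, so seg
	 * ends up back at first_seg.
	 */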
	/* set ERST count with the number of entries in the segment table */
	val = xhci_readl(xhci, &xhci->ir_set->erst_size);
	val &= ERST_SIZE_MASK;
	val |= ERST_NUM_SEGS;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Write ERST size = %i to ir_set 0 (some bits preserved)",
			val);
	xhci_writel(xhci, val, &xhci->ir_set->erst_size);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Set ERST entries to point to event ring.");
	/* set the segment table base address */
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Set ERST base address for ir_set 0 = 0x%llx",
			(unsigned long long)xhci->erst.erst_dma_addr);
	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	val_64 &= ERST_PTR_MASK;
	val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
	xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);

	/* Set the event ring dequeue address */
	xhci_set_hc_event_deq(xhci);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Wrote ERST address to ir_set 0.");
	xhci_print_ir_set(xhci, 0);

	/*
	 * XXX: Might need to set the Interrupter Moderation Register to
	 * something other than the default (~1ms minimum between interrupts).
	 * See section 5.5.1.2.
	 */
	init_completion(&xhci->addr_dev);
	for (i = 0; i < MAX_HC_SLOTS; ++i)
		xhci->devs[i] = NULL;
	for (i = 0; i < USB_MAXCHILDREN; ++i) {
		xhci->bus_state[0].resume_done[i] = 0;
		xhci->bus_state[1].resume_done[i] = 0;
	}

	if (scratchpad_alloc(xhci, flags))
		goto fail;
	if (xhci_setup_port_arrays(xhci, flags))
		goto fail;

	/* Enable USB 3.0 device notifications for function remote wake, which
	 * is necessary for allowing USB 3.0 devices to do remote wakeup from
	 * U3 (device suspend).
	 */
	temp = xhci_readl(xhci, &xhci->op_regs->dev_notification);
	temp &= ~DEV_NOTE_MASK;
	temp |= DEV_NOTE_FWAKE;
	xhci_writel(xhci, temp, &xhci->op_regs->dev_notification);

	return 0;

fail:
	xhci_warn(xhci, "Couldn't initialize memory\n");
	xhci_halt(xhci);
	xhci_reset(xhci);
	xhci_mem_cleanup(xhci);
	return -ENOMEM;
}