wa-xfer.c

  1. /*
  2. * WUSB Wire Adapter
  3. * Data transfer and URB enqueuing
  4. *
  5. * Copyright (C) 2005-2006 Intel Corporation
  6. * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
  7. *
  8. * This program is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU General Public License version
  10. * 2 as published by the Free Software Foundation.
  11. *
  12. * This program is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU General Public License
  18. * along with this program; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  20. * 02110-1301, USA.
  21. *
  22. *
  23. * How transfers work: get a buffer, break it up into segments (the segment
  24. * size is a multiple of the maxpacket size). For each segment, issue a segment
  25. * request (struct wa_xfer_*), then send the data buffer if outbound, or nothing
  26. * if inbound (all over the DTO endpoint); see the sizing sketch after this comment.
  27. *
  28. * For each submitted segment request, a notification will come over the
  29. * NEP endpoint and a transfer result (struct wa_xfer_result) will arrive
  30. * in the DTI URB. Read it, get the xfer ID, see if there is data coming
  31. * (inbound transfer), schedule a read and handle it.
  32. *
  33. * Sounds simple; it is a pain to implement.
  34. *
  35. *
  36. * ENTRY POINTS
  37. *
  38. * FIXME
  39. *
  40. * LIFE CYCLE / STATE DIAGRAM
  41. *
  42. * FIXME
  43. *
  44. * THIS CODE IS DISGUSTING
  45. *
  46. * You have been warned; it's my second try and I'm still not happy with it.
  47. *
  48. * NOTES:
  49. *
  50. * - No iso
  51. *
  52. * - Supports DMA xfers, control, bulk and maybe interrupt
  53. *
  54. * - Does not recycle unused rpipes
  55. *
  56. * An rpipe is assigned to an endpoint the first time it is used,
  57. * and then it's there, assigned, until the endpoint is disabled
  58. * (destroyed [{h,d}wahc_op_ep_disable()]). The assignment of the
  59. * rpipe to the endpoint is done under the wa->rpipe_sem semaphore
  60. * (should be a mutex).
  61. *
  62. * There are two ways it could be done:
  63. *
  64. * (a) set up a timer every time an rpipe's use count drops to 1
  65. * (which means unused) or when a transfer ends. Reset the
  66. * timer when an xfer is queued. If the timer expires, release
  67. * the rpipe [see rpipe_ep_disable()].
  68. *
  69. * (b) when looking for free rpipes to attach [rpipe_get_by_ep()],
  70. * when none are found, go over the list, check each rpipe's endpoint
  71. * and its activity record, and take one that has been idle (no
  72. * last-xfer-done-ts in the last x seconds).
  73. *
  74. * However, because we have a set of limited resources
  75. * (max-segments-at-the-same-time per xfer, xfers-per-rpipe,
  76. * blocks-per-rpipe, rpipes-per-host), in the end we are going to
  77. * have to rebuild all this around a scheduler: keep a list of
  78. * transactions to do and, based on the availability of the different
  79. * required components (blocks, rpipes, segment slots, etc), go
  80. * scheduling them. Painful.
  81. */
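/*
 * Illustrative sketch (hypothetical numbers, not compiled) of the sizing
 * math used by __wa_xfer_setup_sizes() below: seg_size comes from the
 * rpipe block count and block size, is rounded down to a multiple of the
 * endpoint's max packet size, and the URB buffer is then split into
 * DIV_ROUND_UP(length, seg_size) segment requests.
 */
#if 0
static void wa_xfer_sizing_example(void)
{
	size_t wblocks = 4;			/* hypothetical wBlocks */
	unsigned block_size_enc = 10;		/* hypothetical bRPipeBlockSize */
	size_t maxpktsize = 512, length = 12000; /* hypothetical URB length */
	size_t seg_size, segs;

	/* blocks * 2^(bRPipeBlockSize - 1): 4 * 512 = 2048 */
	seg_size = wblocks * (1 << (block_size_enc - 1));
	/* round down to a multiple of maxpktsize: still 2048 */
	seg_size = (seg_size / maxpktsize) * maxpktsize;
	/* number of segment requests: DIV_ROUND_UP(12000, 2048) = 6 */
	segs = (length + seg_size - 1) / seg_size;
}
#endif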
  82. #include <linux/init.h>
  83. #include <linux/spinlock.h>
  84. #include <linux/slab.h>
  85. #include <linux/hash.h>
  86. #include <linux/ratelimit.h>
  87. #include <linux/export.h>
  88. #include <linux/scatterlist.h>
  89. #include "wa-hc.h"
  90. #include "wusbhc.h"
  91. enum {
  92. WA_SEGS_MAX = 255,
  93. };
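/*
 * Rough life cycle of a segment, as driven by the code below:
 *
 *   NOTREADY/READY -> SUBMITTED (possibly via DELAYED while the rpipe is
 *   full) -> PENDING (request accepted by the wire adapter) ->
 *   DTI_PENDING (inbound data read outstanding) -> DONE,
 *   with ERROR or ABORTED possible at any point after submission.
 */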
  94. enum wa_seg_status {
  95. WA_SEG_NOTREADY,
  96. WA_SEG_READY,
  97. WA_SEG_DELAYED,
  98. WA_SEG_SUBMITTED,
  99. WA_SEG_PENDING,
  100. WA_SEG_DTI_PENDING,
  101. WA_SEG_DONE,
  102. WA_SEG_ERROR,
  103. WA_SEG_ABORTED,
  104. };
  105. static void wa_xfer_delayed_run(struct wa_rpipe *);
  106. /*
  107. * Life cycle governed by 'struct urb' (the refcount of the struct is
  108. * that of the 'struct urb' and usb_free_urb() would free the whole
  109. * struct).
  110. */
  111. struct wa_seg {
  112. struct urb tr_urb; /* transfer request urb. */
  113. struct urb *dto_urb; /* for data output. */
  114. struct list_head list_node; /* for rpipe->req_list */
  115. struct wa_xfer *xfer; /* out xfer */
  116. u8 index; /* which segment we are */
  117. enum wa_seg_status status;
  118. ssize_t result; /* bytes xfered or error */
  119. struct wa_xfer_hdr xfer_hdr;
  120. u8 xfer_extra[]; /* extra space for xfer_hdr_ctl */
  121. };
  122. static inline void wa_seg_init(struct wa_seg *seg)
  123. {
  124. usb_init_urb(&seg->tr_urb);
  125. /* set the remaining memory to 0. */
  126. memset(((void *)seg) + sizeof(seg->tr_urb), 0,
  127. sizeof(*seg) - sizeof(seg->tr_urb));
  128. }
  129. /*
  130. * Protected by xfer->lock
  131. *
  132. */
  133. struct wa_xfer {
  134. struct kref refcnt;
  135. struct list_head list_node;
  136. spinlock_t lock;
  137. u32 id;
  138. struct wahc *wa; /* Wire adapter we are plugged to */
  139. struct usb_host_endpoint *ep;
  140. struct urb *urb; /* URB we are transferring for */
  141. struct wa_seg **seg; /* transfer segments */
  142. u8 segs, segs_submitted, segs_done;
  143. unsigned is_inbound:1;
  144. unsigned is_dma:1;
  145. size_t seg_size;
  146. int result;
  147. gfp_t gfp; /* allocation mask */
  148. struct wusb_dev *wusb_dev; /* for activity timestamps */
  149. };
  150. static inline void wa_xfer_init(struct wa_xfer *xfer)
  151. {
  152. kref_init(&xfer->refcnt);
  153. INIT_LIST_HEAD(&xfer->list_node);
  154. spin_lock_init(&xfer->lock);
  155. }
  156. /*
  157. * Destroy a transfer structure
  158. *
  159. * Note that freeing xfer->seg[cnt]->tr_urb will free the containing
  160. * xfer->seg[cnt] memory that was allocated by __wa_xfer_setup_segs.
  161. */
  162. static void wa_xfer_destroy(struct kref *_xfer)
  163. {
  164. struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt);
  165. if (xfer->seg) {
  166. unsigned cnt;
  167. for (cnt = 0; cnt < xfer->segs; cnt++) {
  168. if (xfer->seg[cnt]) {
  169. if (xfer->seg[cnt]->dto_urb) {
  170. kfree(xfer->seg[cnt]->dto_urb->sg);
  171. usb_free_urb(xfer->seg[cnt]->dto_urb);
  172. }
  173. usb_free_urb(&xfer->seg[cnt]->tr_urb);
  174. }
  175. }
  176. kfree(xfer->seg);
  177. }
  178. kfree(xfer);
  179. }
  180. static void wa_xfer_get(struct wa_xfer *xfer)
  181. {
  182. kref_get(&xfer->refcnt);
  183. }
  184. static void wa_xfer_put(struct wa_xfer *xfer)
  185. {
  186. kref_put(&xfer->refcnt, wa_xfer_destroy);
  187. }
  188. /*
  189. * xfer is referenced
  190. *
  191. * xfer->lock has to be unlocked
  192. *
  193. * We take xfer->lock for setting the result; this is a barrier
  194. * against drivers/usb/core/hcd.c:unlink1() being called after we call
  195. * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a
  196. * reference to the transfer.
  197. */
  198. static void wa_xfer_giveback(struct wa_xfer *xfer)
  199. {
  200. unsigned long flags;
  201. spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
  202. list_del_init(&xfer->list_node);
  203. spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
  204. /* FIXME: segmentation broken -- kills DWA */
  205. wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
  206. wa_put(xfer->wa);
  207. wa_xfer_put(xfer);
  208. }
  209. /*
  210. * xfer is referenced
  211. *
  212. * xfer->lock has to be unlocked
  213. */
  214. static void wa_xfer_completion(struct wa_xfer *xfer)
  215. {
  216. if (xfer->wusb_dev)
  217. wusb_dev_put(xfer->wusb_dev);
  218. rpipe_put(xfer->ep->hcpriv);
  219. wa_xfer_giveback(xfer);
  220. }
  221. /*
  222. * Initialize a transfer's ID
  223. *
  224. * We need to use a sequential number; if we use the pointer or the
  225. * hash of the pointer, it can repeat over sequential transfers and
  226. * then it will confuse the HWA....wonder why in hell they put a 32
  227. * bit handle in there then.
  228. */
  229. static void wa_xfer_id_init(struct wa_xfer *xfer)
  230. {
  231. xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
  232. }
  233. /* Return the xfer's ID. */
  234. static inline u32 wa_xfer_id(struct wa_xfer *xfer)
  235. {
  236. return xfer->id;
  237. }
  238. /* Return the xfer's ID in transport format (little endian). */
  239. static inline __le32 wa_xfer_id_le32(struct wa_xfer *xfer)
  240. {
  241. return cpu_to_le32(xfer->id);
  242. }
  243. /*
  244. * If transfer is done, wrap it up and return true
  245. *
  246. * xfer->lock has to be locked
  247. */
  248. static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
  249. {
  250. struct device *dev = &xfer->wa->usb_iface->dev;
  251. unsigned result, cnt;
  252. struct wa_seg *seg;
  253. struct urb *urb = xfer->urb;
  254. unsigned found_short = 0;
  255. result = xfer->segs_done == xfer->segs_submitted;
  256. if (result == 0)
  257. goto out;
  258. urb->actual_length = 0;
  259. for (cnt = 0; cnt < xfer->segs; cnt++) {
  260. seg = xfer->seg[cnt];
  261. switch (seg->status) {
  262. case WA_SEG_DONE:
  263. if (found_short && seg->result > 0) {
  264. dev_dbg(dev, "xfer %p ID %08X#%u: bad short segments (%zu)\n",
  265. xfer, wa_xfer_id(xfer), cnt,
  266. seg->result);
  267. urb->status = -EINVAL;
  268. goto out;
  269. }
  270. urb->actual_length += seg->result;
  271. if (seg->result < xfer->seg_size
  272. && cnt != xfer->segs-1)
  273. found_short = 1;
  274. dev_dbg(dev, "xfer %p ID %08X#%u: DONE short %d "
  275. "result %zu urb->actual_length %d\n",
  276. xfer, wa_xfer_id(xfer), seg->index, found_short,
  277. seg->result, urb->actual_length);
  278. break;
  279. case WA_SEG_ERROR:
  280. xfer->result = seg->result;
  281. dev_dbg(dev, "xfer %p ID %08X#%u: ERROR result %zu(0x%08X)\n",
  282. xfer, wa_xfer_id(xfer), seg->index, seg->result,
  283. seg->result);
  284. goto out;
  285. case WA_SEG_ABORTED:
  286. dev_dbg(dev, "xfer %p ID %08X#%u ABORTED: result %d\n",
  287. xfer, wa_xfer_id(xfer), seg->index,
  288. urb->status);
  289. xfer->result = urb->status;
  290. goto out;
  291. default:
  292. dev_warn(dev, "xfer %p ID %08X#%u: is_done bad state %d\n",
  293. xfer, wa_xfer_id(xfer), cnt, seg->status);
  294. xfer->result = -EINVAL;
  295. goto out;
  296. }
  297. }
  298. xfer->result = 0;
  299. out:
  300. return result;
  301. }
  302. /*
  303. * Search for a transfer by ID on the wire adapter's transfer list
  304. *
  305. * The ID is the sequential number assigned by wa_xfer_id_init(); see
  306. * the comment above wa_xfer_id_init() for why a pointer hash is not used.
  307. *
  308. * @returns NULL if not found.
  309. */
  310. static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id)
  311. {
  312. unsigned long flags;
  313. struct wa_xfer *xfer_itr;
  314. spin_lock_irqsave(&wa->xfer_list_lock, flags);
  315. list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) {
  316. if (id == xfer_itr->id) {
  317. wa_xfer_get(xfer_itr);
  318. goto out;
  319. }
  320. }
  321. xfer_itr = NULL;
  322. out:
  323. spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
  324. return xfer_itr;
  325. }
  326. struct wa_xfer_abort_buffer {
  327. struct urb urb;
  328. struct wa_xfer_abort cmd;
  329. };
  330. static void __wa_xfer_abort_cb(struct urb *urb)
  331. {
  332. struct wa_xfer_abort_buffer *b = urb->context;
  333. usb_put_urb(&b->urb);
  334. }
  335. /*
  336. * Aborts an ongoing transaction
  337. *
  338. * Assumes the transfer is referenced and locked and in a submitted
  339. * state (mainly that there is an endpoint/rpipe assigned).
  340. *
  341. * The callback (see above) does nothing but free up the data by
  342. * putting the URB. Because the URB is allocated at the head of the
  343. * struct, the whole space we allocated is kfreed.
  344. *
  345. * We'll get an 'aborted transaction' xfer result on DTI, which we'll
  346. * politely ignore because at this point the transaction has been
  347. * marked as aborted already.
  348. */
  349. static void __wa_xfer_abort(struct wa_xfer *xfer)
  350. {
  351. int result;
  352. struct device *dev = &xfer->wa->usb_iface->dev;
  353. struct wa_xfer_abort_buffer *b;
  354. struct wa_rpipe *rpipe = xfer->ep->hcpriv;
  355. b = kmalloc(sizeof(*b), GFP_ATOMIC);
  356. if (b == NULL)
  357. goto error_kmalloc;
  358. b->cmd.bLength = sizeof(b->cmd);
  359. b->cmd.bRequestType = WA_XFER_ABORT;
  360. b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
  361. b->cmd.dwTransferID = wa_xfer_id_le32(xfer);
  362. usb_init_urb(&b->urb);
  363. usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
  364. usb_sndbulkpipe(xfer->wa->usb_dev,
  365. xfer->wa->dto_epd->bEndpointAddress),
  366. &b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
  367. result = usb_submit_urb(&b->urb, GFP_ATOMIC);
  368. if (result < 0)
  369. goto error_submit;
  370. return; /* callback frees! */
  371. error_submit:
  372. if (printk_ratelimit())
  373. dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
  374. xfer, result);
  375. kfree(b);
  376. error_kmalloc:
  377. return;
  378. }
  379. /*
  380. *
  381. * @returns < 0 on error, transfer segment request size if ok
  382. */
  383. static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
  384. enum wa_xfer_type *pxfer_type)
  385. {
  386. ssize_t result;
  387. struct device *dev = &xfer->wa->usb_iface->dev;
  388. size_t maxpktsize;
  389. struct urb *urb = xfer->urb;
  390. struct wa_rpipe *rpipe = xfer->ep->hcpriv;
  391. switch (rpipe->descr.bmAttribute & 0x3) {
  392. case USB_ENDPOINT_XFER_CONTROL:
  393. *pxfer_type = WA_XFER_TYPE_CTL;
  394. result = sizeof(struct wa_xfer_ctl);
  395. break;
  396. case USB_ENDPOINT_XFER_INT:
  397. case USB_ENDPOINT_XFER_BULK:
  398. *pxfer_type = WA_XFER_TYPE_BI;
  399. result = sizeof(struct wa_xfer_bi);
  400. break;
  401. case USB_ENDPOINT_XFER_ISOC:
  402. dev_err(dev, "FIXME: ISOC not implemented\n");
  403. result = -ENOSYS;
  404. goto error;
  405. default:
  406. /* never happens */
  407. BUG();
  408. result = -EINVAL; /* shut gcc up */
  409. };
  410. xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
  411. xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
  412. xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
  413. * 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
  414. /* Compute the segment size and make sure it is a multiple of
  415. * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
  416. * a check (FIXME) */
  417. maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
  418. if (xfer->seg_size < maxpktsize) {
  419. dev_err(dev, "HW BUG? seg_size %zu smaller than maxpktsize "
  420. "%zu\n", xfer->seg_size, maxpktsize);
  421. result = -EINVAL;
  422. goto error;
  423. }
  424. xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
  425. xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length, xfer->seg_size);
  426. if (xfer->segs >= WA_SEGS_MAX) {
  427. dev_err(dev, "BUG? ops, number of segments %d bigger than %d\n",
  428. (int)(urb->transfer_buffer_length / xfer->seg_size),
  429. WA_SEGS_MAX);
  430. result = -EINVAL;
  431. goto error;
  432. }
  433. if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
  434. xfer->segs = 1;
  435. error:
  436. return result;
  437. }
  438. /* Fill in the common request header and xfer-type specific data. */
  439. static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
  440. struct wa_xfer_hdr *xfer_hdr0,
  441. enum wa_xfer_type xfer_type,
  442. size_t xfer_hdr_size)
  443. {
  444. struct wa_rpipe *rpipe = xfer->ep->hcpriv;
  445. xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
  446. xfer_hdr0->bLength = xfer_hdr_size;
  447. xfer_hdr0->bRequestType = xfer_type;
  448. xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
  449. xfer_hdr0->dwTransferID = wa_xfer_id_le32(xfer);
  450. xfer_hdr0->bTransferSegment = 0;
  451. switch (xfer_type) {
  452. case WA_XFER_TYPE_CTL: {
  453. struct wa_xfer_ctl *xfer_ctl =
  454. container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
  455. xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
  456. memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
  457. sizeof(xfer_ctl->baSetupData));
  458. break;
  459. }
  460. case WA_XFER_TYPE_BI:
  461. break;
  462. case WA_XFER_TYPE_ISO:
  463. printk(KERN_ERR "FIXME: ISOC not implemented\n");
  464. default:
  465. BUG();
  466. };
  467. }
  468. /*
  469. * Callback for the OUT data phase of the segment request
  470. *
  471. * Check wa_seg_tr_cb(); most comments also apply here because this
  472. * function does almost the same thing and they work closely
  473. * together.
  474. *
  475. * If the seg request has failed but this DTO phase has succeeded,
  476. * wa_seg_tr_cb() has already failed the segment and moved the
  477. * status to WA_SEG_ERROR, so this will go through 'case 0' and
  478. * effectively do nothing.
  479. */
  480. static void wa_seg_dto_cb(struct urb *urb)
  481. {
  482. struct wa_seg *seg = urb->context;
  483. struct wa_xfer *xfer = seg->xfer;
  484. struct wahc *wa;
  485. struct device *dev;
  486. struct wa_rpipe *rpipe;
  487. unsigned long flags;
  488. unsigned rpipe_ready = 0;
  489. u8 done = 0;
  490. /* free the sg if it was used. */
  491. kfree(urb->sg);
  492. urb->sg = NULL;
  493. switch (urb->status) {
  494. case 0:
  495. spin_lock_irqsave(&xfer->lock, flags);
  496. wa = xfer->wa;
  497. dev = &wa->usb_iface->dev;
  498. dev_dbg(dev, "xfer %p#%u: data out done (%d bytes)\n",
  499. xfer, seg->index, urb->actual_length);
  500. if (seg->status < WA_SEG_PENDING)
  501. seg->status = WA_SEG_PENDING;
  502. seg->result = urb->actual_length;
  503. spin_unlock_irqrestore(&xfer->lock, flags);
  504. break;
  505. case -ECONNRESET: /* URB unlinked; no need to do anything */
  506. case -ENOENT: /* as it was done by whoever unlinked us */
  507. break;
  508. default: /* Other errors ... */
  509. spin_lock_irqsave(&xfer->lock, flags);
  510. wa = xfer->wa;
  511. dev = &wa->usb_iface->dev;
  512. rpipe = xfer->ep->hcpriv;
  513. dev_dbg(dev, "xfer %p#%u: data out error %d\n",
  514. xfer, seg->index, urb->status);
  515. if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
  516. EDC_ERROR_TIMEFRAME)){
  517. dev_err(dev, "DTO: URB max acceptable errors "
  518. "exceeded, resetting device\n");
  519. wa_reset_all(wa);
  520. }
  521. if (seg->status != WA_SEG_ERROR) {
  522. seg->status = WA_SEG_ERROR;
  523. seg->result = urb->status;
  524. xfer->segs_done++;
  525. __wa_xfer_abort(xfer);
  526. rpipe_ready = rpipe_avail_inc(rpipe);
  527. done = __wa_xfer_is_done(xfer);
  528. }
  529. spin_unlock_irqrestore(&xfer->lock, flags);
  530. if (done)
  531. wa_xfer_completion(xfer);
  532. if (rpipe_ready)
  533. wa_xfer_delayed_run(rpipe);
  534. }
  535. }
  536. /*
  537. * Callback for the segment request
  538. *
  539. * If successful, transition the segment state (unless it has already
  540. * transitioned or this is an outbound transfer); otherwise, take note
  541. * of the error, mark this segment done and try completion.
  542. *
  543. * Note we don't access the transfer until we are sure it hasn't been
  544. * cancelled (ECONNRESET, ENOENT), which could mean that seg->xfer is
  545. * already gone.
  546. *
  547. * We have to check before setting the status to WA_SEG_PENDING
  548. * because sometimes the xfer result callback arrives before this
  549. * callback (geeeeeeze), so it might happen that we are already in
  550. * another state. As well, we don't set it if the transfer is not
  551. * inbound, as in that case wa_seg_dto_cb will do it when the OUT
  552. * data phase finishes.
  553. */
  554. static void wa_seg_tr_cb(struct urb *urb)
  555. {
  556. struct wa_seg *seg = urb->context;
  557. struct wa_xfer *xfer = seg->xfer;
  558. struct wahc *wa;
  559. struct device *dev;
  560. struct wa_rpipe *rpipe;
  561. unsigned long flags;
  562. unsigned rpipe_ready;
  563. u8 done = 0;
  564. switch (urb->status) {
  565. case 0:
  566. spin_lock_irqsave(&xfer->lock, flags);
  567. wa = xfer->wa;
  568. dev = &wa->usb_iface->dev;
  569. dev_dbg(dev, "xfer %p#%u: request done\n", xfer, seg->index);
  570. if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
  571. seg->status = WA_SEG_PENDING;
  572. spin_unlock_irqrestore(&xfer->lock, flags);
  573. break;
  574. case -ECONNRESET: /* URB unlinked; no need to do anything */
  575. case -ENOENT: /* as it was done by whoever unlinked us */
  576. break;
  577. default: /* Other errors ... */
  578. spin_lock_irqsave(&xfer->lock, flags);
  579. wa = xfer->wa;
  580. dev = &wa->usb_iface->dev;
  581. rpipe = xfer->ep->hcpriv;
  582. if (printk_ratelimit())
  583. dev_err(dev, "xfer %p ID 0x%08X#%u: request error %d\n",
  584. xfer, wa_xfer_id(xfer), seg->index,
  585. urb->status);
  586. if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
  587. EDC_ERROR_TIMEFRAME)){
  588. dev_err(dev, "DTO: URB max acceptable errors "
  589. "exceeded, resetting device\n");
  590. wa_reset_all(wa);
  591. }
  592. usb_unlink_urb(seg->dto_urb);
  593. seg->status = WA_SEG_ERROR;
  594. seg->result = urb->status;
  595. xfer->segs_done++;
  596. __wa_xfer_abort(xfer);
  597. rpipe_ready = rpipe_avail_inc(rpipe);
  598. done = __wa_xfer_is_done(xfer);
  599. spin_unlock_irqrestore(&xfer->lock, flags);
  600. if (done)
  601. wa_xfer_completion(xfer);
  602. if (rpipe_ready)
  603. wa_xfer_delayed_run(rpipe);
  604. }
  605. }
  606. /*
  607. * Allocate an SG list to store bytes_to_transfer bytes and copy the
  608. * subset of the in_sg that matches the buffer subset
  609. * we are about to transfer.
  610. */
  611. static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg,
  612. const unsigned int bytes_transferred,
  613. const unsigned int bytes_to_transfer, unsigned int *out_num_sgs)
  614. {
  615. struct scatterlist *out_sg;
  616. unsigned int bytes_processed = 0, offset_into_current_page_data = 0,
  617. nents;
  618. struct scatterlist *current_xfer_sg = in_sg;
  619. struct scatterlist *current_seg_sg, *last_seg_sg;
  620. /* skip previously transferred pages. */
  621. while ((current_xfer_sg) &&
  622. (bytes_processed < bytes_transferred)) {
  623. bytes_processed += current_xfer_sg->length;
  624. /* advance the sg if current segment starts on or past the
  625. next page. */
  626. if (bytes_processed <= bytes_transferred)
  627. current_xfer_sg = sg_next(current_xfer_sg);
  628. }
  629. /* the data for the current segment starts in current_xfer_sg.
  630. calculate the offset. */
  631. if (bytes_processed > bytes_transferred) {
  632. offset_into_current_page_data = current_xfer_sg->length -
  633. (bytes_processed - bytes_transferred);
  634. }
  635. /* calculate the number of pages needed by this segment. */
  636. nents = DIV_ROUND_UP((bytes_to_transfer +
  637. offset_into_current_page_data +
  638. current_xfer_sg->offset),
  639. PAGE_SIZE);
  640. out_sg = kmalloc((sizeof(struct scatterlist) * nents), GFP_ATOMIC);
  641. if (out_sg) {
  642. sg_init_table(out_sg, nents);
  643. /* copy the portion of the incoming SG that correlates to the
  644. * data to be transferred by this segment to the segment SG. */
  645. last_seg_sg = current_seg_sg = out_sg;
  646. bytes_processed = 0;
  647. /* reset nents and calculate the actual number of sg entries
  648. needed. */
  649. nents = 0;
  650. while ((bytes_processed < bytes_to_transfer) &&
  651. current_seg_sg && current_xfer_sg) {
  652. unsigned int page_len = min((current_xfer_sg->length -
  653. offset_into_current_page_data),
  654. (bytes_to_transfer - bytes_processed));
  655. sg_set_page(current_seg_sg, sg_page(current_xfer_sg),
  656. page_len,
  657. current_xfer_sg->offset +
  658. offset_into_current_page_data);
  659. bytes_processed += page_len;
  660. last_seg_sg = current_seg_sg;
  661. current_seg_sg = sg_next(current_seg_sg);
  662. current_xfer_sg = sg_next(current_xfer_sg);
  663. /* only the first page may require additional offset. */
  664. offset_into_current_page_data = 0;
  665. nents++;
  666. }
  667. /* update num_sgs and terminate the list since we may have
  668. * concatenated pages. */
  669. sg_mark_end(last_seg_sg);
  670. *out_num_sgs = nents;
  671. }
  672. return out_sg;
  673. }
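/*
 * Worked example for wa_xfer_create_subset_sg() above (hypothetical,
 * page-aligned entries): with in_sg made of two 4096-byte entries,
 * bytes_transferred = 5000 and bytes_to_transfer = 2000, the skip loop
 * ends on the second entry with bytes_processed = 8192, so
 * offset_into_current_page_data = 4096 - (8192 - 5000) = 904 and the
 * output list is a single entry covering 2000 bytes of the second page
 * starting at offset 904.
 */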
  674. /*
  675. * Populate buffer ptr and size, DMA buffer or SG list for the dto urb.
  676. */
  677. static int __wa_populate_dto_urb(struct wa_xfer *xfer,
  678. struct wa_seg *seg, size_t buf_itr_offset, size_t buf_itr_size)
  679. {
  680. int result = 0;
  681. if (xfer->is_dma) {
  682. seg->dto_urb->transfer_dma =
  683. xfer->urb->transfer_dma + buf_itr_offset;
  684. seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
  685. seg->dto_urb->sg = NULL;
  686. seg->dto_urb->num_sgs = 0;
  687. } else {
  688. /* do buffer or SG processing. */
  689. seg->dto_urb->transfer_flags &=
  690. ~URB_NO_TRANSFER_DMA_MAP;
  691. /* this should always be 0 before a resubmit. */
  692. seg->dto_urb->num_mapped_sgs = 0;
  693. if (xfer->urb->transfer_buffer) {
  694. seg->dto_urb->transfer_buffer =
  695. xfer->urb->transfer_buffer +
  696. buf_itr_offset;
  697. seg->dto_urb->sg = NULL;
  698. seg->dto_urb->num_sgs = 0;
  699. } else {
  700. seg->dto_urb->transfer_buffer = NULL;
  701. /*
  702. * allocate an SG list to store seg_size bytes
  703. * and copy the subset of the xfer->urb->sg that
  704. * matches the buffer subset we are about to
  705. * read.
  706. */
  707. seg->dto_urb->sg = wa_xfer_create_subset_sg(
  708. xfer->urb->sg,
  709. buf_itr_offset, buf_itr_size,
  710. &(seg->dto_urb->num_sgs));
  711. if (!(seg->dto_urb->sg))
  712. result = -ENOMEM;
  713. }
  714. }
  715. seg->dto_urb->transfer_buffer_length = buf_itr_size;
  716. return result;
  717. }
  718. /*
  719. * Allocate the segs array and initialize each of them
  720. *
  721. * The segments are freed by wa_xfer_destroy() when the xfer use count
  722. * drops to zero; however, because each segment is given the same life
  723. * cycle as the USB URB it contains, it is actually freed by
  724. * usb_put_urb() on the contained USB URB (twisted, eh?).
  725. */
  726. static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
  727. {
  728. int result, cnt;
  729. size_t alloc_size = sizeof(*xfer->seg[0])
  730. - sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
  731. struct usb_device *usb_dev = xfer->wa->usb_dev;
  732. const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
  733. struct wa_seg *seg;
  734. size_t buf_itr, buf_size, buf_itr_size;
  735. result = -ENOMEM;
  736. xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
  737. if (xfer->seg == NULL)
  738. goto error_segs_kzalloc;
  739. buf_itr = 0;
  740. buf_size = xfer->urb->transfer_buffer_length;
  741. for (cnt = 0; cnt < xfer->segs; cnt++) {
  742. seg = xfer->seg[cnt] = kmalloc(alloc_size, GFP_ATOMIC);
  743. if (seg == NULL)
  744. goto error_seg_kmalloc;
  745. wa_seg_init(seg);
  746. seg->xfer = xfer;
  747. seg->index = cnt;
  748. usb_fill_bulk_urb(&seg->tr_urb, usb_dev,
  749. usb_sndbulkpipe(usb_dev,
  750. dto_epd->bEndpointAddress),
  751. &seg->xfer_hdr, xfer_hdr_size,
  752. wa_seg_tr_cb, seg);
  753. buf_itr_size = min(buf_size, xfer->seg_size);
  754. if (xfer->is_inbound == 0 && buf_size > 0) {
  755. /* outbound data. */
  756. seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
  757. if (seg->dto_urb == NULL)
  758. goto error_dto_alloc;
  759. usb_fill_bulk_urb(
  760. seg->dto_urb, usb_dev,
  761. usb_sndbulkpipe(usb_dev,
  762. dto_epd->bEndpointAddress),
  763. NULL, 0, wa_seg_dto_cb, seg);
  764. /* fill in the xfer buffer information. */
  765. result = __wa_populate_dto_urb(xfer, seg,
  766. buf_itr, buf_itr_size);
  767. if (result < 0)
  768. goto error_seg_outbound_populate;
  769. }
  770. seg->status = WA_SEG_READY;
  771. buf_itr += buf_itr_size;
  772. buf_size -= buf_itr_size;
  773. }
  774. return 0;
  775. /*
  776. * Free the memory for the current segment which failed to init.
  777. * Use the fact that cnt is left at where it failed. The remaining
  778. * segments will be cleaned up by wa_xfer_destroy.
  779. */
  780. error_seg_outbound_populate:
  781. usb_free_urb(xfer->seg[cnt]->dto_urb);
  782. error_dto_alloc:
  783. kfree(xfer->seg[cnt]);
  784. xfer->seg[cnt] = NULL;
  785. error_seg_kmalloc:
  786. error_segs_kzalloc:
  787. return result;
  788. }
  789. /*
  790. * Allocates all the stuff needed to submit a transfer
  791. *
  792. * Breaks the whole data buffer in a list of segments, each one has a
  793. * structure allocated to it and linked in xfer->seg[index]
  794. *
  795. * FIXME: merge setup_segs() and the last part of this function, no
  796. * need to do two for loops when we could run everything in a
  797. * single one
  798. */
  799. static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
  800. {
  801. int result;
  802. struct device *dev = &xfer->wa->usb_iface->dev;
  803. enum wa_xfer_type xfer_type = 0; /* shut up GCC */
  804. size_t xfer_hdr_size, cnt, transfer_size;
  805. struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr;
  806. result = __wa_xfer_setup_sizes(xfer, &xfer_type);
  807. if (result < 0)
  808. goto error_setup_sizes;
  809. xfer_hdr_size = result;
  810. result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);
  811. if (result < 0) {
  812. dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
  813. xfer, xfer->segs, result);
  814. goto error_setup_segs;
  815. }
  816. /* Fill the first header */
  817. xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
  818. wa_xfer_id_init(xfer);
  819. __wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);
  820. /* Fill remaining headers */
  821. xfer_hdr = xfer_hdr0;
  822. transfer_size = urb->transfer_buffer_length;
  823. xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
  824. cpu_to_le32(xfer->seg_size) : cpu_to_le32(transfer_size);
  825. transfer_size -= xfer->seg_size;
  826. for (cnt = 1; cnt < xfer->segs; cnt++) {
  827. xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
  828. memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
  829. xfer_hdr->bTransferSegment = cnt;
  830. xfer_hdr->dwTransferLength = transfer_size > xfer->seg_size ?
  831. cpu_to_le32(xfer->seg_size)
  832. : cpu_to_le32(transfer_size);
  833. xfer->seg[cnt]->status = WA_SEG_READY;
  834. transfer_size -= xfer->seg_size;
  835. }
  836. xfer_hdr->bTransferSegment |= 0x80; /* this is the last segment */
  837. result = 0;
  838. error_setup_segs:
  839. error_setup_sizes:
  840. return result;
  841. }
  842. /*
  843. *
  844. *
  845. * rpipe->seg_lock is held!
  846. */
  847. static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
  848. struct wa_seg *seg)
  849. {
  850. int result;
  851. /* submit the transfer request. */
  852. result = usb_submit_urb(&seg->tr_urb, GFP_ATOMIC);
  853. if (result < 0) {
  854. printk(KERN_ERR "xfer %p#%u: REQ submit failed: %d\n",
  855. xfer, seg->index, result);
  856. goto error_seg_submit;
  857. }
  858. /* submit the out data if this is an out request. */
  859. if (seg->dto_urb) {
  860. result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
  861. if (result < 0) {
  862. printk(KERN_ERR "xfer %p#%u: DTO submit failed: %d\n",
  863. xfer, seg->index, result);
  864. goto error_dto_submit;
  865. }
  866. }
  867. seg->status = WA_SEG_SUBMITTED;
  868. rpipe_avail_dec(rpipe);
  869. return 0;
  870. error_dto_submit:
  871. usb_unlink_urb(&seg->tr_urb);
  872. error_seg_submit:
  873. seg->status = WA_SEG_ERROR;
  874. seg->result = result;
  875. return result;
  876. }
  877. /*
  878. * Execute more queued request segments until the maximum concurrent allowed
  879. *
  880. * The ugly unlock/lock sequence on the error path is needed as the
  881. * xfer->lock normally nests the seg_lock and not vice versa.
  882. *
  883. */
  884. static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
  885. {
  886. int result;
  887. struct device *dev = &rpipe->wa->usb_iface->dev;
  888. struct wa_seg *seg;
  889. struct wa_xfer *xfer;
  890. unsigned long flags;
  891. spin_lock_irqsave(&rpipe->seg_lock, flags);
  892. while (atomic_read(&rpipe->segs_available) > 0
  893. && !list_empty(&rpipe->seg_list)) {
  894. seg = list_first_entry(&(rpipe->seg_list), struct wa_seg,
  895. list_node);
  896. list_del(&seg->list_node);
  897. xfer = seg->xfer;
  898. result = __wa_seg_submit(rpipe, xfer, seg);
  899. dev_dbg(dev, "xfer %p ID %08X#%u submitted from delayed [%d segments available] %d\n",
  900. xfer, wa_xfer_id(xfer), seg->index,
  901. atomic_read(&rpipe->segs_available), result);
  902. if (unlikely(result < 0)) {
  903. spin_unlock_irqrestore(&rpipe->seg_lock, flags);
  904. spin_lock_irqsave(&xfer->lock, flags);
  905. __wa_xfer_abort(xfer);
  906. xfer->segs_done++;
  907. spin_unlock_irqrestore(&xfer->lock, flags);
  908. spin_lock_irqsave(&rpipe->seg_lock, flags);
  909. }
  910. }
  911. spin_unlock_irqrestore(&rpipe->seg_lock, flags);
  912. }
  913. /*
  914. *
  915. * xfer->lock is taken
  916. *
  917. * If a submission fails we just stop submitting and return the error;
  918. * wa_urb_enqueue_b() will execute the completion path.
  919. */
  920. static int __wa_xfer_submit(struct wa_xfer *xfer)
  921. {
  922. int result;
  923. struct wahc *wa = xfer->wa;
  924. struct device *dev = &wa->usb_iface->dev;
  925. unsigned cnt;
  926. struct wa_seg *seg;
  927. unsigned long flags;
  928. struct wa_rpipe *rpipe = xfer->ep->hcpriv;
  929. size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests);
  930. u8 available;
  931. u8 empty;
  932. spin_lock_irqsave(&wa->xfer_list_lock, flags);
  933. list_add_tail(&xfer->list_node, &wa->xfer_list);
  934. spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
  935. BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests);
  936. result = 0;
  937. spin_lock_irqsave(&rpipe->seg_lock, flags);
  938. for (cnt = 0; cnt < xfer->segs; cnt++) {
  939. available = atomic_read(&rpipe->segs_available);
  940. empty = list_empty(&rpipe->seg_list);
  941. seg = xfer->seg[cnt];
  942. dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u (%s)\n",
  943. xfer, wa_xfer_id(xfer), cnt, available, empty,
  944. available == 0 || !empty ? "delayed" : "submitted");
  945. if (available == 0 || !empty) {
  946. seg->status = WA_SEG_DELAYED;
  947. list_add_tail(&seg->list_node, &rpipe->seg_list);
  948. } else {
  949. result = __wa_seg_submit(rpipe, xfer, seg);
  950. if (result < 0) {
  951. __wa_xfer_abort(xfer);
  952. goto error_seg_submit;
  953. }
  954. }
  955. xfer->segs_submitted++;
  956. }
  957. error_seg_submit:
  958. spin_unlock_irqrestore(&rpipe->seg_lock, flags);
  959. return result;
  960. }
  961. /*
  962. * Second part of a URB/transfer enqueue operation
  963. *
  964. * Assumes this comes from wa_urb_enqueue() [maybe through
  965. * wa_urb_enqueue_run()]. At this point:
  966. *
  967. * xfer->wa filled and refcounted
  968. * xfer->ep filled with rpipe refcounted if
  969. * delayed == 0
  970. * xfer->urb filled and refcounted (this is the case when called
  971. * from wa_urb_enqueue() as we come from usb_submit_urb()
  972. * and when called by wa_urb_enqueue_run(), as we took an
  973. * extra ref dropped by _run() after we return).
  974. * xfer->gfp filled
  975. *
  976. * If we fail at __wa_xfer_submit(), then we just check if we are done
  977. * and if so, we run the completion procedure. However, if we are not
  978. * yet done, we do nothing and wait for the completion handlers from
  979. * the submitted URBs or from the xfer-result path to kick in. If xfer
  980. * result never kicks in, the xfer will timeout from the USB code and
  981. * dequeue() will be called.
  982. */
  983. static void wa_urb_enqueue_b(struct wa_xfer *xfer)
  984. {
  985. int result;
  986. unsigned long flags;
  987. struct urb *urb = xfer->urb;
  988. struct wahc *wa = xfer->wa;
  989. struct wusbhc *wusbhc = wa->wusb;
  990. struct wusb_dev *wusb_dev;
  991. unsigned done;
  992. result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
  993. if (result < 0)
  994. goto error_rpipe_get;
  995. result = -ENODEV;
  996. /* FIXME: segmentation broken -- kills DWA */
  997. mutex_lock(&wusbhc->mutex); /* get a WUSB dev */
  998. if (urb->dev == NULL) {
  999. mutex_unlock(&wusbhc->mutex);
  1000. goto error_dev_gone;
  1001. }
  1002. wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
  1003. if (wusb_dev == NULL) {
  1004. mutex_unlock(&wusbhc->mutex);
  1005. goto error_dev_gone;
  1006. }
  1007. mutex_unlock(&wusbhc->mutex);
  1008. spin_lock_irqsave(&xfer->lock, flags);
  1009. xfer->wusb_dev = wusb_dev;
  1010. result = urb->status;
  1011. if (urb->status != -EINPROGRESS)
  1012. goto error_dequeued;
  1013. result = __wa_xfer_setup(xfer, urb);
  1014. if (result < 0)
  1015. goto error_xfer_setup;
  1016. result = __wa_xfer_submit(xfer);
  1017. if (result < 0)
  1018. goto error_xfer_submit;
  1019. spin_unlock_irqrestore(&xfer->lock, flags);
  1020. return;
  1021. /* this is basically wa_xfer_completion() broken up: wa_xfer_giveback()
  1022. * does a wa_xfer_put() that will call wa_xfer_destroy() and clean
  1023. * up/undo setup().
  1024. */
  1025. error_xfer_setup:
  1026. error_dequeued:
  1027. spin_unlock_irqrestore(&xfer->lock, flags);
  1028. /* FIXME: segmentation broken, kills DWA */
  1029. if (wusb_dev)
  1030. wusb_dev_put(wusb_dev);
  1031. error_dev_gone:
  1032. rpipe_put(xfer->ep->hcpriv);
  1033. error_rpipe_get:
  1034. xfer->result = result;
  1035. wa_xfer_giveback(xfer);
  1036. return;
  1037. error_xfer_submit:
  1038. done = __wa_xfer_is_done(xfer);
  1039. xfer->result = result;
  1040. spin_unlock_irqrestore(&xfer->lock, flags);
  1041. if (done)
  1042. wa_xfer_completion(xfer);
  1043. }
  1044. /*
  1045. * Execute the delayed transfers in the Wire Adapter @wa
  1046. *
  1047. * We need to be careful here, as dequeue() could be called in the
  1048. * middle. If dequeue() jumps in, it first locks xfer->lock and then
  1049. * checks the list -- so, since we would be acquiring the locks in
  1050. * inverse order, we move the delayed list to a separate list while
  1051. * holding wa->xfer_list_lock and then submit the entries without the
  1052. * list lock held.
  1053. */
  1054. void wa_urb_enqueue_run(struct work_struct *ws)
  1055. {
  1056. struct wahc *wa = container_of(ws, struct wahc, xfer_enqueue_work);
  1057. struct wa_xfer *xfer, *next;
  1058. struct urb *urb;
  1059. LIST_HEAD(tmp_list);
  1060. /* Create a copy of the wa->xfer_delayed_list while holding the lock */
  1061. spin_lock_irq(&wa->xfer_list_lock);
  1062. list_cut_position(&tmp_list, &wa->xfer_delayed_list,
  1063. wa->xfer_delayed_list.prev);
  1064. spin_unlock_irq(&wa->xfer_list_lock);
  1065. /*
  1066. * enqueue from temp list without list lock held since wa_urb_enqueue_b
  1067. * can take xfer->lock as well as lock mutexes.
  1068. */
  1069. list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
  1070. list_del_init(&xfer->list_node);
  1071. urb = xfer->urb;
  1072. wa_urb_enqueue_b(xfer);
  1073. usb_put_urb(urb); /* taken when queuing */
  1074. }
  1075. }
  1076. EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);
  1077. /*
  1078. * Process the errored transfers on the Wire Adapter outside of interrupt.
  1079. */
  1080. void wa_process_errored_transfers_run(struct work_struct *ws)
  1081. {
  1082. struct wahc *wa = container_of(ws, struct wahc, xfer_error_work);
  1083. struct wa_xfer *xfer, *next;
  1084. LIST_HEAD(tmp_list);
  1085. pr_info("%s: Run delayed STALL processing.\n", __func__);
  1086. /* Create a copy of the wa->xfer_errored_list while holding the lock */
  1087. spin_lock_irq(&wa->xfer_list_lock);
  1088. list_cut_position(&tmp_list, &wa->xfer_errored_list,
  1089. wa->xfer_errored_list.prev);
  1090. spin_unlock_irq(&wa->xfer_list_lock);
  1091. /*
  1092. * run rpipe_clear_feature_stalled from temp list without list lock
  1093. * held.
  1094. */
  1095. list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
  1096. struct usb_host_endpoint *ep;
  1097. unsigned long flags;
  1098. struct wa_rpipe *rpipe;
  1099. spin_lock_irqsave(&xfer->lock, flags);
  1100. ep = xfer->ep;
  1101. rpipe = ep->hcpriv;
  1102. spin_unlock_irqrestore(&xfer->lock, flags);
  1103. /* clear RPIPE feature stalled without holding a lock. */
  1104. rpipe_clear_feature_stalled(wa, ep);
  1105. /* complete the xfer. This removes it from the tmp list. */
  1106. wa_xfer_completion(xfer);
  1107. /* check for work. */
  1108. wa_xfer_delayed_run(rpipe);
  1109. }
  1110. }
  1111. EXPORT_SYMBOL_GPL(wa_process_errored_transfers_run);
  1112. /*
  1113. * Submit a transfer to the Wire Adapter in a delayed way
  1114. *
  1115. * The process of enqueuing involves possible sleeps [see
  1116. * wa_urb_enqueue_b(), for the rpipe_get() and the mutex_lock()]. If we
  1117. * are in an atomic section, we defer the enqueue_b() call; else we call it directly.
  1118. *
  1119. * @urb: We own a reference to it, taken by the Linux USB HCD stack, that
  1120. * will be given up by calling usb_hcd_giveback_urb() or by
  1121. * returning error from this function -> ergo we don't have to
  1122. * refcount it.
  1123. */
  1124. int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
  1125. struct urb *urb, gfp_t gfp)
  1126. {
  1127. int result;
  1128. struct device *dev = &wa->usb_iface->dev;
  1129. struct wa_xfer *xfer;
  1130. unsigned long my_flags;
  1131. unsigned cant_sleep = irqs_disabled() | in_atomic();
  1132. if ((urb->transfer_buffer == NULL)
  1133. && (urb->sg == NULL)
  1134. && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
  1135. && urb->transfer_buffer_length != 0) {
  1136. dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
  1137. dump_stack();
  1138. }
  1139. result = -ENOMEM;
  1140. xfer = kzalloc(sizeof(*xfer), gfp);
  1141. if (xfer == NULL)
  1142. goto error_kmalloc;
  1143. result = -ENOENT;
  1144. if (urb->status != -EINPROGRESS) /* cancelled */
  1145. goto error_dequeued; /* before starting? */
  1146. wa_xfer_init(xfer);
  1147. xfer->wa = wa_get(wa);
  1148. xfer->urb = urb;
  1149. xfer->gfp = gfp;
  1150. xfer->ep = ep;
  1151. urb->hcpriv = xfer;
  1152. dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
  1153. xfer, urb, urb->pipe, urb->transfer_buffer_length,
  1154. urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
  1155. urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
  1156. cant_sleep ? "deferred" : "inline");
  1157. if (cant_sleep) {
  1158. usb_get_urb(urb);
  1159. spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
  1160. list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
  1161. spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
  1162. queue_work(wusbd, &wa->xfer_enqueue_work);
  1163. } else {
  1164. wa_urb_enqueue_b(xfer);
  1165. }
  1166. return 0;
  1167. error_dequeued:
  1168. kfree(xfer);
  1169. error_kmalloc:
  1170. return result;
  1171. }
  1172. EXPORT_SYMBOL_GPL(wa_urb_enqueue);
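/*
 * Sketch (not compiled; names are illustrative) of how a host controller
 * driver's urb_enqueue op could hand a URB to this layer, e.g. the HWA
 * host-controller driver.
 */
#if 0
static int example_op_urb_enqueue(struct usb_hcd *usb_hcd, struct urb *urb,
				  gfp_t gfp)
{
	struct example_hc *hc = hcd_to_example_hc(usb_hcd); /* illustrative */

	return wa_urb_enqueue(&hc->wa, urb->ep, urb, gfp);
}
#endif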
  1173. /*
  1174. * Dequeue a URB and make sure usb_hcd_giveback_urb() [the completion
  1175. * handler] is called.
  1176. *
  1177. * Until a transfer goes successfully through wa_urb_enqueue() it
  1178. * needs to be dequeued with the completion called; when it is stuck
  1179. * in the delayed list or __wa_xfer_setup() has not run yet, we do the
  1180. * completion ourselves.
  1181. *
  1182. * Not set up: if there is no hcpriv yet, enqueue has not had time to
  1183. * set the xfer up. Because urb->status should be other than
  1184. * -EINPROGRESS, enqueue() will catch that and bail out.
  1185. *
  1186. * If the transfer has gone through setup, we just need to clean it
  1187. * up. If it has gone through submit(), we have to abort it [with an
  1188. * async request] and then make sure we cancel each segment.
  1189. *
  1190. */
  1191. int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
  1192. {
  1193. unsigned long flags, flags2;
  1194. struct wa_xfer *xfer;
  1195. struct wa_seg *seg;
  1196. struct wa_rpipe *rpipe;
  1197. unsigned cnt;
  1198. unsigned rpipe_ready = 0;
  1199. xfer = urb->hcpriv;
  1200. if (xfer == NULL) {
  1201. /*
  1202. * Nothing setup yet enqueue will see urb->status !=
  1203. * -EINPROGRESS (by hcd layer) and bail out with
  1204. * error, no need to do completion
  1205. */
  1206. BUG_ON(urb->status == -EINPROGRESS);
  1207. goto out;
  1208. }
  1209. spin_lock_irqsave(&xfer->lock, flags);
  1210. rpipe = xfer->ep->hcpriv;
  1211. if (rpipe == NULL) {
  1212. pr_debug("%s: xfer id 0x%08X has no RPIPE. %s",
  1213. __func__, wa_xfer_id(xfer),
  1214. "Probably already aborted.\n" );
  1215. goto out_unlock;
  1216. }
  1217. /* Check the delayed list -> if there, release and complete */
  1218. spin_lock_irqsave(&wa->xfer_list_lock, flags2);
  1219. if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
  1220. goto dequeue_delayed;
  1221. spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
  1222. if (xfer->seg == NULL) /* still hasn't reached */
  1223. goto out_unlock; /* setup(), enqueue_b() completes */
  1224. /* Ok, the xfer is in flight already, it's been setup and submitted.*/
  1225. __wa_xfer_abort(xfer);
  1226. for (cnt = 0; cnt < xfer->segs; cnt++) {
  1227. seg = xfer->seg[cnt];
  1228. switch (seg->status) {
  1229. case WA_SEG_NOTREADY:
  1230. case WA_SEG_READY:
  1231. printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n",
  1232. xfer, cnt, seg->status);
  1233. WARN_ON(1);
  1234. break;
  1235. case WA_SEG_DELAYED:
  1236. seg->status = WA_SEG_ABORTED;
  1237. spin_lock_irqsave(&rpipe->seg_lock, flags2);
  1238. list_del(&seg->list_node);
  1239. xfer->segs_done++;
  1240. rpipe_ready = rpipe_avail_inc(rpipe);
  1241. spin_unlock_irqrestore(&rpipe->seg_lock, flags2);
  1242. break;
  1243. case WA_SEG_SUBMITTED:
  1244. seg->status = WA_SEG_ABORTED;
  1245. usb_unlink_urb(&seg->tr_urb);
  1246. if (xfer->is_inbound == 0)
  1247. usb_unlink_urb(seg->dto_urb);
  1248. xfer->segs_done++;
  1249. rpipe_ready = rpipe_avail_inc(rpipe);
  1250. break;
  1251. case WA_SEG_PENDING:
  1252. seg->status = WA_SEG_ABORTED;
  1253. xfer->segs_done++;
  1254. rpipe_ready = rpipe_avail_inc(rpipe);
  1255. break;
  1256. case WA_SEG_DTI_PENDING:
  1257. usb_unlink_urb(wa->dti_urb);
  1258. seg->status = WA_SEG_ABORTED;
  1259. xfer->segs_done++;
  1260. rpipe_ready = rpipe_avail_inc(rpipe);
  1261. break;
  1262. case WA_SEG_DONE:
  1263. case WA_SEG_ERROR:
  1264. case WA_SEG_ABORTED:
  1265. break;
  1266. }
  1267. }
  1268. xfer->result = urb->status; /* -ENOENT or -ECONNRESET */
  1269. __wa_xfer_is_done(xfer);
  1270. spin_unlock_irqrestore(&xfer->lock, flags);
  1271. wa_xfer_completion(xfer);
  1272. if (rpipe_ready)
  1273. wa_xfer_delayed_run(rpipe);
  1274. return 0;
  1275. out_unlock:
  1276. spin_unlock_irqrestore(&xfer->lock, flags);
  1277. out:
  1278. return 0;
  1279. dequeue_delayed:
  1280. list_del_init(&xfer->list_node);
  1281. spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
  1282. xfer->result = urb->status;
  1283. spin_unlock_irqrestore(&xfer->lock, flags);
  1284. wa_xfer_giveback(xfer);
  1285. usb_put_urb(urb); /* we got a ref in enqueue() */
  1286. return 0;
  1287. }
  1288. EXPORT_SYMBOL_GPL(wa_urb_dequeue);
  1289. /*
  1290. * Translation from WA status codes (WUSB1.0 Table 8.15) to errno
  1291. * codes
  1292. *
  1293. * Positive errno values are internal inconsistencies and should be
  1294. * flagged louder. Negative are to be passed up to the user in the
  1295. * normal way.
  1296. *
  1297. * @status: USB WA status code -- high two bits are stripped.
  1298. */
  1299. static int wa_xfer_status_to_errno(u8 status)
  1300. {
  1301. int errno;
  1302. u8 real_status = status;
  1303. static int xlat[] = {
  1304. [WA_XFER_STATUS_SUCCESS] = 0,
  1305. [WA_XFER_STATUS_HALTED] = -EPIPE,
  1306. [WA_XFER_STATUS_DATA_BUFFER_ERROR] = -ENOBUFS,
  1307. [WA_XFER_STATUS_BABBLE] = -EOVERFLOW,
  1308. [WA_XFER_RESERVED] = EINVAL,
  1309. [WA_XFER_STATUS_NOT_FOUND] = 0,
  1310. [WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
  1311. [WA_XFER_STATUS_TRANSACTION_ERROR] = -EILSEQ,
  1312. [WA_XFER_STATUS_ABORTED] = -EINTR,
  1313. [WA_XFER_STATUS_RPIPE_NOT_READY] = EINVAL,
  1314. [WA_XFER_INVALID_FORMAT] = EINVAL,
  1315. [WA_XFER_UNEXPECTED_SEGMENT_NUMBER] = EINVAL,
  1316. [WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] = EINVAL,
  1317. };
  1318. status &= 0x3f;
  1319. if (status == 0)
  1320. return 0;
  1321. if (status >= ARRAY_SIZE(xlat)) {
  1322. printk_ratelimited(KERN_ERR "%s(): BUG? "
  1323. "Unknown WA transfer status 0x%02x\n",
  1324. __func__, real_status);
  1325. return -EINVAL;
  1326. }
  1327. errno = xlat[status];
  1328. if (unlikely(errno > 0)) {
  1329. printk_ratelimited(KERN_ERR "%s(): BUG? "
  1330. "Inconsistent WA status: 0x%02x\n",
  1331. __func__, real_status);
  1332. errno = -errno;
  1333. }
  1334. return errno;
  1335. }
  1336. /*
  1337. * Process an xfer result completion message
  1338. *
  1339. * inbound transfers: need to schedule a DTI read
  1340. *
  1341. * FIXME: this function needs to be broken up into parts
  1342. */
static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer,
		struct wa_xfer_result *xfer_result)
{
	int result;
	struct device *dev = &wa->usb_iface->dev;
	unsigned long flags;
	u8 seg_idx;
	struct wa_seg *seg;
	struct wa_rpipe *rpipe;
	unsigned done = 0;
	u8 usb_status;
	unsigned rpipe_ready = 0;

	spin_lock_irqsave(&xfer->lock, flags);
	seg_idx = xfer_result->bTransferSegment & 0x7f;
	if (unlikely(seg_idx >= xfer->segs))
		goto error_bad_seg;
	seg = xfer->seg[seg_idx];
	rpipe = xfer->ep->hcpriv;
	usb_status = xfer_result->bTransferStatus;
	dev_dbg(dev, "xfer %p ID 0x%08X#%u: bTransferStatus 0x%02x (seg status %u)\n",
		xfer, wa_xfer_id(xfer), seg_idx, usb_status, seg->status);
	if (seg->status == WA_SEG_ABORTED
	    || seg->status == WA_SEG_ERROR)	/* already handled */
		goto segment_aborted;
	if (seg->status == WA_SEG_SUBMITTED)	/* oops, got here */
		seg->status = WA_SEG_PENDING;	/* before wa_seg{_dto}_cb() */
	if (seg->status != WA_SEG_PENDING) {
		if (printk_ratelimit())
			dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
				xfer, seg_idx, seg->status);
		seg->status = WA_SEG_PENDING;	/* workaround/"fix" it */
	}
	if (usb_status & 0x80) {
		seg->result = wa_xfer_status_to_errno(usb_status);
		dev_err(dev, "DTI: xfer %p#:%08X:%u failed (0x%02x)\n",
			xfer, xfer->id, seg->index, usb_status);
		goto error_complete;
	}
	/* FIXME: we ignore warnings, tally them for stats */
	if (usb_status & 0x40)		/* Warning?... */
		usb_status = 0;		/* ... pass */
	if (xfer->is_inbound) {		/* IN data phase: read to buffer */
		seg->status = WA_SEG_DTI_PENDING;
		BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
		/* this should always be 0 before a resubmit. */
		wa->buf_in_urb->num_mapped_sgs = 0;

		if (xfer->is_dma) {
			wa->buf_in_urb->transfer_dma =
				xfer->urb->transfer_dma
				+ (seg_idx * xfer->seg_size);
			wa->buf_in_urb->transfer_flags
				|= URB_NO_TRANSFER_DMA_MAP;
			wa->buf_in_urb->transfer_buffer = NULL;
			wa->buf_in_urb->sg = NULL;
			wa->buf_in_urb->num_sgs = 0;
		} else {
			/* do buffer or SG processing. */
			wa->buf_in_urb->transfer_flags
				&= ~URB_NO_TRANSFER_DMA_MAP;

			if (xfer->urb->transfer_buffer) {
				wa->buf_in_urb->transfer_buffer =
					xfer->urb->transfer_buffer
					+ (seg_idx * xfer->seg_size);
				wa->buf_in_urb->sg = NULL;
				wa->buf_in_urb->num_sgs = 0;
			} else {
				/*
				 * Allocate an SG list to store seg_size
				 * bytes and copy the subset of the
				 * xfer->urb->sg that matches the buffer
				 * subset we are about to read.
				 */
				wa->buf_in_urb->sg = wa_xfer_create_subset_sg(
					xfer->urb->sg,
					seg_idx * xfer->seg_size,
					le32_to_cpu(
						xfer_result->dwTransferLength),
					&(wa->buf_in_urb->num_sgs));

				if (!(wa->buf_in_urb->sg)) {
					wa->buf_in_urb->num_sgs = 0;
					goto error_sg_alloc;
				}
				wa->buf_in_urb->transfer_buffer = NULL;
			}
		}
		wa->buf_in_urb->transfer_buffer_length =
			le32_to_cpu(xfer_result->dwTransferLength);
		wa->buf_in_urb->context = seg;
		result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
		if (result < 0)
			goto error_submit_buf_in;
	} else {
		/* OUT data phase, complete it -- */
		seg->status = WA_SEG_DONE;
		seg->result = le32_to_cpu(xfer_result->dwTransferLength);
		xfer->segs_done++;
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_is_done(xfer);
	}
	spin_unlock_irqrestore(&xfer->lock, flags);
	if (done)
		wa_xfer_completion(xfer);
	if (rpipe_ready)
		wa_xfer_delayed_run(rpipe);
	return;

error_submit_buf_in:
	if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
		dev_err(dev, "DTI: URB max acceptable errors "
			"exceeded, resetting device\n");
		wa_reset_all(wa);
	}
	if (printk_ratelimit())
		dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
			xfer, seg_idx, result);
	seg->result = result;
	kfree(wa->buf_in_urb->sg);
	wa->buf_in_urb->sg = NULL;
error_sg_alloc:
	__wa_xfer_abort(xfer);
error_complete:
	seg->status = WA_SEG_ERROR;
	xfer->segs_done++;
	rpipe_ready = rpipe_avail_inc(rpipe);
	done = __wa_xfer_is_done(xfer);
	/*
	 * queue work item to clear STALL for control endpoints.
	 * Otherwise, let endpoint_reset take care of it.
	 */
	if (((usb_status & 0x3f) == WA_XFER_STATUS_HALTED) &&
		usb_endpoint_xfer_control(&xfer->ep->desc) &&
		done) {
		dev_info(dev, "Control EP stall. Queue delayed work.\n");
		spin_lock_irq(&wa->xfer_list_lock);
		/* move xfer from xfer_list to xfer_errored_list. */
		list_move_tail(&xfer->list_node, &wa->xfer_errored_list);
		spin_unlock_irq(&wa->xfer_list_lock);
		spin_unlock_irqrestore(&xfer->lock, flags);
		queue_work(wusbd, &wa->xfer_error_work);
	} else {
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}
	return;

error_bad_seg:
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_urb_dequeue(wa, xfer->urb);
	if (printk_ratelimit())
		dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
	if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
		dev_err(dev, "DTI: URB max acceptable errors "
			"exceeded, resetting device\n");
		wa_reset_all(wa);
	}
	return;

segment_aborted:
	/* nothing to do, as the aborter did the completion */
	spin_unlock_irqrestore(&xfer->lock, flags);
}
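
/*
 * The per-segment completion accounting above is an idiom repeated
 * throughout this file; a minimal sketch, taken under the xfer lock,
 * is roughly:
 *
 *	seg->status = WA_SEG_DONE;	(or WA_SEG_ERROR on failure)
 *	xfer->segs_done++;
 *	rpipe_ready = rpipe_avail_inc(rpipe);
 *	done = __wa_xfer_is_done(xfer);
 *	spin_unlock_irqrestore(&xfer->lock, flags);
 *	if (done)
 *		wa_xfer_completion(xfer);
 *	if (rpipe_ready)
 *		wa_xfer_delayed_run(rpipe);
 *
 * i.e. account the finished segment, return its slot to the rpipe,
 * and only call the completion/delayed-run helpers after dropping
 * the lock.
 */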

/*
 * Callback for the IN data phase
 *
 * If successful, transition state; otherwise, take note of the error,
 * mark this segment done and try completion.
 *
 * Note we don't touch the xfer until we are sure the transfer hasn't
 * been cancelled (ECONNRESET, ENOENT), since in that case seg->xfer
 * could already be gone.
 */
static void wa_buf_in_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wahc *wa;
	struct device *dev;
	struct wa_rpipe *rpipe;
	unsigned rpipe_ready;
	unsigned long flags;
	u8 done = 0;

	/* free the sg if it was used. */
	kfree(urb->sg);
	urb->sg = NULL;

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		dev_dbg(dev, "xfer %p#%u: data in done (%zu bytes)\n",
			xfer, seg->index, (size_t)urb->actual_length);
		seg->status = WA_SEG_DONE;
		seg->result = urb->actual_length;
		xfer->segs_done++;
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_is_done(xfer);
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
		break;
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by whoever unlinked us */
		break;
	default:		/* Other errors ... */
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		if (printk_ratelimit())
			dev_err(dev, "xfer %p#%u: data in error %d\n",
				xfer, seg->index, urb->status);
		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "DTO: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
		}
		seg->status = WA_SEG_ERROR;
		seg->result = urb->status;
		xfer->segs_done++;
		rpipe_ready = rpipe_avail_inc(rpipe);
		__wa_xfer_abort(xfer);
		done = __wa_xfer_is_done(xfer);
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}
}
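
/*
 * A minimal sketch of the cancellation guard used above: the xfer is
 * only locked and worked on for the success and error cases; on an
 * unlink (-ECONNRESET/-ENOENT) nothing is touched, since whoever
 * unlinked the URB already completed the transfer:
 *
 *	switch (urb->status) {
 *	case -ECONNRESET:
 *	case -ENOENT:
 *		break;
 *	default:
 *		spin_lock_irqsave(&xfer->lock, flags);
 *		...
 *		spin_unlock_irqrestore(&xfer->lock, flags);
 *	}
 */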

/*
 * Handle an incoming transfer result buffer
 *
 * Given a transfer result buffer, it completes the transfer (possibly
 * scheduling a buffer-in read) and then resubmits the DTI URB for a
 * new transfer result read.
 *
 * The xfer_result DTI URB state machine
 *
 * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In)
 *
 * We start in OFF mode, the first xfer_result notification [through
 * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to
 * read.
 *
 * We receive a buffer -- if it is not a xfer_result, we complain and
 * repost the DTI-URB. If it is a xfer_result then do the xfer seg
 * request accounting. If it is an IN segment, we move to RBI and post
 * a BUF-IN-URB to the right buffer. The BUF-IN-URB callback will
 * repost the DTI-URB and move to RXR state. If there was no IN
 * segment, it will repost the DTI-URB.
 *
 * We go back to OFF when we detect an ENOENT or ESHUTDOWN (or too many
 * errors) in the URBs.
 */
static void wa_dti_cb(struct urb *urb)
{
	int result;
	struct wahc *wa = urb->context;
	struct device *dev = &wa->usb_iface->dev;
	struct wa_xfer_result *xfer_result;
	u32 xfer_id;
	struct wa_xfer *xfer;
	u8 usb_status;

	BUG_ON(wa->dti_urb != urb);
	switch (wa->dti_urb->status) {
	case 0:
		/* We have a xfer result buffer; check it */
		dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
			urb->actual_length, urb->transfer_buffer);
		if (wa->dti_urb->actual_length != sizeof(*xfer_result)) {
			dev_err(dev, "DTI Error: xfer result--bad size "
				"xfer result (%d bytes vs %zu needed)\n",
				urb->actual_length, sizeof(*xfer_result));
			break;
		}
		xfer_result = (struct wa_xfer_result *)(wa->dti_buf);
		if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
			dev_err(dev, "DTI Error: xfer result--"
				"bad header length %u\n",
				xfer_result->hdr.bLength);
			break;
		}
		if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
			dev_err(dev, "DTI Error: xfer result--"
				"bad header type 0x%02x\n",
				xfer_result->hdr.bNotifyType);
			break;
		}
		usb_status = xfer_result->bTransferStatus & 0x3f;
		if (usb_status == WA_XFER_STATUS_NOT_FOUND)
			/* taken care of already */
			break;
		xfer_id = le32_to_cpu(xfer_result->dwTransferID);
		xfer = wa_xfer_get_by_id(wa, xfer_id);
		if (xfer == NULL) {
			/* FIXME: transaction might have been cancelled */
			dev_err(dev, "DTI Error: xfer result--"
				"unknown xfer 0x%08x (status 0x%02x)\n",
				xfer_id, usb_status);
			break;
		}
		wa_xfer_result_chew(wa, xfer, xfer_result);
		wa_xfer_put(xfer);
		break;
	case -ENOENT:		/* (we killed the URB)...so, no broadcast */
	case -ESHUTDOWN:	/* going away! */
		dev_dbg(dev, "DTI: going down! %d\n", urb->status);
		goto out;
	default:
		/* Unknown error */
		if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "DTI: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
			goto out;
		}
		if (printk_ratelimit())
			dev_err(dev, "DTI: URB error %d\n", urb->status);
		break;
	}

	/* Resubmit the DTI URB */
	result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
	if (result < 0) {
		dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
			"resetting\n", result);
		wa_reset_all(wa);
	}
out:
	return;
}
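
/*
 * Note the repost discipline in wa_dti_cb() above, roughly:
 *
 *	result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
 *	if (result < 0)
 *		wa_reset_all(wa);
 *
 * i.e. the DTI URB is resubmitted after every result, good or bad,
 * except when its status reports -ENOENT/-ESHUTDOWN (the adapter is
 * going away) or the error-density counter trips and the whole
 * adapter is reset.
 */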

/*
 * Transfer complete notification
 *
 * Called from the notif.c code. We get a notification on EP2 saying
 * that some endpoint has some transfer result data available. We are
 * about to read it.
 *
 * To speed things up, we always keep a URB posted reading on the DTI
 * endpoint; we don't really set it up and start it until the first
 * xfer complete notification arrives, which is what we do here.
 *
 * Follow up in wa_dti_cb(), as that's where the whole state
 * machine starts.
 *
 * So here we just initialize the DTI URB for reading transfer result
 * notifications and also the buffer-in URB, for reading buffers. Then
 * we just submit the DTI URB.
 *
 * @wa shall be referenced
 */
void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
{
	int result;
	struct device *dev = &wa->usb_iface->dev;
	struct wa_notif_xfer *notif_xfer;
	const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;

	notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr);
	BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER);

	if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) {
		/* FIXME: hardcoded limitation, adapt */
		dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",
			notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
		goto error;
	}
	if (wa->dti_urb != NULL)	/* DTI URB already started */
		goto out;

	wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (wa->dti_urb == NULL) {
		dev_err(dev, "Can't allocate DTI URB\n");
		goto error_dti_urb_alloc;
	}
	usb_fill_bulk_urb(
		wa->dti_urb, wa->usb_dev,
		usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
		wa->dti_buf, wa->dti_buf_size,
		wa_dti_cb, wa);

	wa->buf_in_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (wa->buf_in_urb == NULL) {
		dev_err(dev, "Can't allocate BUF-IN URB\n");
		goto error_buf_in_urb_alloc;
	}
	usb_fill_bulk_urb(
		wa->buf_in_urb, wa->usb_dev,
		usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
		NULL, 0, wa_buf_in_cb, wa);

	result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
	if (result < 0) {
		dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
			"resetting\n", result);
		goto error_dti_urb_submit;
	}
out:
	return;

error_dti_urb_submit:
	usb_put_urb(wa->buf_in_urb);
	wa->buf_in_urb = NULL;
error_buf_in_urb_alloc:
	usb_put_urb(wa->dti_urb);
	wa->dti_urb = NULL;
error_dti_urb_alloc:
error:
	wa_reset_all(wa);
}
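
/*
 * The error unwinding in wa_handle_notif_xfer() above releases what
 * was set up, in reverse order, and always ends in a full adapter
 * reset; roughly:
 *
 *	error_dti_urb_submit:
 *		usb_put_urb(wa->buf_in_urb);  wa->buf_in_urb = NULL;
 *	error_buf_in_urb_alloc:
 *		usb_put_urb(wa->dti_urb);     wa->dti_urb = NULL;
 *	error_dti_urb_alloc:
 *	error:
 *		wa_reset_all(wa);
 *
 * so a failure at any stage leaves no half-initialized URBs behind
 * before the reset.
 */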