usbtest.c
  1. #include <linux/kernel.h>
  2. #include <linux/errno.h>
  3. #include <linux/init.h>
  4. #include <linux/slab.h>
  5. #include <linux/mm.h>
  6. #include <linux/module.h>
  7. #include <linux/moduleparam.h>
  8. #include <linux/scatterlist.h>
  9. #include <linux/mutex.h>
  10. #include <linux/usb.h>
  11. /*-------------------------------------------------------------------------*/
  12. static int override_alt = -1;
  13. module_param_named(alt, override_alt, int, 0644);
  14. MODULE_PARM_DESC(alt, ">= 0 to override altsetting selection");
  15. /*-------------------------------------------------------------------------*/
  16. /* FIXME make these public somewhere; usbdevfs.h? */
  17. struct usbtest_param {
  18. /* inputs */
  19. unsigned test_num; /* 0..(TEST_CASES-1) */
  20. unsigned iterations;
  21. unsigned length;
  22. unsigned vary;
  23. unsigned sglen;
  24. /* outputs */
  25. struct timeval duration;
  26. };
  27. #define USBTEST_REQUEST _IOWR('U', 100, struct usbtest_param)
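/* For reference: userspace normally drives these tests by wrapping
 * USBTEST_REQUEST in usbfs's USBDEVFS_IOCTL on the interface bound to
 * this driver, roughly as sketched below (struct usbdevfs_ioctl comes
 * from <linux/usbdevice_fs.h>; the device path and parameter values are
 * only example choices):
 *
 *	struct usbtest_param param = {
 *		.test_num   = 1,	// e.g. a simple bulk OUT test
 *		.iterations = 1000,
 *		.length     = 512,
 *	};
 *	struct usbdevfs_ioctl wrapper = {
 *		.ifno       = 0,	// interface claimed by usbtest
 *		.ioctl_code = USBTEST_REQUEST,
 *		.data       = &param,
 *	};
 *	int fd = open("/dev/bus/usb/001/002", O_RDWR);
 *	if (fd < 0 || ioctl(fd, USBDEVFS_IOCTL, &wrapper) < 0)
 *		perror("usbtest");
 *	// on success, param.duration reports the elapsed test time
 */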
  28. /*-------------------------------------------------------------------------*/
  29. #define GENERIC /* let probe() bind using module params */
  30. /* Some devices that can be used for testing will have "real" drivers.
  31. * Entries for those need to be enabled here by hand, after disabling
  32. * that "real" driver.
  33. */
  34. //#define IBOT2 /* grab iBOT2 webcams */
  35. //#define KEYSPAN_19Qi /* grab un-renumerated serial adapter */
  36. /*-------------------------------------------------------------------------*/
  37. struct usbtest_info {
  38. const char *name;
  39. u8 ep_in; /* bulk/intr source */
  40. u8 ep_out; /* bulk/intr sink */
  41. unsigned autoconf:1;
  42. unsigned ctrl_out:1;
  43. unsigned iso:1; /* try iso in/out */
  44. int alt;
  45. };
  46. /* this is accessed only through usbfs ioctl calls.
  47. * one ioctl to issue a test ... one lock per device.
  48. * tests create other threads if they need them.
  49. * urbs and buffers are allocated dynamically,
  50. * and data generated deterministically.
  51. */
  52. struct usbtest_dev {
  53. struct usb_interface *intf;
  54. struct usbtest_info *info;
  55. int in_pipe;
  56. int out_pipe;
  57. int in_iso_pipe;
  58. int out_iso_pipe;
  59. struct usb_endpoint_descriptor *iso_in, *iso_out;
  60. struct mutex lock;
  61. #define TBUF_SIZE 256
  62. u8 *buf;
  63. };
  64. static struct usb_device *testdev_to_usbdev(struct usbtest_dev *test)
  65. {
  66. return interface_to_usbdev(test->intf);
  67. }
  68. /* set up all urbs so they can be used with either bulk or interrupt */
  69. #define INTERRUPT_RATE 1 /* msec/transfer */
  70. #define ERROR(tdev, fmt, args...) \
  71. dev_err(&(tdev)->intf->dev , fmt , ## args)
  72. #define WARNING(tdev, fmt, args...) \
  73. dev_warn(&(tdev)->intf->dev , fmt , ## args)
  74. #define GUARD_BYTE 0xA5
  75. /*-------------------------------------------------------------------------*/
  76. static int
  77. get_endpoints(struct usbtest_dev *dev, struct usb_interface *intf)
  78. {
  79. int tmp;
  80. struct usb_host_interface *alt;
  81. struct usb_host_endpoint *in, *out;
  82. struct usb_host_endpoint *iso_in, *iso_out;
  83. struct usb_device *udev;
  84. for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
  85. unsigned ep;
  86. in = out = NULL;
  87. iso_in = iso_out = NULL;
  88. alt = intf->altsetting + tmp;
  89. if (override_alt >= 0 &&
  90. override_alt != alt->desc.bAlternateSetting)
  91. continue;
  92. /* take the first altsetting with in-bulk + out-bulk;
  93. * ignore other endpoints and altsettings.
  94. */
  95. for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
  96. struct usb_host_endpoint *e;
  97. e = alt->endpoint + ep;
  98. switch (usb_endpoint_type(&e->desc)) {
  99. case USB_ENDPOINT_XFER_BULK:
  100. break;
  101. case USB_ENDPOINT_XFER_ISOC:
  102. if (dev->info->iso)
  103. goto try_iso;
  104. /* FALLTHROUGH */
  105. default:
  106. continue;
  107. }
  108. if (usb_endpoint_dir_in(&e->desc)) {
  109. if (!in)
  110. in = e;
  111. } else {
  112. if (!out)
  113. out = e;
  114. }
  115. continue;
  116. try_iso:
  117. if (usb_endpoint_dir_in(&e->desc)) {
  118. if (!iso_in)
  119. iso_in = e;
  120. } else {
  121. if (!iso_out)
  122. iso_out = e;
  123. }
  124. }
  125. if ((in && out) || iso_in || iso_out)
  126. goto found;
  127. }
  128. return -EINVAL;
  129. found:
  130. udev = testdev_to_usbdev(dev);
  131. dev->info->alt = alt->desc.bAlternateSetting;
  132. if (alt->desc.bAlternateSetting != 0) {
  133. tmp = usb_set_interface(udev,
  134. alt->desc.bInterfaceNumber,
  135. alt->desc.bAlternateSetting);
  136. if (tmp < 0)
  137. return tmp;
  138. }
  139. if (in) {
  140. dev->in_pipe = usb_rcvbulkpipe(udev,
  141. in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
  142. dev->out_pipe = usb_sndbulkpipe(udev,
  143. out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
  144. }
  145. if (iso_in) {
  146. dev->iso_in = &iso_in->desc;
  147. dev->in_iso_pipe = usb_rcvisocpipe(udev,
  148. iso_in->desc.bEndpointAddress
  149. & USB_ENDPOINT_NUMBER_MASK);
  150. }
  151. if (iso_out) {
  152. dev->iso_out = &iso_out->desc;
  153. dev->out_iso_pipe = usb_sndisocpipe(udev,
  154. iso_out->desc.bEndpointAddress
  155. & USB_ENDPOINT_NUMBER_MASK);
  156. }
  157. return 0;
  158. }
  159. /*-------------------------------------------------------------------------*/
  160. /* Support for testing basic non-queued I/O streams.
  161. *
  162. * These just package urbs as requests that can be easily canceled.
  163. * Each urb's data buffer is dynamically allocated; callers can fill
  164. * them with non-zero test data (or test for it) when appropriate.
  165. */
  166. static void simple_callback(struct urb *urb)
  167. {
  168. complete(urb->context);
  169. }
  170. static struct urb *usbtest_alloc_urb(
  171. struct usb_device *udev,
  172. int pipe,
  173. unsigned long bytes,
  174. unsigned transfer_flags,
  175. unsigned offset)
  176. {
  177. struct urb *urb;
  178. urb = usb_alloc_urb(0, GFP_KERNEL);
  179. if (!urb)
  180. return urb;
  181. usb_fill_bulk_urb(urb, udev, pipe, NULL, bytes, simple_callback, NULL);
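/* the interval is ignored for bulk pipes but matters when these urbs
 * are pointed at interrupt endpoints; high speed hosts count the period
 * in 125 us microframes, hence the <<3 to keep roughly INTERRUPT_RATE
 * msec per transfer.
 */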
  182. urb->interval = (udev->speed == USB_SPEED_HIGH)
  183. ? (INTERRUPT_RATE << 3)
  184. : INTERRUPT_RATE;
  185. urb->transfer_flags = transfer_flags;
  186. if (usb_pipein(pipe))
  187. urb->transfer_flags |= URB_SHORT_NOT_OK;
  188. if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
  189. urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
  190. GFP_KERNEL, &urb->transfer_dma);
  191. else
  192. urb->transfer_buffer = kmalloc(bytes + offset, GFP_KERNEL);
  193. if (!urb->transfer_buffer) {
  194. usb_free_urb(urb);
  195. return NULL;
  196. }
  197. /* To test unaligned transfers add an offset and fill the
  198. unused memory with a guard value */
  199. if (offset) {
  200. memset(urb->transfer_buffer, GUARD_BYTE, offset);
  201. urb->transfer_buffer += offset;
  202. if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
  203. urb->transfer_dma += offset;
  204. }
  205. /* For inbound transfers use guard byte so that test fails if
  206. data not correctly copied */
  207. memset(urb->transfer_buffer,
  208. usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
  209. bytes);
  210. return urb;
  211. }
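/* When 'offset' is nonzero the allocation above ends up laid out as
 *
 *	[ GUARD_BYTE x offset ][ transfer data ............ ]
 *	                        ^ urb->transfer_buffer
 *	                          (and transfer_dma, when DMA-mapped)
 *
 * check_guard_bytes() later walks back from transfer_buffer to the
 * start of the allocation and verifies the padding still holds
 * GUARD_BYTE.
 */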
  212. static struct urb *simple_alloc_urb(
  213. struct usb_device *udev,
  214. int pipe,
  215. unsigned long bytes)
  216. {
  217. return usbtest_alloc_urb(udev, pipe, bytes, URB_NO_TRANSFER_DMA_MAP, 0);
  218. }
  219. static unsigned pattern;
  220. static unsigned mod_pattern;
  221. module_param_named(pattern, mod_pattern, uint, S_IRUGO | S_IWUSR);
  222. MODULE_PARM_DESC(mod_pattern, "i/o pattern (0 == zeroes)");
  223. static inline void simple_fill_buf(struct urb *urb)
  224. {
  225. unsigned i;
  226. u8 *buf = urb->transfer_buffer;
  227. unsigned len = urb->transfer_buffer_length;
  228. switch (pattern) {
  229. default:
  230. /* FALLTHROUGH */
  231. case 0:
  232. memset(buf, 0, len);
  233. break;
  234. case 1: /* mod63 */
  235. for (i = 0; i < len; i++)
  236. *buf++ = (u8) (i % 63);
  237. break;
  238. }
  239. }
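/* 63 shares no factor with any legal USB maxpacket size (8, 16, 32, 64,
 * 512, 1024), so the mod63 pattern never lines up with packet
 * boundaries; a lost or repeated packet shifts the sequence and
 * simple_check_buf() flags it.  See the longer note there.
 */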
  240. static inline unsigned long buffer_offset(void *buf)
  241. {
  242. return (unsigned long)buf & (ARCH_KMALLOC_MINALIGN - 1);
  243. }
  244. static int check_guard_bytes(struct usbtest_dev *tdev, struct urb *urb)
  245. {
  246. u8 *buf = urb->transfer_buffer;
  247. u8 *guard = buf - buffer_offset(buf);
  248. unsigned i;
  249. for (i = 0; guard < buf; i++, guard++) {
  250. if (*guard != GUARD_BYTE) {
  251. ERROR(tdev, "guard byte[%d] %d (not %d)\n",
  252. i, *guard, GUARD_BYTE);
  253. return -EINVAL;
  254. }
  255. }
  256. return 0;
  257. }
  258. static int simple_check_buf(struct usbtest_dev *tdev, struct urb *urb)
  259. {
  260. unsigned i;
  261. u8 expected;
  262. u8 *buf = urb->transfer_buffer;
  263. unsigned len = urb->actual_length;
  264. int ret = check_guard_bytes(tdev, urb);
  265. if (ret)
  266. return ret;
  267. for (i = 0; i < len; i++, buf++) {
  268. switch (pattern) {
  269. /* all-zeroes has no synchronization issues */
  270. case 0:
  271. expected = 0;
  272. break;
  273. /* mod63 stays in sync with short-terminated transfers,
  274. * or otherwise when host and gadget agree on how large
  275. * each usb transfer request should be. resync is done
  276. * with set_interface or set_config.
  277. */
  278. case 1: /* mod63 */
  279. expected = i % 63;
  280. break;
  281. /* always fail unsupported patterns */
  282. default:
  283. expected = !*buf;
  284. break;
  285. }
  286. if (*buf == expected)
  287. continue;
  288. ERROR(tdev, "buf[%d] = %d (not %d)\n", i, *buf, expected);
  289. return -EINVAL;
  290. }
  291. return 0;
  292. }
  293. static void simple_free_urb(struct urb *urb)
  294. {
  295. unsigned long offset = buffer_offset(urb->transfer_buffer);
  296. if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
  297. usb_free_coherent(
  298. urb->dev,
  299. urb->transfer_buffer_length + offset,
  300. urb->transfer_buffer - offset,
  301. urb->transfer_dma - offset);
  302. else
  303. kfree(urb->transfer_buffer - offset);
  304. usb_free_urb(urb);
  305. }
  306. static int simple_io(
  307. struct usbtest_dev *tdev,
  308. struct urb *urb,
  309. int iterations,
  310. int vary,
  311. int expected,
  312. const char *label
  313. )
  314. {
  315. struct usb_device *udev = urb->dev;
  316. int max = urb->transfer_buffer_length;
  317. struct completion completion;
  318. int retval = 0;
  319. urb->context = &completion;
  320. while (retval == 0 && iterations-- > 0) {
  321. init_completion(&completion);
  322. if (usb_pipeout(urb->pipe)) {
  323. simple_fill_buf(urb);
  324. urb->transfer_flags |= URB_ZERO_PACKET;
  325. }
  326. retval = usb_submit_urb(urb, GFP_KERNEL);
  327. if (retval != 0)
  328. break;
  329. /* NOTE: no timeouts; can't be broken out of by interrupt */
  330. wait_for_completion(&completion);
  331. retval = urb->status;
  332. urb->dev = udev;
  333. if (retval == 0 && usb_pipein(urb->pipe))
  334. retval = simple_check_buf(tdev, urb);
  335. if (vary) {
  336. int len = urb->transfer_buffer_length;
  337. len += vary;
  338. len %= max;
  339. if (len == 0)
  340. len = (vary < max) ? vary : max;
  341. urb->transfer_buffer_length = len;
  342. }
  343. /* FIXME if endpoint halted, clear halt (and log) */
  344. }
  345. urb->transfer_buffer_length = max;
  346. if (expected != retval)
  347. dev_err(&udev->dev,
  348. "%s failed, iterations left %d, status %d (not %d)\n",
  349. label, iterations, retval, expected);
  350. return retval;
  351. }
  352. /*-------------------------------------------------------------------------*/
  353. /* We use scatterlist primitives to test queued I/O.
  354. * Yes, this also tests the scatterlist primitives.
  355. */
  356. static void free_sglist(struct scatterlist *sg, int nents)
  357. {
  358. unsigned i;
  359. if (!sg)
  360. return;
  361. for (i = 0; i < nents; i++) {
  362. if (!sg_page(&sg[i]))
  363. continue;
  364. kfree(sg_virt(&sg[i]));
  365. }
  366. kfree(sg);
  367. }
  368. static struct scatterlist *
  369. alloc_sglist(int nents, int max, int vary)
  370. {
  371. struct scatterlist *sg;
  372. unsigned i;
  373. unsigned size = max;
  374. if (max == 0)
  375. return NULL;
  376. sg = kmalloc_array(nents, sizeof(*sg), GFP_KERNEL);
  377. if (!sg)
  378. return NULL;
  379. sg_init_table(sg, nents);
  380. for (i = 0; i < nents; i++) {
  381. char *buf;
  382. unsigned j;
  383. buf = kzalloc(size, GFP_KERNEL);
  384. if (!buf) {
  385. free_sglist(sg, i);
  386. return NULL;
  387. }
  388. /* kmalloc pages are always physically contiguous! */
  389. sg_set_buf(&sg[i], buf, size);
  390. switch (pattern) {
  391. case 0:
  392. /* already zeroed */
  393. break;
  394. case 1:
  395. for (j = 0; j < size; j++)
  396. *buf++ = (u8) (j % 63);
  397. break;
  398. }
  399. if (vary) {
  400. size += vary;
  401. size %= max;
  402. if (size == 0)
  403. size = (vary < max) ? vary : max;
  404. }
  405. }
  406. return sg;
  407. }
  408. static int perform_sglist(
  409. struct usbtest_dev *tdev,
  410. unsigned iterations,
  411. int pipe,
  412. struct usb_sg_request *req,
  413. struct scatterlist *sg,
  414. int nents
  415. )
  416. {
  417. struct usb_device *udev = testdev_to_usbdev(tdev);
  418. int retval = 0;
  419. while (retval == 0 && iterations-- > 0) {
  420. retval = usb_sg_init(req, udev, pipe,
  421. (udev->speed == USB_SPEED_HIGH)
  422. ? (INTERRUPT_RATE << 3)
  423. : INTERRUPT_RATE,
  424. sg, nents, 0, GFP_KERNEL);
  425. if (retval)
  426. break;
  427. usb_sg_wait(req);
  428. retval = req->status;
  429. /* FIXME check resulting data pattern */
  430. /* FIXME if endpoint halted, clear halt (and log) */
  431. }
  432. /* FIXME for unlink or fault handling tests, don't report
  433. * failure if retval is as we expected ...
  434. */
  435. if (retval)
  436. ERROR(tdev, "perform_sglist failed, "
  437. "iterations left %d, status %d\n",
  438. iterations, retval);
  439. return retval;
  440. }
  441. /*-------------------------------------------------------------------------*/
  442. /* unqueued control message testing
  443. *
  444. * there's a nice set of device functional requirements in chapter 9 of the
  445. * usb 2.0 spec, which we can apply to ANY device, even ones that don't use
  446. * special test firmware.
  447. *
  448. * we know the device is configured (or suspended) by the time it's visible
  449. * through usbfs. we can't change that, so we won't test enumeration (which
  450. * worked 'well enough' to get here, this time), power management (ditto),
  451. * or remote wakeup (which needs human interaction).
  452. */
  453. static unsigned realworld = 1;
  454. module_param(realworld, uint, 0);
  455. MODULE_PARM_DESC(realworld, "clear to demand stricter spec compliance");
  456. static int get_altsetting(struct usbtest_dev *dev)
  457. {
  458. struct usb_interface *iface = dev->intf;
  459. struct usb_device *udev = interface_to_usbdev(iface);
  460. int retval;
  461. retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
  462. USB_REQ_GET_INTERFACE, USB_DIR_IN|USB_RECIP_INTERFACE,
  463. 0, iface->altsetting[0].desc.bInterfaceNumber,
  464. dev->buf, 1, USB_CTRL_GET_TIMEOUT);
  465. switch (retval) {
  466. case 1:
  467. return dev->buf[0];
  468. case 0:
  469. retval = -ERANGE;
  470. /* FALLTHROUGH */
  471. default:
  472. return retval;
  473. }
  474. }
  475. static int set_altsetting(struct usbtest_dev *dev, int alternate)
  476. {
  477. struct usb_interface *iface = dev->intf;
  478. struct usb_device *udev;
  479. if (alternate < 0 || alternate >= 256)
  480. return -EINVAL;
  481. udev = interface_to_usbdev(iface);
  482. return usb_set_interface(udev,
  483. iface->altsetting[0].desc.bInterfaceNumber,
  484. alternate);
  485. }
  486. static int is_good_config(struct usbtest_dev *tdev, int len)
  487. {
  488. struct usb_config_descriptor *config;
  489. if (len < sizeof(*config))
  490. return 0;
  491. config = (struct usb_config_descriptor *) tdev->buf;
  492. switch (config->bDescriptorType) {
  493. case USB_DT_CONFIG:
  494. case USB_DT_OTHER_SPEED_CONFIG:
  495. if (config->bLength != 9) {
  496. ERROR(tdev, "bogus config descriptor length\n");
  497. return 0;
  498. }
  499. /* this bit 'must be 1' but often isn't */
  500. if (!realworld && !(config->bmAttributes & 0x80)) {
  501. ERROR(tdev, "high bit of config attributes not set\n");
  502. return 0;
  503. }
  504. if (config->bmAttributes & 0x1f) { /* reserved == 0 */
  505. ERROR(tdev, "reserved config bits set\n");
  506. return 0;
  507. }
  508. break;
  509. default:
  510. return 0;
  511. }
  512. if (le16_to_cpu(config->wTotalLength) == len) /* read it all */
  513. return 1;
  514. if (le16_to_cpu(config->wTotalLength) >= TBUF_SIZE) /* max partial read */
  515. return 1;
  516. ERROR(tdev, "bogus config descriptor read size\n");
  517. return 0;
  518. }
  519. static int is_good_ext(struct usbtest_dev *tdev, u8 *buf)
  520. {
  521. struct usb_ext_cap_descriptor *ext;
  522. u32 attr;
  523. ext = (struct usb_ext_cap_descriptor *) buf;
  524. if (ext->bLength != USB_DT_USB_EXT_CAP_SIZE) {
  525. ERROR(tdev, "bogus usb 2.0 extension descriptor length\n");
  526. return 0;
  527. }
  528. attr = le32_to_cpu(ext->bmAttributes);
  529. /* bits[1:4] are used; the other bits are reserved */
  530. if (attr & ~0x1e) { /* reserved == 0 */
  531. ERROR(tdev, "reserved bits set\n");
  532. return 0;
  533. }
  534. return 1;
  535. }
  536. static int is_good_ss_cap(struct usbtest_dev *tdev, u8 *buf)
  537. {
  538. struct usb_ss_cap_descriptor *ss;
  539. ss = (struct usb_ss_cap_descriptor *) buf;
  540. if (ss->bLength != USB_DT_USB_SS_CAP_SIZE) {
  541. ERROR(tdev, "bogus superspeed device capability descriptor length\n");
  542. return 0;
  543. }
  544. /*
  545. * only bit[1] of bmAttributes is used (for LTM); the other bits
  546. * are reserved
  547. */
  548. if (ss->bmAttributes & ~0x02) { /* reserved == 0 */
  549. ERROR(tdev, "reserved bits set in bmAttributes\n");
  550. return 0;
  551. }
  552. /* bits[0:3] of wSpeedSupported are used; the other bits are reserved */
  553. if (le16_to_cpu(ss->wSpeedSupported) & ~0x0f) { /* reserved == 0 */
  554. ERROR(tdev, "reserved bits set in wSpeedSupported\n");
  555. return 0;
  556. }
  557. return 1;
  558. }
  559. /* sanity test for standard requests working with usb_control_msg() and some
  560. * of the utility functions which use it.
  561. *
  562. * this doesn't test how endpoint halts behave or data toggles get set, since
  563. * we won't do I/O to bulk/interrupt endpoints here (which is how to change
  564. * halt or toggle). toggle testing is impractical without support from hcds.
  565. *
  566. * this avoids failing devices linux would normally work with, by not testing
  567. * config/altsetting operations for devices that only support their defaults.
  568. * such devices rarely support those needless operations.
  569. *
  570. * NOTE that since this is a sanity test, it's not examining boundary cases
  571. * to see if usbcore, hcd, and device all behave right. such testing would
  572. * involve varied read sizes and other operation sequences.
  573. */
  574. static int ch9_postconfig(struct usbtest_dev *dev)
  575. {
  576. struct usb_interface *iface = dev->intf;
  577. struct usb_device *udev = interface_to_usbdev(iface);
  578. int i, alt, retval;
  579. /* [9.2.3] if there's more than one altsetting, we need to be able to
  580. * set and get each one. mostly trusts the descriptors from usbcore.
  581. */
  582. for (i = 0; i < iface->num_altsetting; i++) {
  583. /* 9.2.3 constrains the range here */
  584. alt = iface->altsetting[i].desc.bAlternateSetting;
  585. if (alt < 0 || alt >= iface->num_altsetting) {
  586. dev_err(&iface->dev,
  587. "invalid alt [%d].bAltSetting = %d\n",
  588. i, alt);
  589. }
  590. /* [real world] get/set unimplemented if there's only one */
  591. if (realworld && iface->num_altsetting == 1)
  592. continue;
  593. /* [9.4.10] set_interface */
  594. retval = set_altsetting(dev, alt);
  595. if (retval) {
  596. dev_err(&iface->dev, "can't set_interface = %d, %d\n",
  597. alt, retval);
  598. return retval;
  599. }
  600. /* [9.4.4] get_interface always works */
  601. retval = get_altsetting(dev);
  602. if (retval != alt) {
  603. dev_err(&iface->dev, "get alt should be %d, was %d\n",
  604. alt, retval);
  605. return (retval < 0) ? retval : -EDOM;
  606. }
  607. }
  608. /* [real world] get_config unimplemented if there's only one */
  609. if (!realworld || udev->descriptor.bNumConfigurations != 1) {
  610. int expected = udev->actconfig->desc.bConfigurationValue;
  611. /* [9.4.2] get_configuration always works
  612. * ... although some cheap devices (like one TI Hub I've got)
  613. * won't return config descriptors except before set_config.
  614. */
  615. retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
  616. USB_REQ_GET_CONFIGURATION,
  617. USB_DIR_IN | USB_RECIP_DEVICE,
  618. 0, 0, dev->buf, 1, USB_CTRL_GET_TIMEOUT);
  619. if (retval != 1 || dev->buf[0] != expected) {
  620. dev_err(&iface->dev, "get config --> %d %d (1 %d)\n",
  621. retval, dev->buf[0], expected);
  622. return (retval < 0) ? retval : -EDOM;
  623. }
  624. }
  625. /* there's always [9.4.3] a device descriptor [9.6.1] */
  626. retval = usb_get_descriptor(udev, USB_DT_DEVICE, 0,
  627. dev->buf, sizeof(udev->descriptor));
  628. if (retval != sizeof(udev->descriptor)) {
  629. dev_err(&iface->dev, "dev descriptor --> %d\n", retval);
  630. return (retval < 0) ? retval : -EDOM;
  631. }
  632. /*
  633. * there's always [9.4.3] a bos device descriptor [9.6.2] in USB
  634. * 3.0 spec
  635. */
  636. if (le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0300) {
  637. struct usb_bos_descriptor *bos = NULL;
  638. struct usb_dev_cap_header *header = NULL;
  639. unsigned total, num, length;
  640. u8 *buf;
  641. retval = usb_get_descriptor(udev, USB_DT_BOS, 0, dev->buf,
  642. sizeof(*udev->bos->desc));
  643. if (retval != sizeof(*udev->bos->desc)) {
  644. dev_err(&iface->dev, "bos descriptor --> %d\n", retval);
  645. return (retval < 0) ? retval : -EDOM;
  646. }
  647. bos = (struct usb_bos_descriptor *)dev->buf;
  648. total = le16_to_cpu(bos->wTotalLength);
  649. num = bos->bNumDeviceCaps;
  650. if (total > TBUF_SIZE)
  651. total = TBUF_SIZE;
  652. /*
  653. * get generic device-level capability descriptors [9.6.2]
  654. * in USB 3.0 spec
  655. */
  656. retval = usb_get_descriptor(udev, USB_DT_BOS, 0, dev->buf,
  657. total);
  658. if (retval != total) {
  659. dev_err(&iface->dev, "bos descriptor set --> %d\n",
  660. retval);
  661. return (retval < 0) ? retval : -EDOM;
  662. }
  663. length = sizeof(*udev->bos->desc);
  664. buf = dev->buf;
  665. for (i = 0; i < num; i++) {
  666. buf += length;
  667. if (buf + sizeof(struct usb_dev_cap_header) >
  668. dev->buf + total)
  669. break;
  670. header = (struct usb_dev_cap_header *)buf;
  671. length = header->bLength;
  672. if (header->bDescriptorType !=
  673. USB_DT_DEVICE_CAPABILITY) {
  674. dev_warn(&udev->dev, "not device capability descriptor, skip\n");
  675. continue;
  676. }
  677. switch (header->bDevCapabilityType) {
  678. case USB_CAP_TYPE_EXT:
  679. if (buf + USB_DT_USB_EXT_CAP_SIZE >
  680. dev->buf + total ||
  681. !is_good_ext(dev, buf)) {
  682. dev_err(&iface->dev, "bogus usb 2.0 extension descriptor\n");
  683. return -EDOM;
  684. }
  685. break;
  686. case USB_SS_CAP_TYPE:
  687. if (buf + USB_DT_USB_SS_CAP_SIZE >
  688. dev->buf + total ||
  689. !is_good_ss_cap(dev, buf)) {
  690. dev_err(&iface->dev, "bogus superspeed device capability descriptor\n");
  691. return -EDOM;
  692. }
  693. break;
  694. default:
  695. break;
  696. }
  697. }
  698. }
  699. /* there's always [9.4.3] at least one config descriptor [9.6.3] */
  700. for (i = 0; i < udev->descriptor.bNumConfigurations; i++) {
  701. retval = usb_get_descriptor(udev, USB_DT_CONFIG, i,
  702. dev->buf, TBUF_SIZE);
  703. if (!is_good_config(dev, retval)) {
  704. dev_err(&iface->dev,
  705. "config [%d] descriptor --> %d\n",
  706. i, retval);
  707. return (retval < 0) ? retval : -EDOM;
  708. }
  709. /* FIXME cross-checking udev->config[i] to make sure usbcore
  710. * parsed it right (etc) would be good testing paranoia
  711. */
  712. }
  713. /* and sometimes [9.2.6.6] speed dependent descriptors */
  714. if (le16_to_cpu(udev->descriptor.bcdUSB) == 0x0200) {
  715. struct usb_qualifier_descriptor *d = NULL;
  716. /* device qualifier [9.6.2] */
  717. retval = usb_get_descriptor(udev,
  718. USB_DT_DEVICE_QUALIFIER, 0, dev->buf,
  719. sizeof(struct usb_qualifier_descriptor));
  720. if (retval == -EPIPE) {
  721. if (udev->speed == USB_SPEED_HIGH) {
  722. dev_err(&iface->dev,
  723. "hs dev qualifier --> %d\n",
  724. retval);
  725. return (retval < 0) ? retval : -EDOM;
  726. }
  727. /* usb2.0 but not high-speed capable; fine */
  728. } else if (retval != sizeof(struct usb_qualifier_descriptor)) {
  729. dev_err(&iface->dev, "dev qualifier --> %d\n", retval);
  730. return (retval < 0) ? retval : -EDOM;
  731. } else
  732. d = (struct usb_qualifier_descriptor *) dev->buf;
  733. /* might not have [9.6.2] any other-speed configs [9.6.4] */
  734. if (d) {
  735. unsigned max = d->bNumConfigurations;
  736. for (i = 0; i < max; i++) {
  737. retval = usb_get_descriptor(udev,
  738. USB_DT_OTHER_SPEED_CONFIG, i,
  739. dev->buf, TBUF_SIZE);
  740. if (!is_good_config(dev, retval)) {
  741. dev_err(&iface->dev,
  742. "other speed config --> %d\n",
  743. retval);
  744. return (retval < 0) ? retval : -EDOM;
  745. }
  746. }
  747. }
  748. }
  749. /* FIXME fetch strings from at least the device descriptor */
  750. /* [9.4.5] get_status always works */
  751. retval = usb_get_status(udev, USB_RECIP_DEVICE, 0, dev->buf);
  752. if (retval) {
  753. dev_err(&iface->dev, "get dev status --> %d\n", retval);
  754. return retval;
  755. }
  756. /* FIXME configuration.bmAttributes says if we could try to set/clear
  757. * the device's remote wakeup feature ... if we can, test that here
  758. */
  759. retval = usb_get_status(udev, USB_RECIP_INTERFACE,
  760. iface->altsetting[0].desc.bInterfaceNumber, dev->buf);
  761. if (retval) {
  762. dev_err(&iface->dev, "get interface status --> %d\n", retval);
  763. return retval;
  764. }
  765. /* FIXME get status for each endpoint in the interface */
  766. return 0;
  767. }
  768. /*-------------------------------------------------------------------------*/
  769. /* use ch9 requests to test whether:
  770. * (a) queues work for control, keeping N subtests queued and
  771. * active (auto-resubmit) for M loops through the queue.
  772. * (b) protocol stalls (control-only) will autorecover.
  773. * it's not like bulk/intr; no halt clearing.
  774. * (c) short control reads are reported and handled.
  775. * (d) queues are always processed in-order
  776. */
  777. struct ctrl_ctx {
  778. spinlock_t lock;
  779. struct usbtest_dev *dev;
  780. struct completion complete;
  781. unsigned count;
  782. unsigned pending;
  783. int status;
  784. struct urb **urb;
  785. struct usbtest_param *param;
  786. int last;
  787. };
  788. #define NUM_SUBCASES 15 /* how many test subcases here? */
  789. struct subcase {
  790. struct usb_ctrlrequest setup;
  791. int number;
  792. int expected;
  793. };
  794. static void ctrl_complete(struct urb *urb)
  795. {
  796. struct ctrl_ctx *ctx = urb->context;
  797. struct usb_ctrlrequest *reqp;
  798. struct subcase *subcase;
  799. int status = urb->status;
  800. reqp = (struct usb_ctrlrequest *)urb->setup_packet;
  801. subcase = container_of(reqp, struct subcase, setup);
  802. spin_lock(&ctx->lock);
  803. ctx->count--;
  804. ctx->pending--;
  805. /* queue must transfer and complete in fifo order, unless
  806. * usb_unlink_urb() is used to unlink something not at the
  807. * physical queue head (not tested).
  808. */
  809. if (subcase->number > 0) {
  810. if ((subcase->number - ctx->last) != 1) {
  811. ERROR(ctx->dev,
  812. "subcase %d completed out of order, last %d\n",
  813. subcase->number, ctx->last);
  814. status = -EDOM;
  815. ctx->last = subcase->number;
  816. goto error;
  817. }
  818. }
  819. ctx->last = subcase->number;
  820. /* succeed or fault in only one way? */
  821. if (status == subcase->expected)
  822. status = 0;
  823. /* async unlink for cleanup? */
  824. else if (status != -ECONNRESET) {
  825. /* some faults are allowed, not required */
  826. if (subcase->expected > 0 && (
  827. ((status == -subcase->expected /* happened */
  828. || status == 0)))) /* didn't */
  829. status = 0;
  830. /* sometimes more than one fault is allowed */
  831. else if (subcase->number == 12 && status == -EPIPE)
  832. status = 0;
  833. else
  834. ERROR(ctx->dev, "subtest %d error, status %d\n",
  835. subcase->number, status);
  836. }
  837. /* unexpected status codes mean errors; ideally, in hardware */
  838. if (status) {
  839. error:
  840. if (ctx->status == 0) {
  841. int i;
  842. ctx->status = status;
  843. ERROR(ctx->dev, "control queue %02x.%02x, err %d, "
  844. "%d left, subcase %d, len %d/%d\n",
  845. reqp->bRequestType, reqp->bRequest,
  846. status, ctx->count, subcase->number,
  847. urb->actual_length,
  848. urb->transfer_buffer_length);
  849. /* FIXME this "unlink everything" exit route should
  850. * be a separate test case.
  851. */
  852. /* unlink whatever's still pending */
  853. for (i = 1; i < ctx->param->sglen; i++) {
  854. struct urb *u = ctx->urb[
  855. (i + subcase->number)
  856. % ctx->param->sglen];
  857. if (u == urb || !u->dev)
  858. continue;
  859. spin_unlock(&ctx->lock);
  860. status = usb_unlink_urb(u);
  861. spin_lock(&ctx->lock);
  862. switch (status) {
  863. case -EINPROGRESS:
  864. case -EBUSY:
  865. case -EIDRM:
  866. continue;
  867. default:
  868. ERROR(ctx->dev, "urb unlink --> %d\n",
  869. status);
  870. }
  871. }
  872. status = ctx->status;
  873. }
  874. }
  875. /* resubmit if we need to, else mark this as done */
  876. if ((status == 0) && (ctx->pending < ctx->count)) {
  877. status = usb_submit_urb(urb, GFP_ATOMIC);
  878. if (status != 0) {
  879. ERROR(ctx->dev,
  880. "can't resubmit ctrl %02x.%02x, err %d\n",
  881. reqp->bRequestType, reqp->bRequest, status);
  882. urb->dev = NULL;
  883. } else
  884. ctx->pending++;
  885. } else
  886. urb->dev = NULL;
  887. /* signal completion when nothing's queued */
  888. if (ctx->pending == 0)
  889. complete(&ctx->complete);
  890. spin_unlock(&ctx->lock);
  891. }
  892. static int
  893. test_ctrl_queue(struct usbtest_dev *dev, struct usbtest_param *param)
  894. {
  895. struct usb_device *udev = testdev_to_usbdev(dev);
  896. struct urb **urb;
  897. struct ctrl_ctx context;
  898. int i;
  899. if (param->sglen == 0 || param->iterations > UINT_MAX / param->sglen)
  900. return -EOPNOTSUPP;
  901. spin_lock_init(&context.lock);
  902. context.dev = dev;
  903. init_completion(&context.complete);
  904. context.count = param->sglen * param->iterations;
  905. context.pending = 0;
  906. context.status = -ENOMEM;
  907. context.param = param;
  908. context.last = -1;
  909. /* allocate and init the urbs we'll queue.
  910. * as with bulk/intr sglists, sglen is the queue depth; it also
  911. * controls which subtests run (more tests than sglen) or rerun.
  912. */
  913. urb = kcalloc(param->sglen, sizeof(struct urb *), GFP_KERNEL);
  914. if (!urb)
  915. return -ENOMEM;
  916. for (i = 0; i < param->sglen; i++) {
  917. int pipe = usb_rcvctrlpipe(udev, 0);
  918. unsigned len;
  919. struct urb *u;
  920. struct usb_ctrlrequest req;
  921. struct subcase *reqp;
  922. /* sign of this variable means:
  923. * -: tested code must return this (negative) error code
  924. * +: tested code may return this (negative too) error code
  925. */
  926. int expected = 0;
  927. /* requests here are mostly expected to succeed on any
  928. * device, but some are chosen to trigger protocol stalls
  929. * or short reads.
  930. */
  931. memset(&req, 0, sizeof(req));
  932. req.bRequest = USB_REQ_GET_DESCRIPTOR;
  933. req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
  934. switch (i % NUM_SUBCASES) {
  935. case 0: /* get device descriptor */
  936. req.wValue = cpu_to_le16(USB_DT_DEVICE << 8);
  937. len = sizeof(struct usb_device_descriptor);
  938. break;
  939. case 1: /* get first config descriptor (only) */
  940. req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
  941. len = sizeof(struct usb_config_descriptor);
  942. break;
  943. case 2: /* get altsetting (OFTEN STALLS) */
  944. req.bRequest = USB_REQ_GET_INTERFACE;
  945. req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
  946. /* index = 0 means first interface */
  947. len = 1;
  948. expected = EPIPE;
  949. break;
  950. case 3: /* get interface status */
  951. req.bRequest = USB_REQ_GET_STATUS;
  952. req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
  953. /* interface 0 */
  954. len = 2;
  955. break;
  956. case 4: /* get device status */
  957. req.bRequest = USB_REQ_GET_STATUS;
  958. req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
  959. len = 2;
  960. break;
  961. case 5: /* get device qualifier (MAY STALL) */
  962. req.wValue = cpu_to_le16 (USB_DT_DEVICE_QUALIFIER << 8);
  963. len = sizeof(struct usb_qualifier_descriptor);
  964. if (udev->speed != USB_SPEED_HIGH)
  965. expected = EPIPE;
  966. break;
  967. case 6: /* get first config descriptor, plus interface */
  968. req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
  969. len = sizeof(struct usb_config_descriptor);
  970. len += sizeof(struct usb_interface_descriptor);
  971. break;
  972. case 7: /* get interface descriptor (ALWAYS STALLS) */
  973. req.wValue = cpu_to_le16 (USB_DT_INTERFACE << 8);
  974. /* interface == 0 */
  975. len = sizeof(struct usb_interface_descriptor);
  976. expected = -EPIPE;
  977. break;
  978. /* NOTE: two consecutive stalls in the queue here.
  979. * that tests fault recovery a bit more aggressively. */
  980. case 8: /* clear endpoint halt (MAY STALL) */
  981. req.bRequest = USB_REQ_CLEAR_FEATURE;
  982. req.bRequestType = USB_RECIP_ENDPOINT;
  983. /* wValue 0 == ep halt */
  984. /* wIndex 0 == ep0 (shouldn't halt!) */
  985. len = 0;
  986. pipe = usb_sndctrlpipe(udev, 0);
  987. expected = EPIPE;
  988. break;
  989. case 9: /* get endpoint status */
  990. req.bRequest = USB_REQ_GET_STATUS;
  991. req.bRequestType = USB_DIR_IN|USB_RECIP_ENDPOINT;
  992. /* endpoint 0 */
  993. len = 2;
  994. break;
  995. case 10: /* trigger short read (EREMOTEIO) */
  996. req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
  997. len = 1024;
  998. expected = -EREMOTEIO;
  999. break;
  1000. /* NOTE: two consecutive _different_ faults in the queue. */
  1001. case 11: /* get endpoint descriptor (ALWAYS STALLS) */
  1002. req.wValue = cpu_to_le16(USB_DT_ENDPOINT << 8);
  1003. /* endpoint == 0 */
  1004. len = sizeof(struct usb_interface_descriptor);
  1005. expected = EPIPE;
  1006. break;
  1007. /* NOTE: sometimes even a third fault in the queue! */
  1008. case 12: /* get string 0 descriptor (MAY STALL) */
  1009. req.wValue = cpu_to_le16(USB_DT_STRING << 8);
  1010. /* string == 0, for language IDs */
  1011. len = sizeof(struct usb_interface_descriptor);
  1012. /* may succeed when > 4 languages */
  1013. expected = EREMOTEIO; /* or EPIPE, if no strings */
  1014. break;
  1015. case 13: /* short read, resembling case 10 */
  1016. req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
  1017. /* last data packet "should" be DATA1, not DATA0 */
  1018. if (udev->speed == USB_SPEED_SUPER)
  1019. len = 1024 - 512;
  1020. else
  1021. len = 1024 - udev->descriptor.bMaxPacketSize0;
  1022. expected = -EREMOTEIO;
  1023. break;
  1024. case 14: /* short read; try to fill the last packet */
  1025. req.wValue = cpu_to_le16((USB_DT_DEVICE << 8) | 0);
  1026. /* device descriptor size == 18 bytes */
  1027. len = udev->descriptor.bMaxPacketSize0;
  1028. if (udev->speed == USB_SPEED_SUPER)
  1029. len = 512;
  1030. switch (len) {
  1031. case 8:
  1032. len = 24;
  1033. break;
  1034. case 16:
  1035. len = 32;
  1036. break;
  1037. }
  1038. expected = -EREMOTEIO;
  1039. break;
  1040. default:
  1041. ERROR(dev, "bogus number of ctrl queue testcases!\n");
  1042. context.status = -EINVAL;
  1043. goto cleanup;
  1044. }
  1045. req.wLength = cpu_to_le16(len);
  1046. urb[i] = u = simple_alloc_urb(udev, pipe, len);
  1047. if (!u)
  1048. goto cleanup;
  1049. reqp = kmalloc(sizeof(*reqp), GFP_KERNEL);
  1050. if (!reqp)
  1051. goto cleanup;
  1052. reqp->setup = req;
  1053. reqp->number = i % NUM_SUBCASES;
  1054. reqp->expected = expected;
  1055. u->setup_packet = (char *) &reqp->setup;
  1056. u->context = &context;
  1057. u->complete = ctrl_complete;
  1058. }
  1059. /* queue the urbs */
  1060. context.urb = urb;
  1061. spin_lock_irq(&context.lock);
  1062. for (i = 0; i < param->sglen; i++) {
  1063. context.status = usb_submit_urb(urb[i], GFP_ATOMIC);
  1064. if (context.status != 0) {
  1065. ERROR(dev, "can't submit urb[%d], status %d\n",
  1066. i, context.status);
  1067. context.count = context.pending;
  1068. break;
  1069. }
  1070. context.pending++;
  1071. }
  1072. spin_unlock_irq(&context.lock);
  1073. /* FIXME set timer and time out; provide a disconnect hook */
  1074. /* wait for the last one to complete */
  1075. if (context.pending > 0)
  1076. wait_for_completion(&context.complete);
  1077. cleanup:
  1078. for (i = 0; i < param->sglen; i++) {
  1079. if (!urb[i])
  1080. continue;
  1081. urb[i]->dev = udev;
  1082. kfree(urb[i]->setup_packet);
  1083. simple_free_urb(urb[i]);
  1084. }
  1085. kfree(urb);
  1086. return context.status;
  1087. }
  1088. #undef NUM_SUBCASES
  1089. /*-------------------------------------------------------------------------*/
  1090. static void unlink1_callback(struct urb *urb)
  1091. {
  1092. int status = urb->status;
  1093. /* we "know" -EPIPE (stall) never happens */
  1094. if (!status)
  1095. status = usb_submit_urb(urb, GFP_ATOMIC);
  1096. if (status) {
  1097. urb->status = status;
  1098. complete(urb->context);
  1099. }
  1100. }
  1101. static int unlink1(struct usbtest_dev *dev, int pipe, int size, int async)
  1102. {
  1103. struct urb *urb;
  1104. struct completion completion;
  1105. int retval = 0;
  1106. init_completion(&completion);
  1107. urb = simple_alloc_urb(testdev_to_usbdev(dev), pipe, size);
  1108. if (!urb)
  1109. return -ENOMEM;
  1110. urb->context = &completion;
  1111. urb->complete = unlink1_callback;
  1112. /* keep the endpoint busy. there are lots of hc/hcd-internal
  1113. * states, and testing should get to all of them over time.
  1114. *
  1115. * FIXME want additional tests for when endpoint is STALLing
  1116. * due to errors, or is just NAKing requests.
  1117. */
  1118. retval = usb_submit_urb(urb, GFP_KERNEL);
  1119. if (retval != 0) {
  1120. dev_err(&dev->intf->dev, "submit fail %d\n", retval);
  1121. return retval;
  1122. }
  1123. /* unlinking that should always work. variable delay tests more
  1124. * hcd states and code paths, even with little other system load.
  1125. */
  1126. msleep(jiffies % (2 * INTERRUPT_RATE));
  1127. if (async) {
  1128. while (!completion_done(&completion)) {
  1129. retval = usb_unlink_urb(urb);
  1130. switch (retval) {
  1131. case -EBUSY:
  1132. case -EIDRM:
  1133. /* we can't unlink urbs while they're completing
  1134. * or if they've completed, and we haven't
  1135. * resubmitted. "normal" drivers would prevent
  1136. * resubmission, but since we're testing unlink
  1137. * paths, we can't.
  1138. */
  1139. ERROR(dev, "unlink retry\n");
  1140. continue;
  1141. case 0:
  1142. case -EINPROGRESS:
  1143. break;
  1144. default:
  1145. dev_err(&dev->intf->dev,
  1146. "unlink fail %d\n", retval);
  1147. return retval;
  1148. }
  1149. break;
  1150. }
  1151. } else
  1152. usb_kill_urb(urb);
  1153. wait_for_completion(&completion);
  1154. retval = urb->status;
  1155. simple_free_urb(urb);
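/* the -1000/-2000 bias below keeps an unexpected urb status
 * distinguishable from an ordinary errno and records which path
 * (async unlink vs. usb_kill_urb) produced it.
 */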
  1156. if (async)
  1157. return (retval == -ECONNRESET) ? 0 : retval - 1000;
  1158. else
  1159. return (retval == -ENOENT || retval == -EPERM) ?
  1160. 0 : retval - 2000;
  1161. }
  1162. static int unlink_simple(struct usbtest_dev *dev, int pipe, int len)
  1163. {
  1164. int retval = 0;
  1165. /* test sync and async paths */
  1166. retval = unlink1(dev, pipe, len, 1);
  1167. if (!retval)
  1168. retval = unlink1(dev, pipe, len, 0);
  1169. return retval;
  1170. }
  1171. /*-------------------------------------------------------------------------*/
  1172. struct queued_ctx {
  1173. struct completion complete;
  1174. atomic_t pending;
  1175. unsigned num;
  1176. int status;
  1177. struct urb **urbs;
  1178. };
  1179. static void unlink_queued_callback(struct urb *urb)
  1180. {
  1181. int status = urb->status;
  1182. struct queued_ctx *ctx = urb->context;
  1183. if (ctx->status)
  1184. goto done;
  1185. if (urb == ctx->urbs[ctx->num - 4] || urb == ctx->urbs[ctx->num - 2]) {
  1186. if (status == -ECONNRESET)
  1187. goto done;
  1188. /* What error should we report if the URB completed normally? */
  1189. }
  1190. if (status != 0)
  1191. ctx->status = status;
  1192. done:
  1193. if (atomic_dec_and_test(&ctx->pending))
  1194. complete(&ctx->complete);
  1195. }
  1196. static int unlink_queued(struct usbtest_dev *dev, int pipe, unsigned num,
  1197. unsigned size)
  1198. {
  1199. struct queued_ctx ctx;
  1200. struct usb_device *udev = testdev_to_usbdev(dev);
  1201. void *buf;
  1202. dma_addr_t buf_dma;
  1203. int i;
  1204. int retval = -ENOMEM;
  1205. init_completion(&ctx.complete);
  1206. atomic_set(&ctx.pending, 1); /* One more than the actual value */
  1207. ctx.num = num;
  1208. ctx.status = 0;
  1209. buf = usb_alloc_coherent(udev, size, GFP_KERNEL, &buf_dma);
  1210. if (!buf)
  1211. return retval;
  1212. memset(buf, 0, size);
  1213. /* Allocate and init the urbs we'll queue */
  1214. ctx.urbs = kcalloc(num, sizeof(struct urb *), GFP_KERNEL);
  1215. if (!ctx.urbs)
  1216. goto free_buf;
  1217. for (i = 0; i < num; i++) {
  1218. ctx.urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
  1219. if (!ctx.urbs[i])
  1220. goto free_urbs;
  1221. usb_fill_bulk_urb(ctx.urbs[i], udev, pipe, buf, size,
  1222. unlink_queued_callback, &ctx);
  1223. ctx.urbs[i]->transfer_dma = buf_dma;
  1224. ctx.urbs[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
  1225. }
  1226. /* Submit all the URBs and then unlink URBs num - 4 and num - 2. */
  1227. for (i = 0; i < num; i++) {
  1228. atomic_inc(&ctx.pending);
  1229. retval = usb_submit_urb(ctx.urbs[i], GFP_KERNEL);
  1230. if (retval != 0) {
  1231. dev_err(&dev->intf->dev, "submit urbs[%d] fail %d\n",
  1232. i, retval);
  1233. atomic_dec(&ctx.pending);
  1234. ctx.status = retval;
  1235. break;
  1236. }
  1237. }
  1238. if (i == num) {
  1239. usb_unlink_urb(ctx.urbs[num - 4]);
  1240. usb_unlink_urb(ctx.urbs[num - 2]);
  1241. } else {
  1242. while (--i >= 0)
  1243. usb_unlink_urb(ctx.urbs[i]);
  1244. }
  1245. if (atomic_dec_and_test(&ctx.pending)) /* The extra count */
  1246. complete(&ctx.complete);
  1247. wait_for_completion(&ctx.complete);
  1248. retval = ctx.status;
  1249. free_urbs:
  1250. for (i = 0; i < num; i++)
  1251. usb_free_urb(ctx.urbs[i]);
  1252. kfree(ctx.urbs);
  1253. free_buf:
  1254. usb_free_coherent(udev, size, buf, buf_dma);
  1255. return retval;
  1256. }
  1257. /*-------------------------------------------------------------------------*/
  1258. static int verify_not_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
  1259. {
  1260. int retval;
  1261. u16 status;
  1262. /* shouldn't look or act halted */
  1263. retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
  1264. if (retval < 0) {
  1265. ERROR(tdev, "ep %02x couldn't get no-halt status, %d\n",
  1266. ep, retval);
  1267. return retval;
  1268. }
  1269. if (status != 0) {
  1270. ERROR(tdev, "ep %02x bogus status: %04x != 0\n", ep, status);
  1271. return -EINVAL;
  1272. }
  1273. retval = simple_io(tdev, urb, 1, 0, 0, __func__);
  1274. if (retval != 0)
  1275. return -EINVAL;
  1276. return 0;
  1277. }
  1278. static int verify_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
  1279. {
  1280. int retval;
  1281. u16 status;
  1282. /* should look and act halted */
  1283. retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
  1284. if (retval < 0) {
  1285. ERROR(tdev, "ep %02x couldn't get halt status, %d\n",
  1286. ep, retval);
  1287. return retval;
  1288. }
  1289. if (status != 1) {
  1290. ERROR(tdev, "ep %02x bogus status: %04x != 1\n", ep, status);
  1291. return -EINVAL;
  1292. }
  1293. retval = simple_io(tdev, urb, 1, 0, -EPIPE, __func__);
  1294. if (retval != -EPIPE)
  1295. return -EINVAL;
  1296. retval = simple_io(tdev, urb, 1, 0, -EPIPE, "verify_still_halted");
  1297. if (retval != -EPIPE)
  1298. return -EINVAL;
  1299. return 0;
  1300. }
  1301. static int test_halt(struct usbtest_dev *tdev, int ep, struct urb *urb)
  1302. {
  1303. int retval;
  1304. /* shouldn't look or act halted now */
  1305. retval = verify_not_halted(tdev, ep, urb);
  1306. if (retval < 0)
  1307. return retval;
  1308. /* set halt (protocol test only), verify it worked */
  1309. retval = usb_control_msg(urb->dev, usb_sndctrlpipe(urb->dev, 0),
  1310. USB_REQ_SET_FEATURE, USB_RECIP_ENDPOINT,
  1311. USB_ENDPOINT_HALT, ep,
  1312. NULL, 0, USB_CTRL_SET_TIMEOUT);
  1313. if (retval < 0) {
  1314. ERROR(tdev, "ep %02x couldn't set halt, %d\n", ep, retval);
  1315. return retval;
  1316. }
  1317. retval = verify_halted(tdev, ep, urb);
  1318. if (retval < 0)
  1319. return retval;
  1320. /* clear halt (tests API + protocol), verify it worked */
  1321. retval = usb_clear_halt(urb->dev, urb->pipe);
  1322. if (retval < 0) {
  1323. ERROR(tdev, "ep %02x couldn't clear halt, %d\n", ep, retval);
  1324. return retval;
  1325. }
  1326. retval = verify_not_halted(tdev, ep, urb);
  1327. if (retval < 0)
  1328. return retval;
  1329. /* NOTE: could also verify SET_INTERFACE clear halts ... */
  1330. return 0;
  1331. }
static int halt_simple(struct usbtest_dev *dev)
{
	int ep;
	int retval = 0;
	struct urb *urb;
	struct usb_device *udev = testdev_to_usbdev(dev);

	if (udev->speed == USB_SPEED_SUPER)
		urb = simple_alloc_urb(udev, 0, 1024);
	else
		urb = simple_alloc_urb(udev, 0, 512);
	if (urb == NULL)
		return -ENOMEM;

	if (dev->in_pipe) {
		ep = usb_pipeendpoint(dev->in_pipe) | USB_DIR_IN;
		urb->pipe = dev->in_pipe;
		retval = test_halt(dev, ep, urb);
		if (retval < 0)
			goto done;
	}

	if (dev->out_pipe) {
		ep = usb_pipeendpoint(dev->out_pipe);
		urb->pipe = dev->out_pipe;
		retval = test_halt(dev, ep, urb);
	}
done:
	simple_free_urb(urb);
	return retval;
}

/*-------------------------------------------------------------------------*/

/* Control OUT tests use the vendor control requests from Intel's
 * USB 2.0 compliance test device: write a buffer, read it back.
 *
 * Intel's spec only _requires_ that it work for one packet, which
 * is pretty weak. Some HCDs place limits here; most devices will
 * need to be able to handle more than one OUT data packet. We'll
 * try whatever we're told to try.
 */
static int ctrl_out(struct usbtest_dev *dev,
		unsigned count, unsigned length, unsigned vary, unsigned offset)
{
	unsigned i, j, len;
	int retval;
	u8 *buf;
	char *what = "?";
	struct usb_device *udev;

	if (length < 1 || length > 0xffff || vary >= length)
		return -EINVAL;

	buf = kmalloc(length + offset, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	buf += offset;
	udev = testdev_to_usbdev(dev);
	len = length;
	retval = 0;

	/* NOTE: hardware might well act differently if we pushed it
	 * with lots of back-to-back queued requests.
	 */
	for (i = 0; i < count; i++) {
		/* write patterned data */
		for (j = 0; j < len; j++)
			buf[j] = i + j;
		retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				0x5b, USB_DIR_OUT|USB_TYPE_VENDOR,
				0, 0, buf, len, USB_CTRL_SET_TIMEOUT);
		if (retval != len) {
			what = "write";
			if (retval >= 0) {
				ERROR(dev, "ctrl_out, wlen %d (expected %d)\n",
						retval, len);
				retval = -EBADMSG;
			}
			break;
		}

		/* read it back -- assuming nothing intervened!! */
		retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
				0x5c, USB_DIR_IN|USB_TYPE_VENDOR,
				0, 0, buf, len, USB_CTRL_GET_TIMEOUT);
		if (retval != len) {
			what = "read";
			if (retval >= 0) {
				ERROR(dev, "ctrl_out, rlen %d (expected %d)\n",
						retval, len);
				retval = -EBADMSG;
			}
			break;
		}

		/* fail if we can't verify */
		for (j = 0; j < len; j++) {
			if (buf[j] != (u8) (i + j)) {
				ERROR(dev, "ctrl_out, byte %d is %d not %d\n",
						j, buf[j], (u8) (i + j));
				retval = -EBADMSG;
				break;
			}
		}
		if (retval < 0) {
			what = "verify";
			break;
		}

		len += vary;

		/* [real world] the "zero bytes IN" case isn't really used.
		 * hardware can easily trip up in this weird case, since its
		 * status stage is IN, not OUT like other ep0in transfers.
		 */
		if (len > length)
			len = realworld ? 1 : 0;
	}

	if (retval < 0)
		ERROR(dev, "ctrl_out %s failed, code %d, count %d\n",
				what, retval, i);

	kfree(buf - offset);
	return retval;
}

/*-------------------------------------------------------------------------*/

/* ISO tests ... mimics common usage
 *  - buffer length is split into N packets (mostly maxpacket sized)
 *  - multi-buffers according to sglen
 */

struct iso_context {
	unsigned count;
	unsigned pending;
	spinlock_t lock;
	struct completion done;
	int submit_error;
	unsigned long errors;
	unsigned long packet_count;
	struct usbtest_dev *dev;
};
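
/* Completion handler: tally per-packet errors, resubmit the URB while more
 * iterations remain, and complete the test once the last URB retires.
 */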
static void iso_callback(struct urb *urb)
{
	struct iso_context *ctx = urb->context;

	spin_lock(&ctx->lock);
	ctx->count--;

	ctx->packet_count += urb->number_of_packets;
	if (urb->error_count > 0)
		ctx->errors += urb->error_count;
	else if (urb->status != 0)
		ctx->errors += urb->number_of_packets;
	else if (urb->actual_length != urb->transfer_buffer_length)
		ctx->errors++;
	else if (check_guard_bytes(ctx->dev, urb) != 0)
		ctx->errors++;

	if (urb->status == 0 && ctx->count > (ctx->pending - 1)
			&& !ctx->submit_error) {
		int status = usb_submit_urb(urb, GFP_ATOMIC);
		switch (status) {
		case 0:
			goto done;
		default:
			dev_err(&ctx->dev->intf->dev,
					"iso resubmit err %d\n",
					status);
			/* FALLTHROUGH */
		case -ENODEV:		/* disconnected */
		case -ESHUTDOWN:	/* endpoint disabled */
			ctx->submit_error = 1;
			break;
		}
	}

	ctx->pending--;
	if (ctx->pending == 0) {
		if (ctx->errors)
			dev_err(&ctx->dev->intf->dev,
					"iso test, %lu errors out of %lu\n",
					ctx->errors, ctx->packet_count);
		complete(&ctx->done);
	}
done:
	spin_unlock(&ctx->lock);
}
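
/* Allocate one ISO URB with a coherent buffer split into maxpacket-sized
 * frames; an optional byte offset misaligns the buffer, and the skipped
 * bytes are filled with guard bytes so overwrites can be detected.
 */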
static struct urb *iso_alloc_urb(
	struct usb_device *udev,
	int pipe,
	struct usb_endpoint_descriptor *desc,
	long bytes,
	unsigned offset
)
{
	struct urb *urb;
	unsigned i, maxp, packets;

	if (bytes < 0 || !desc)
		return NULL;
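
	/* wMaxPacketSize: bits 10:0 give the base packet size; for high
	 * bandwidth endpoints, bits 12:11 add up to two extra transactions
	 * per microframe.
	 */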
	maxp = 0x7ff & usb_endpoint_maxp(desc);
	maxp *= 1 + (0x3 & (usb_endpoint_maxp(desc) >> 11));
	packets = DIV_ROUND_UP(bytes, maxp);

	urb = usb_alloc_urb(packets, GFP_KERNEL);
	if (!urb)
		return urb;
	urb->dev = udev;
	urb->pipe = pipe;

	urb->number_of_packets = packets;
	urb->transfer_buffer_length = bytes;
	urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
							GFP_KERNEL,
							&urb->transfer_dma);
	if (!urb->transfer_buffer) {
		usb_free_urb(urb);
		return NULL;
	}
	if (offset) {
		memset(urb->transfer_buffer, GUARD_BYTE, offset);
		urb->transfer_buffer += offset;
		urb->transfer_dma += offset;
	}
	/* For inbound transfers use guard byte so that test fails if
	   data not correctly copied */
	memset(urb->transfer_buffer,
			usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
			bytes);

	for (i = 0; i < packets; i++) {
		/* here, only the last packet will be short */
		urb->iso_frame_desc[i].length = min((unsigned) bytes, maxp);
		bytes -= urb->iso_frame_desc[i].length;

		urb->iso_frame_desc[i].offset = maxp * i;
	}

	urb->complete = iso_callback;
	/* urb->context = SET BY CALLER */
	urb->interval = 1 << (desc->bInterval - 1);
	urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
	return urb;
}
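
/* Queue up to param->sglen ISO URBs, let the completion handler keep them
 * resubmitted for the requested iterations, then judge the overall error
 * rate once everything has drained.
 */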
static int
test_iso_queue(struct usbtest_dev *dev, struct usbtest_param *param,
		int pipe, struct usb_endpoint_descriptor *desc, unsigned offset)
{
	struct iso_context context;
	struct usb_device *udev;
	unsigned i;
	unsigned long packets = 0;
	int status = 0;
	struct urb *urbs[10];	/* FIXME no limit */

	if (param->sglen > 10)
		return -EDOM;

	memset(&context, 0, sizeof(context));
	context.count = param->iterations * param->sglen;
	context.dev = dev;
	init_completion(&context.done);
	spin_lock_init(&context.lock);

	memset(urbs, 0, sizeof(urbs));
	udev = testdev_to_usbdev(dev);
	dev_info(&dev->intf->dev,
		"... iso period %d %sframes, wMaxPacket %04x\n",
		1 << (desc->bInterval - 1),
		(udev->speed == USB_SPEED_HIGH) ? "micro" : "",
		usb_endpoint_maxp(desc));

	for (i = 0; i < param->sglen; i++) {
		urbs[i] = iso_alloc_urb(udev, pipe, desc,
					param->length, offset);
		if (!urbs[i]) {
			status = -ENOMEM;
			goto fail;
		}
		packets += urbs[i]->number_of_packets;
		urbs[i]->context = &context;
	}
	packets *= param->iterations;
	dev_info(&dev->intf->dev,
		"... total %lu msec (%lu packets)\n",
		(packets * (1 << (desc->bInterval - 1)))
			/ ((udev->speed == USB_SPEED_HIGH) ? 8 : 1),
		packets);

	spin_lock_irq(&context.lock);
	for (i = 0; i < param->sglen; i++) {
		++context.pending;
		status = usb_submit_urb(urbs[i], GFP_ATOMIC);
		if (status < 0) {
			ERROR(dev, "submit iso[%d], error %d\n", i, status);
			if (i == 0) {
				spin_unlock_irq(&context.lock);
				goto fail;
			}

			simple_free_urb(urbs[i]);
			urbs[i] = NULL;
			context.pending--;
			context.submit_error = 1;
			break;
		}
	}
	spin_unlock_irq(&context.lock);

	wait_for_completion(&context.done);

	for (i = 0; i < param->sglen; i++) {
		if (urbs[i])
			simple_free_urb(urbs[i]);
	}
	/*
	 * Isochronous transfers are expected to fail sometimes. As an
	 * arbitrary limit, we will report an error if any submissions
	 * fail or if the transfer failure rate is > 10%.
	 */
	if (status != 0)
		;
	else if (context.submit_error)
		status = -EACCES;
	else if (context.errors > context.packet_count / 10)
		status = -EIO;
	return status;

fail:
	for (i = 0; i < param->sglen; i++) {
		if (urbs[i])
			simple_free_urb(urbs[i]);
	}
	return status;
}
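
/* Bulk I/O through an URB allocated with a deliberately unaligned buffer;
 * the odd-address test cases below use this to exercise unaligned
 * transfers with and without core DMA mapping.
 */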
static int test_unaligned_bulk(
	struct usbtest_dev *tdev,
	int pipe,
	unsigned length,
	int iterations,
	unsigned transfer_flags,
	const char *label)
{
	int retval;
	struct urb *urb = usbtest_alloc_urb(
		testdev_to_usbdev(tdev), pipe, length, transfer_flags, 1);

	if (!urb)
		return -ENOMEM;

	retval = simple_io(tdev, urb, iterations, 0, 0, label);
	simple_free_urb(urb);
	return retval;
}

/*-------------------------------------------------------------------------*/

/* We only have this one interface to user space, through usbfs.
 * User mode code can scan usbfs to find N different devices (maybe on
 * different busses) to use when testing, and allocate one thread per
 * test. So discovery is simplified, and we have no device naming issues.
 *
 * Don't use these only as stress/load tests. Use them along with
 * other USB bus activity: plugging, unplugging, mousing, mp3 playback,
 * video capture, and so on. Run different tests at different times, in
 * different sequences. Nothing here should interact with other devices,
 * except indirectly by consuming USB bandwidth and CPU resources for test
 * threads and request completion. But the only way to know that for sure
 * is to test when HC queues are in use by many devices.
 *
 * WARNING: Because usbfs grabs udev->dev.sem before calling this ioctl(),
 * it locks out usbcore in certain code paths. Notably, if you disconnect
 * the device-under-test, khubd will block forever waiting for the
 * ioctl to complete ... so that usb_disconnect() can abort the pending
 * urbs and then call usbtest_disconnect(). To abort a test, you're best
 * off just killing the userspace task and waiting for it to exit.
 */
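
/* Dispatch one test case selected by param->test_num. Unknown or skipped
 * cases leave retval at -EOPNOTSUPP; the elapsed wall-clock time is
 * returned to user space in param->duration.
 */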
static int
usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
{
	struct usbtest_dev *dev = usb_get_intfdata(intf);
	struct usb_device *udev = testdev_to_usbdev(dev);
	struct usbtest_param *param = buf;
	int retval = -EOPNOTSUPP;
	struct urb *urb;
	struct scatterlist *sg;
	struct usb_sg_request req;
	struct timeval start;
	unsigned i;

	/* FIXME USBDEVFS_CONNECTINFO doesn't say how fast the device is. */

	pattern = mod_pattern;

	if (code != USBTEST_REQUEST)
		return -EOPNOTSUPP;

	if (param->iterations <= 0)
		return -EINVAL;

	if (mutex_lock_interruptible(&dev->lock))
		return -ERESTARTSYS;

	/* FIXME: What if a system sleep starts while a test is running? */

	/* some devices, like ez-usb default devices, need a non-default
	 * altsetting to have any active endpoints. some tests change
	 * altsettings; force a default so most tests don't need to check.
	 */
	if (dev->info->alt >= 0) {
		int res;

		if (intf->altsetting->desc.bInterfaceNumber) {
			mutex_unlock(&dev->lock);
			return -ENODEV;
		}
		res = set_altsetting(dev, dev->info->alt);
		if (res) {
			dev_err(&intf->dev,
					"set altsetting to %d failed, %d\n",
					dev->info->alt, res);
			mutex_unlock(&dev->lock);
			return res;
		}
	}

	/*
	 * Just a bunch of test cases that every HCD is expected to handle.
	 *
	 * Some may need specific firmware, though it'd be good to have
	 * one firmware image to handle all the test cases.
	 *
	 * FIXME add more tests! cancel requests, verify the data, control
	 * queueing, concurrent read+write threads, and so on.
	 */
	do_gettimeofday(&start);
	switch (param->test_num) {

	case 0:
		dev_info(&intf->dev, "TEST 0: NOP\n");
		retval = 0;
		break;

	/* Simple non-queued bulk I/O tests */
	case 1:
		if (dev->out_pipe == 0)
			break;
		dev_info(&intf->dev,
				"TEST 1: write %d bytes %u times\n",
				param->length, param->iterations);
		urb = simple_alloc_urb(udev, dev->out_pipe, param->length);
		if (!urb) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE: bulk sink (maybe accepts short writes) */
		retval = simple_io(dev, urb, param->iterations, 0, 0, "test1");
		simple_free_urb(urb);
		break;

	case 2:
		if (dev->in_pipe == 0)
			break;
		dev_info(&intf->dev,
				"TEST 2: read %d bytes %u times\n",
				param->length, param->iterations);
		urb = simple_alloc_urb(udev, dev->in_pipe, param->length);
		if (!urb) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE: bulk source (maybe generates short writes) */
		retval = simple_io(dev, urb, param->iterations, 0, 0, "test2");
		simple_free_urb(urb);
		break;

	case 3:
		if (dev->out_pipe == 0 || param->vary == 0)
			break;
		dev_info(&intf->dev,
				"TEST 3: write/%d 0..%d bytes %u times\n",
				param->vary, param->length, param->iterations);
		urb = simple_alloc_urb(udev, dev->out_pipe, param->length);
		if (!urb) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE: bulk sink (maybe accepts short writes) */
		retval = simple_io(dev, urb, param->iterations, param->vary,
					0, "test3");
		simple_free_urb(urb);
		break;

	case 4:
		if (dev->in_pipe == 0 || param->vary == 0)
			break;
		dev_info(&intf->dev,
				"TEST 4: read/%d 0..%d bytes %u times\n",
				param->vary, param->length, param->iterations);
		urb = simple_alloc_urb(udev, dev->in_pipe, param->length);
		if (!urb) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE: bulk source (maybe generates short writes) */
		retval = simple_io(dev, urb, param->iterations, param->vary,
					0, "test4");
		simple_free_urb(urb);
		break;

	/* Queued bulk I/O tests */
	case 5:
		if (dev->out_pipe == 0 || param->sglen == 0)
			break;
		dev_info(&intf->dev,
			"TEST 5: write %d sglists %d entries of %d bytes\n",
				param->iterations,
				param->sglen, param->length);
		sg = alloc_sglist(param->sglen, param->length, 0);
		if (!sg) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE: bulk sink (maybe accepts short writes) */
		retval = perform_sglist(dev, param->iterations, dev->out_pipe,
				&req, sg, param->sglen);
		free_sglist(sg, param->sglen);
		break;

	case 6:
		if (dev->in_pipe == 0 || param->sglen == 0)
			break;
		dev_info(&intf->dev,
			"TEST 6: read %d sglists %d entries of %d bytes\n",
				param->iterations,
				param->sglen, param->length);
		sg = alloc_sglist(param->sglen, param->length, 0);
		if (!sg) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE: bulk source (maybe generates short writes) */
		retval = perform_sglist(dev, param->iterations, dev->in_pipe,
				&req, sg, param->sglen);
		free_sglist(sg, param->sglen);
		break;

	case 7:
		if (dev->out_pipe == 0 || param->sglen == 0 || param->vary == 0)
			break;
		dev_info(&intf->dev,
			"TEST 7: write/%d %d sglists %d entries 0..%d bytes\n",
				param->vary, param->iterations,
				param->sglen, param->length);
		sg = alloc_sglist(param->sglen, param->length, param->vary);
		if (!sg) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE: bulk sink (maybe accepts short writes) */
		retval = perform_sglist(dev, param->iterations, dev->out_pipe,
				&req, sg, param->sglen);
		free_sglist(sg, param->sglen);
		break;

	case 8:
		if (dev->in_pipe == 0 || param->sglen == 0 || param->vary == 0)
			break;
		dev_info(&intf->dev,
			"TEST 8: read/%d %d sglists %d entries 0..%d bytes\n",
				param->vary, param->iterations,
				param->sglen, param->length);
		sg = alloc_sglist(param->sglen, param->length, param->vary);
		if (!sg) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE: bulk source (maybe generates short writes) */
		retval = perform_sglist(dev, param->iterations, dev->in_pipe,
				&req, sg, param->sglen);
		free_sglist(sg, param->sglen);
		break;

	/* non-queued sanity tests for control (chapter 9 subset) */
	case 9:
		retval = 0;
		dev_info(&intf->dev,
			"TEST 9: ch9 (subset) control tests, %d times\n",
				param->iterations);
		for (i = param->iterations; retval == 0 && i--; /* NOP */)
			retval = ch9_postconfig(dev);
		if (retval)
			dev_err(&intf->dev, "ch9 subset failed, "
					"iterations left %d\n", i);
		break;

	/* queued control messaging */
	case 10:
		retval = 0;
		dev_info(&intf->dev,
				"TEST 10: queue %d control calls, %d times\n",
				param->sglen,
				param->iterations);
		retval = test_ctrl_queue(dev, param);
		break;

	/* simple non-queued unlinks (ring with one urb) */
	case 11:
		if (dev->in_pipe == 0 || !param->length)
			break;
		retval = 0;
		dev_info(&intf->dev, "TEST 11: unlink %d reads of %d\n",
				param->iterations, param->length);
		for (i = param->iterations; retval == 0 && i--; /* NOP */)
			retval = unlink_simple(dev, dev->in_pipe,
						param->length);
		if (retval)
			dev_err(&intf->dev, "unlink reads failed %d, "
					"iterations left %d\n", retval, i);
		break;
	case 12:
		if (dev->out_pipe == 0 || !param->length)
			break;
		retval = 0;
		dev_info(&intf->dev, "TEST 12: unlink %d writes of %d\n",
				param->iterations, param->length);
		for (i = param->iterations; retval == 0 && i--; /* NOP */)
			retval = unlink_simple(dev, dev->out_pipe,
						param->length);
		if (retval)
			dev_err(&intf->dev, "unlink writes failed %d, "
					"iterations left %d\n", retval, i);
		break;

	/* ep halt tests */
	case 13:
		if (dev->out_pipe == 0 && dev->in_pipe == 0)
			break;
		retval = 0;
		dev_info(&intf->dev, "TEST 13: set/clear %d halts\n",
				param->iterations);
		for (i = param->iterations; retval == 0 && i--; /* NOP */)
			retval = halt_simple(dev);

		if (retval)
			ERROR(dev, "halts failed, iterations left %d\n", i);
		break;

	/* control write tests */
	case 14:
		if (!dev->info->ctrl_out)
			break;
		dev_info(&intf->dev, "TEST 14: %d ep0out, %d..%d vary %d\n",
				param->iterations,
				realworld ? 1 : 0, param->length,
				param->vary);
		retval = ctrl_out(dev, param->iterations,
				param->length, param->vary, 0);
		break;

	/* iso write tests */
	case 15:
		if (dev->out_iso_pipe == 0 || param->sglen == 0)
			break;
		dev_info(&intf->dev,
			"TEST 15: write %d iso, %d entries of %d bytes\n",
				param->iterations,
				param->sglen, param->length);
		/* FIRMWARE: iso sink */
		retval = test_iso_queue(dev, param,
				dev->out_iso_pipe, dev->iso_out, 0);
		break;

	/* iso read tests */
	case 16:
		if (dev->in_iso_pipe == 0 || param->sglen == 0)
			break;
		dev_info(&intf->dev,
			"TEST 16: read %d iso, %d entries of %d bytes\n",
				param->iterations,
				param->sglen, param->length);
		/* FIRMWARE: iso source */
		retval = test_iso_queue(dev, param,
				dev->in_iso_pipe, dev->iso_in, 0);
		break;

	/* FIXME scatterlist cancel (needs helper thread) */

	/* Tests for bulk I/O using DMA mapping by core and odd address */
	case 17:
		if (dev->out_pipe == 0)
			break;
		dev_info(&intf->dev,
			"TEST 17: write odd addr %d bytes %u times core map\n",
			param->length, param->iterations);

		retval = test_unaligned_bulk(
				dev, dev->out_pipe,
				param->length, param->iterations,
				0, "test17");
		break;

	case 18:
		if (dev->in_pipe == 0)
			break;
		dev_info(&intf->dev,
			"TEST 18: read odd addr %d bytes %u times core map\n",
			param->length, param->iterations);

		retval = test_unaligned_bulk(
				dev, dev->in_pipe,
				param->length, param->iterations,
				0, "test18");
		break;

	/* Tests for bulk I/O using premapped coherent buffer and odd address */
	case 19:
		if (dev->out_pipe == 0)
			break;
		dev_info(&intf->dev,
			"TEST 19: write odd addr %d bytes %u times premapped\n",
			param->length, param->iterations);

		retval = test_unaligned_bulk(
				dev, dev->out_pipe,
				param->length, param->iterations,
				URB_NO_TRANSFER_DMA_MAP, "test19");
		break;

	case 20:
		if (dev->in_pipe == 0)
			break;
		dev_info(&intf->dev,
			"TEST 20: read odd addr %d bytes %u times premapped\n",
			param->length, param->iterations);

		retval = test_unaligned_bulk(
				dev, dev->in_pipe,
				param->length, param->iterations,
				URB_NO_TRANSFER_DMA_MAP, "test20");
		break;

	/* control write tests with unaligned buffer */
	case 21:
		if (!dev->info->ctrl_out)
			break;
		dev_info(&intf->dev,
				"TEST 21: %d ep0out odd addr, %d..%d vary %d\n",
				param->iterations,
				realworld ? 1 : 0, param->length,
				param->vary);
		retval = ctrl_out(dev, param->iterations,
				param->length, param->vary, 1);
		break;

	/* unaligned iso tests */
	case 22:
		if (dev->out_iso_pipe == 0 || param->sglen == 0)
			break;
		dev_info(&intf->dev,
			"TEST 22: write %d iso odd, %d entries of %d bytes\n",
				param->iterations,
				param->sglen, param->length);
		retval = test_iso_queue(dev, param,
				dev->out_iso_pipe, dev->iso_out, 1);
		break;

	case 23:
		if (dev->in_iso_pipe == 0 || param->sglen == 0)
			break;
		dev_info(&intf->dev,
			"TEST 23: read %d iso odd, %d entries of %d bytes\n",
				param->iterations,
				param->sglen, param->length);
		retval = test_iso_queue(dev, param,
				dev->in_iso_pipe, dev->iso_in, 1);
		break;

	/* unlink URBs from a bulk-OUT queue */
	case 24:
		if (dev->out_pipe == 0 || !param->length || param->sglen < 4)
			break;
		retval = 0;
		dev_info(&intf->dev, "TEST 24: unlink from %d queues of "
				"%d %d-byte writes\n",
				param->iterations, param->sglen, param->length);
		for (i = param->iterations; retval == 0 && i > 0; --i) {
			retval = unlink_queued(dev, dev->out_pipe,
						param->sglen, param->length);
			if (retval) {
				dev_err(&intf->dev,
					"unlink queued writes failed %d, "
					"iterations left %d\n", retval, i);
				break;
			}
		}
		break;
	}
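
	/* return the elapsed wall-clock time to user space as a timeval delta */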
	do_gettimeofday(&param->duration);
	param->duration.tv_sec -= start.tv_sec;
	param->duration.tv_usec -= start.tv_usec;
	if (param->duration.tv_usec < 0) {
		param->duration.tv_usec += 1000 * 1000;
		param->duration.tv_sec -= 1;
	}
	mutex_unlock(&dev->lock);
	return retval;
}

/*-------------------------------------------------------------------------*/

static unsigned force_interrupt;
module_param(force_interrupt, uint, 0);
MODULE_PARM_DESC(force_interrupt, "0 = test default; else interrupt");

#ifdef GENERIC
static unsigned short vendor;
module_param(vendor, ushort, 0);
MODULE_PARM_DESC(vendor, "vendor code (from usb-if)");

static unsigned short product;
module_param(product, ushort, 0);
MODULE_PARM_DESC(product, "product code (from vendor)");
#endif
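
/* Bind to an interface, pick the pipes to test (interrupt, bulk, and/or
 * ISO depending on speed and device info), and log what will be covered.
 */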
static int
usbtest_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
	struct usb_device *udev;
	struct usbtest_dev *dev;
	struct usbtest_info *info;
	char *rtest, *wtest;
	char *irtest, *iwtest;

	udev = interface_to_usbdev(intf);

#ifdef GENERIC
	/* specify devices by module parameters? */
	if (id->match_flags == 0) {
		/* vendor match required, product match optional */
		if (!vendor || le16_to_cpu(udev->descriptor.idVendor) != (u16)vendor)
			return -ENODEV;
		if (product && le16_to_cpu(udev->descriptor.idProduct) != (u16)product)
			return -ENODEV;
		dev_info(&intf->dev, "matched module params, "
					"vend=0x%04x prod=0x%04x\n",
				le16_to_cpu(udev->descriptor.idVendor),
				le16_to_cpu(udev->descriptor.idProduct));
	}
#endif

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	info = (struct usbtest_info *) id->driver_info;
	dev->info = info;
	mutex_init(&dev->lock);

	dev->intf = intf;

	/* cacheline-aligned scratch for i/o */
	dev->buf = kmalloc(TBUF_SIZE, GFP_KERNEL);
	if (dev->buf == NULL) {
		kfree(dev);
		return -ENOMEM;
	}

	/* NOTE this doesn't yet test the handful of differences that are
	 * visible with high speed interrupts: bigger maxpacket (1K) and
	 * "high bandwidth" modes (up to 3 packets/uframe).
	 */
	rtest = wtest = "";
	irtest = iwtest = "";
	if (force_interrupt || udev->speed == USB_SPEED_LOW) {
		if (info->ep_in) {
			dev->in_pipe = usb_rcvintpipe(udev, info->ep_in);
			rtest = " intr-in";
		}
		if (info->ep_out) {
			dev->out_pipe = usb_sndintpipe(udev, info->ep_out);
			wtest = " intr-out";
		}
	} else {
		if (override_alt >= 0 || info->autoconf) {
			int status;

			status = get_endpoints(dev, intf);
			if (status < 0) {
				WARNING(dev, "couldn't get endpoints, %d\n",
						status);
				kfree(dev->buf);
				kfree(dev);
				return status;
			}
			/* may find bulk or ISO pipes */
		} else {
			if (info->ep_in)
				dev->in_pipe = usb_rcvbulkpipe(udev,
							info->ep_in);
			if (info->ep_out)
				dev->out_pipe = usb_sndbulkpipe(udev,
							info->ep_out);
		}
		if (dev->in_pipe)
			rtest = " bulk-in";
		if (dev->out_pipe)
			wtest = " bulk-out";
		if (dev->in_iso_pipe)
			irtest = " iso-in";
		if (dev->out_iso_pipe)
			iwtest = " iso-out";
	}

	usb_set_intfdata(intf, dev);
	dev_info(&intf->dev, "%s\n", info->name);
	dev_info(&intf->dev, "%s {control%s%s%s%s%s} tests%s\n",
			usb_speed_string(udev->speed),
			info->ctrl_out ? " in/out" : "",
			rtest, wtest,
			irtest, iwtest,
			info->alt >= 0 ? " (+alt)" : "");
	return 0;
}
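
/* Nothing to do on suspend/resume: the driver keeps no state that would
 * need saving or restoring.
 */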
static int usbtest_suspend(struct usb_interface *intf, pm_message_t message)
{
	return 0;
}

static int usbtest_resume(struct usb_interface *intf)
{
	return 0;
}

static void usbtest_disconnect(struct usb_interface *intf)
{
	struct usbtest_dev *dev = usb_get_intfdata(intf);

	usb_set_intfdata(intf, NULL);
	dev_dbg(&intf->dev, "disconnect\n");
	kfree(dev);
}

/* Basic testing only needs a device that can source or sink bulk traffic.
 * Any device can test control transfers (default with GENERIC binding).
 *
 * Several entries work with the default EP0 implementation that's built
 * into EZ-USB chips. There's a default vendor ID which can be overridden
 * by (very) small config EEPROMS, but otherwise all these devices act
 * identically until firmware is loaded: only EP0 works. It turns out
 * to be easy to make other endpoints work, without modifying that EP0
 * behavior. For now, we expect that kind of firmware.
 */

/* an21xx or fx versions of ez-usb */
static struct usbtest_info ez1_info = {
	.name = "EZ-USB device",
	.ep_in = 2,
	.ep_out = 2,
	.alt = 1,
};

/* fx2 version of ez-usb */
static struct usbtest_info ez2_info = {
	.name = "FX2 device",
	.ep_in = 6,
	.ep_out = 2,
	.alt = 1,
};

/* ezusb family device with dedicated usb test firmware */
static struct usbtest_info fw_info = {
	.name = "usb test device",
	.ep_in = 2,
	.ep_out = 2,
	.alt = 1,
	.autoconf = 1,		/* iso and ctrl_out need autoconf */
	.ctrl_out = 1,
	.iso = 1,		/* iso_ep's are #8 in/out */
};

/* peripheral running Linux and 'zero.c' test firmware, or
 * its user-mode cousin. different versions of this use
 * different hardware with the same vendor/product codes.
 * host side MUST rely on the endpoint descriptors.
 */
static struct usbtest_info gz_info = {
	.name = "Linux gadget zero",
	.autoconf = 1,
	.ctrl_out = 1,
	.iso = 1,
	.alt = 0,
};

static struct usbtest_info um_info = {
	.name = "Linux user mode test driver",
	.autoconf = 1,
	.alt = -1,
};

static struct usbtest_info um2_info = {
	.name = "Linux user mode ISO test driver",
	.autoconf = 1,
	.iso = 1,
	.alt = -1,
};

#ifdef IBOT2
/* this is a nice source of high speed bulk data;
 * uses an FX2, with firmware provided in the device
 */
static struct usbtest_info ibot2_info = {
	.name = "iBOT2 webcam",
	.ep_in = 2,
	.alt = -1,
};
#endif

#ifdef GENERIC
/* we can use any device to test control traffic */
static struct usbtest_info generic_info = {
	.name = "Generic USB device",
	.alt = -1,
};
#endif

static const struct usb_device_id id_table[] = {

	/*-------------------------------------------------------------*/

	/* EZ-USB devices which download firmware to replace (or in our
	 * case augment) the default device implementation.
	 */

	/* generic EZ-USB FX controller */
	{ USB_DEVICE(0x0547, 0x2235),
		.driver_info = (unsigned long) &ez1_info,
	},

	/* CY3671 development board with EZ-USB FX */
	{ USB_DEVICE(0x0547, 0x0080),
		.driver_info = (unsigned long) &ez1_info,
	},

	/* generic EZ-USB FX2 controller (or development board) */
	{ USB_DEVICE(0x04b4, 0x8613),
		.driver_info = (unsigned long) &ez2_info,
	},

	/* re-enumerated usb test device firmware */
	{ USB_DEVICE(0xfff0, 0xfff0),
		.driver_info = (unsigned long) &fw_info,
	},

	/* "Gadget Zero" firmware runs under Linux */
	{ USB_DEVICE(0x0525, 0xa4a0),
		.driver_info = (unsigned long) &gz_info,
	},

	/* so does a user-mode variant */
	{ USB_DEVICE(0x0525, 0xa4a4),
		.driver_info = (unsigned long) &um_info,
	},

	/* ... and a user-mode variant that talks iso */
	{ USB_DEVICE(0x0525, 0xa4a3),
		.driver_info = (unsigned long) &um2_info,
	},

#ifdef KEYSPAN_19Qi
	/* Keyspan 19qi uses an21xx (original EZ-USB) */
	/* this does not coexist with the real Keyspan 19qi driver! */
	{ USB_DEVICE(0x06cd, 0x010b),
		.driver_info = (unsigned long) &ez1_info,
	},
#endif

	/*-------------------------------------------------------------*/

#ifdef IBOT2
	/* iBOT2 makes a nice source of high speed bulk-in data */
	/* this does not coexist with a real iBOT2 driver! */
	{ USB_DEVICE(0x0b62, 0x0059),
		.driver_info = (unsigned long) &ibot2_info,
	},
#endif

	/*-------------------------------------------------------------*/

#ifdef GENERIC
	/* module params can specify devices to use for control tests */
	{ .driver_info = (unsigned long) &generic_info, },
#endif

	/*-------------------------------------------------------------*/

	{ }
};
MODULE_DEVICE_TABLE(usb, id_table);

static struct usb_driver usbtest_driver = {
	.name = "usbtest",
	.id_table = id_table,
	.probe = usbtest_probe,
	.unlocked_ioctl = usbtest_ioctl,
	.disconnect = usbtest_disconnect,
	.suspend = usbtest_suspend,
	.resume = usbtest_resume,
};

/*-------------------------------------------------------------------------*/

static int __init usbtest_init(void)
{
#ifdef GENERIC
	if (vendor)
		pr_debug("params: vend=0x%04x prod=0x%04x\n", vendor, product);
#endif
	return usb_register(&usbtest_driver);
}
module_init(usbtest_init);

static void __exit usbtest_exit(void)
{
	usb_deregister(&usbtest_driver);
}
module_exit(usbtest_exit);

MODULE_DESCRIPTION("USB Core/HCD Testing Driver");
MODULE_LICENSE("GPL");