usbtest.c 65 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762277227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520
  1. #include <linux/kernel.h>
  2. #include <linux/errno.h>
  3. #include <linux/init.h>
  4. #include <linux/slab.h>
  5. #include <linux/mm.h>
  6. #include <linux/module.h>
  7. #include <linux/moduleparam.h>
  8. #include <linux/scatterlist.h>
  9. #include <linux/mutex.h>
  10. #include <linux/usb.h>
  11. /*-------------------------------------------------------------------------*/
  12. /* FIXME make these public somewhere; usbdevfs.h? */
  13. struct usbtest_param {
  14. /* inputs */
  15. unsigned test_num; /* 0..(TEST_CASES-1) */
  16. unsigned iterations;
  17. unsigned length;
  18. unsigned vary;
  19. unsigned sglen;
  20. /* outputs */
  21. struct timeval duration;
  22. };
  23. #define USBTEST_REQUEST _IOWR('U', 100, struct usbtest_param)
  24. /*-------------------------------------------------------------------------*/
  25. #define GENERIC /* let probe() bind using module params */
  26. /* Some devices that can be used for testing will have "real" drivers.
  27. * Entries for those need to be enabled here by hand, after disabling
  28. * that "real" driver.
  29. */
  30. //#define IBOT2 /* grab iBOT2 webcams */
  31. //#define KEYSPAN_19Qi /* grab un-renumerated serial adapter */
  32. /*-------------------------------------------------------------------------*/
  33. struct usbtest_info {
  34. const char *name;
  35. u8 ep_in; /* bulk/intr source */
  36. u8 ep_out; /* bulk/intr sink */
  37. unsigned autoconf:1;
  38. unsigned ctrl_out:1;
  39. unsigned iso:1; /* try iso in/out */
  40. int alt;
  41. };
  42. /* this is accessed only through usbfs ioctl calls.
  43. * one ioctl to issue a test ... one lock per device.
  44. * tests create other threads if they need them.
  45. * urbs and buffers are allocated dynamically,
  46. * and data generated deterministically.
  47. */
  48. struct usbtest_dev {
  49. struct usb_interface *intf;
  50. struct usbtest_info *info;
  51. int in_pipe;
  52. int out_pipe;
  53. int in_iso_pipe;
  54. int out_iso_pipe;
  55. struct usb_endpoint_descriptor *iso_in, *iso_out;
  56. struct mutex lock;
  57. #define TBUF_SIZE 256
  58. u8 *buf;
  59. };
  60. static struct usb_device *testdev_to_usbdev(struct usbtest_dev *test)
  61. {
  62. return interface_to_usbdev(test->intf);
  63. }
  64. /* set up all urbs so they can be used with either bulk or interrupt */
  65. #define INTERRUPT_RATE 1 /* msec/transfer */
  66. #define ERROR(tdev, fmt, args...) \
  67. dev_err(&(tdev)->intf->dev , fmt , ## args)
  68. #define WARNING(tdev, fmt, args...) \
  69. dev_warn(&(tdev)->intf->dev , fmt , ## args)
  70. #define GUARD_BYTE 0xA5
  71. /*-------------------------------------------------------------------------*/
  72. static int
  73. get_endpoints(struct usbtest_dev *dev, struct usb_interface *intf)
  74. {
  75. int tmp;
  76. struct usb_host_interface *alt;
  77. struct usb_host_endpoint *in, *out;
  78. struct usb_host_endpoint *iso_in, *iso_out;
  79. struct usb_device *udev;
  80. for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
  81. unsigned ep;
  82. in = out = NULL;
  83. iso_in = iso_out = NULL;
  84. alt = intf->altsetting + tmp;
  85. /* take the first altsetting with in-bulk + out-bulk;
  86. * ignore other endpoints and altsettings.
  87. */
  88. for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
  89. struct usb_host_endpoint *e;
  90. e = alt->endpoint + ep;
  91. switch (e->desc.bmAttributes) {
  92. case USB_ENDPOINT_XFER_BULK:
  93. break;
  94. case USB_ENDPOINT_XFER_ISOC:
  95. if (dev->info->iso)
  96. goto try_iso;
  97. /* FALLTHROUGH */
  98. default:
  99. continue;
  100. }
  101. if (usb_endpoint_dir_in(&e->desc)) {
  102. if (!in)
  103. in = e;
  104. } else {
  105. if (!out)
  106. out = e;
  107. }
  108. continue;
  109. try_iso:
  110. if (usb_endpoint_dir_in(&e->desc)) {
  111. if (!iso_in)
  112. iso_in = e;
  113. } else {
  114. if (!iso_out)
  115. iso_out = e;
  116. }
  117. }
  118. if ((in && out) || iso_in || iso_out)
  119. goto found;
  120. }
  121. return -EINVAL;
  122. found:
  123. udev = testdev_to_usbdev(dev);
  124. if (alt->desc.bAlternateSetting != 0) {
  125. tmp = usb_set_interface(udev,
  126. alt->desc.bInterfaceNumber,
  127. alt->desc.bAlternateSetting);
  128. if (tmp < 0)
  129. return tmp;
  130. }
  131. if (in) {
  132. dev->in_pipe = usb_rcvbulkpipe(udev,
  133. in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
  134. dev->out_pipe = usb_sndbulkpipe(udev,
  135. out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
  136. }
  137. if (iso_in) {
  138. dev->iso_in = &iso_in->desc;
  139. dev->in_iso_pipe = usb_rcvisocpipe(udev,
  140. iso_in->desc.bEndpointAddress
  141. & USB_ENDPOINT_NUMBER_MASK);
  142. }
  143. if (iso_out) {
  144. dev->iso_out = &iso_out->desc;
  145. dev->out_iso_pipe = usb_sndisocpipe(udev,
  146. iso_out->desc.bEndpointAddress
  147. & USB_ENDPOINT_NUMBER_MASK);
  148. }
  149. return 0;
  150. }
  151. /*-------------------------------------------------------------------------*/
  152. /* Support for testing basic non-queued I/O streams.
  153. *
  154. * These just package urbs as requests that can be easily canceled.
  155. * Each urb's data buffer is dynamically allocated; callers can fill
  156. * them with non-zero test data (or test for it) when appropriate.
  157. */
  158. static void simple_callback(struct urb *urb)
  159. {
  160. complete(urb->context);
  161. }
  162. static struct urb *usbtest_alloc_urb(
  163. struct usb_device *udev,
  164. int pipe,
  165. unsigned long bytes,
  166. unsigned transfer_flags,
  167. unsigned offset)
  168. {
  169. struct urb *urb;
  170. urb = usb_alloc_urb(0, GFP_KERNEL);
  171. if (!urb)
  172. return urb;
  173. usb_fill_bulk_urb(urb, udev, pipe, NULL, bytes, simple_callback, NULL);
  174. urb->interval = (udev->speed == USB_SPEED_HIGH)
  175. ? (INTERRUPT_RATE << 3)
  176. : INTERRUPT_RATE;
  177. urb->transfer_flags = transfer_flags;
  178. if (usb_pipein(pipe))
  179. urb->transfer_flags |= URB_SHORT_NOT_OK;
  180. if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
  181. urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
  182. GFP_KERNEL, &urb->transfer_dma);
  183. else
  184. urb->transfer_buffer = kmalloc(bytes + offset, GFP_KERNEL);
  185. if (!urb->transfer_buffer) {
  186. usb_free_urb(urb);
  187. return NULL;
  188. }
  189. /* To test unaligned transfers add an offset and fill the
  190. unused memory with a guard value */
  191. if (offset) {
  192. memset(urb->transfer_buffer, GUARD_BYTE, offset);
  193. urb->transfer_buffer += offset;
  194. if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
  195. urb->transfer_dma += offset;
  196. }
  197. /* For inbound transfers use guard byte so that test fails if
  198. data not correctly copied */
  199. memset(urb->transfer_buffer,
  200. usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
  201. bytes);
  202. return urb;
  203. }
  204. static struct urb *simple_alloc_urb(
  205. struct usb_device *udev,
  206. int pipe,
  207. unsigned long bytes)
  208. {
  209. return usbtest_alloc_urb(udev, pipe, bytes, URB_NO_TRANSFER_DMA_MAP, 0);
  210. }
  211. static unsigned pattern;
  212. static unsigned mod_pattern;
  213. module_param_named(pattern, mod_pattern, uint, S_IRUGO | S_IWUSR);
  214. MODULE_PARM_DESC(mod_pattern, "i/o pattern (0 == zeroes)");
  215. static inline void simple_fill_buf(struct urb *urb)
  216. {
  217. unsigned i;
  218. u8 *buf = urb->transfer_buffer;
  219. unsigned len = urb->transfer_buffer_length;
  220. switch (pattern) {
  221. default:
  222. /* FALLTHROUGH */
  223. case 0:
  224. memset(buf, 0, len);
  225. break;
  226. case 1: /* mod63 */
  227. for (i = 0; i < len; i++)
  228. *buf++ = (u8) (i % 63);
  229. break;
  230. }
  231. }
  232. static inline unsigned long buffer_offset(void *buf)
  233. {
  234. return (unsigned long)buf & (ARCH_KMALLOC_MINALIGN - 1);
  235. }
  236. static int check_guard_bytes(struct usbtest_dev *tdev, struct urb *urb)
  237. {
  238. u8 *buf = urb->transfer_buffer;
  239. u8 *guard = buf - buffer_offset(buf);
  240. unsigned i;
  241. for (i = 0; guard < buf; i++, guard++) {
  242. if (*guard != GUARD_BYTE) {
  243. ERROR(tdev, "guard byte[%d] %d (not %d)\n",
  244. i, *guard, GUARD_BYTE);
  245. return -EINVAL;
  246. }
  247. }
  248. return 0;
  249. }
  250. static int simple_check_buf(struct usbtest_dev *tdev, struct urb *urb)
  251. {
  252. unsigned i;
  253. u8 expected;
  254. u8 *buf = urb->transfer_buffer;
  255. unsigned len = urb->actual_length;
  256. int ret = check_guard_bytes(tdev, urb);
  257. if (ret)
  258. return ret;
  259. for (i = 0; i < len; i++, buf++) {
  260. switch (pattern) {
  261. /* all-zeroes has no synchronization issues */
  262. case 0:
  263. expected = 0;
  264. break;
  265. /* mod63 stays in sync with short-terminated transfers,
  266. * or otherwise when host and gadget agree on how large
  267. * each usb transfer request should be. resync is done
  268. * with set_interface or set_config.
  269. */
  270. case 1: /* mod63 */
  271. expected = i % 63;
  272. break;
  273. /* always fail unsupported patterns */
  274. default:
  275. expected = !*buf;
  276. break;
  277. }
  278. if (*buf == expected)
  279. continue;
  280. ERROR(tdev, "buf[%d] = %d (not %d)\n", i, *buf, expected);
  281. return -EINVAL;
  282. }
  283. return 0;
  284. }
  285. static void simple_free_urb(struct urb *urb)
  286. {
  287. unsigned long offset = buffer_offset(urb->transfer_buffer);
  288. if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
  289. usb_free_coherent(
  290. urb->dev,
  291. urb->transfer_buffer_length + offset,
  292. urb->transfer_buffer - offset,
  293. urb->transfer_dma - offset);
  294. else
  295. kfree(urb->transfer_buffer - offset);
  296. usb_free_urb(urb);
  297. }
  298. static int simple_io(
  299. struct usbtest_dev *tdev,
  300. struct urb *urb,
  301. int iterations,
  302. int vary,
  303. int expected,
  304. const char *label
  305. )
  306. {
  307. struct usb_device *udev = urb->dev;
  308. int max = urb->transfer_buffer_length;
  309. struct completion completion;
  310. int retval = 0;
  311. urb->context = &completion;
  312. while (retval == 0 && iterations-- > 0) {
  313. init_completion(&completion);
  314. if (usb_pipeout(urb->pipe)) {
  315. simple_fill_buf(urb);
  316. urb->transfer_flags |= URB_ZERO_PACKET;
  317. }
  318. retval = usb_submit_urb(urb, GFP_KERNEL);
  319. if (retval != 0)
  320. break;
  321. /* NOTE: no timeouts; can't be broken out of by interrupt */
  322. wait_for_completion(&completion);
  323. retval = urb->status;
  324. urb->dev = udev;
  325. if (retval == 0 && usb_pipein(urb->pipe))
  326. retval = simple_check_buf(tdev, urb);
  327. if (vary) {
  328. int len = urb->transfer_buffer_length;
  329. len += vary;
  330. len %= max;
  331. if (len == 0)
  332. len = (vary < max) ? vary : max;
  333. urb->transfer_buffer_length = len;
  334. }
  335. /* FIXME if endpoint halted, clear halt (and log) */
  336. }
  337. urb->transfer_buffer_length = max;
  338. if (expected != retval)
  339. dev_err(&udev->dev,
  340. "%s failed, iterations left %d, status %d (not %d)\n",
  341. label, iterations, retval, expected);
  342. return retval;
  343. }
  344. /*-------------------------------------------------------------------------*/
  345. /* We use scatterlist primitives to test queued I/O.
  346. * Yes, this also tests the scatterlist primitives.
  347. */
  348. static void free_sglist(struct scatterlist *sg, int nents)
  349. {
  350. unsigned i;
  351. if (!sg)
  352. return;
  353. for (i = 0; i < nents; i++) {
  354. if (!sg_page(&sg[i]))
  355. continue;
  356. kfree(sg_virt(&sg[i]));
  357. }
  358. kfree(sg);
  359. }
  360. static struct scatterlist *
  361. alloc_sglist(int nents, int max, int vary)
  362. {
  363. struct scatterlist *sg;
  364. unsigned i;
  365. unsigned size = max;
  366. sg = kmalloc(nents * sizeof *sg, GFP_KERNEL);
  367. if (!sg)
  368. return NULL;
  369. sg_init_table(sg, nents);
  370. for (i = 0; i < nents; i++) {
  371. char *buf;
  372. unsigned j;
  373. buf = kzalloc(size, GFP_KERNEL);
  374. if (!buf) {
  375. free_sglist(sg, i);
  376. return NULL;
  377. }
  378. /* kmalloc pages are always physically contiguous! */
  379. sg_set_buf(&sg[i], buf, size);
  380. switch (pattern) {
  381. case 0:
  382. /* already zeroed */
  383. break;
  384. case 1:
  385. for (j = 0; j < size; j++)
  386. *buf++ = (u8) (j % 63);
  387. break;
  388. }
  389. if (vary) {
  390. size += vary;
  391. size %= max;
  392. if (size == 0)
  393. size = (vary < max) ? vary : max;
  394. }
  395. }
  396. return sg;
  397. }
  398. static int perform_sglist(
  399. struct usbtest_dev *tdev,
  400. unsigned iterations,
  401. int pipe,
  402. struct usb_sg_request *req,
  403. struct scatterlist *sg,
  404. int nents
  405. )
  406. {
  407. struct usb_device *udev = testdev_to_usbdev(tdev);
  408. int retval = 0;
  409. while (retval == 0 && iterations-- > 0) {
  410. retval = usb_sg_init(req, udev, pipe,
  411. (udev->speed == USB_SPEED_HIGH)
  412. ? (INTERRUPT_RATE << 3)
  413. : INTERRUPT_RATE,
  414. sg, nents, 0, GFP_KERNEL);
  415. if (retval)
  416. break;
  417. usb_sg_wait(req);
  418. retval = req->status;
  419. /* FIXME check resulting data pattern */
  420. /* FIXME if endpoint halted, clear halt (and log) */
  421. }
  422. /* FIXME for unlink or fault handling tests, don't report
  423. * failure if retval is as we expected ...
  424. */
  425. if (retval)
  426. ERROR(tdev, "perform_sglist failed, "
  427. "iterations left %d, status %d\n",
  428. iterations, retval);
  429. return retval;
  430. }
  431. /*-------------------------------------------------------------------------*/
  432. /* unqueued control message testing
  433. *
  434. * there's a nice set of device functional requirements in chapter 9 of the
  435. * usb 2.0 spec, which we can apply to ANY device, even ones that don't use
  436. * special test firmware.
  437. *
  438. * we know the device is configured (or suspended) by the time it's visible
  439. * through usbfs. we can't change that, so we won't test enumeration (which
  440. * worked 'well enough' to get here, this time), power management (ditto),
  441. * or remote wakeup (which needs human interaction).
  442. */
  443. static unsigned realworld = 1;
  444. module_param(realworld, uint, 0);
  445. MODULE_PARM_DESC(realworld, "clear to demand stricter spec compliance");
  446. static int get_altsetting(struct usbtest_dev *dev)
  447. {
  448. struct usb_interface *iface = dev->intf;
  449. struct usb_device *udev = interface_to_usbdev(iface);
  450. int retval;
  451. retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
  452. USB_REQ_GET_INTERFACE, USB_DIR_IN|USB_RECIP_INTERFACE,
  453. 0, iface->altsetting[0].desc.bInterfaceNumber,
  454. dev->buf, 1, USB_CTRL_GET_TIMEOUT);
  455. switch (retval) {
  456. case 1:
  457. return dev->buf[0];
  458. case 0:
  459. retval = -ERANGE;
  460. /* FALLTHROUGH */
  461. default:
  462. return retval;
  463. }
  464. }
  465. static int set_altsetting(struct usbtest_dev *dev, int alternate)
  466. {
  467. struct usb_interface *iface = dev->intf;
  468. struct usb_device *udev;
  469. if (alternate < 0 || alternate >= 256)
  470. return -EINVAL;
  471. udev = interface_to_usbdev(iface);
  472. return usb_set_interface(udev,
  473. iface->altsetting[0].desc.bInterfaceNumber,
  474. alternate);
  475. }
  476. static int is_good_config(struct usbtest_dev *tdev, int len)
  477. {
  478. struct usb_config_descriptor *config;
  479. if (len < sizeof *config)
  480. return 0;
  481. config = (struct usb_config_descriptor *) tdev->buf;
  482. switch (config->bDescriptorType) {
  483. case USB_DT_CONFIG:
  484. case USB_DT_OTHER_SPEED_CONFIG:
  485. if (config->bLength != 9) {
  486. ERROR(tdev, "bogus config descriptor length\n");
  487. return 0;
  488. }
  489. /* this bit 'must be 1' but often isn't */
  490. if (!realworld && !(config->bmAttributes & 0x80)) {
  491. ERROR(tdev, "high bit of config attributes not set\n");
  492. return 0;
  493. }
  494. if (config->bmAttributes & 0x1f) { /* reserved == 0 */
  495. ERROR(tdev, "reserved config bits set\n");
  496. return 0;
  497. }
  498. break;
  499. default:
  500. return 0;
  501. }
  502. if (le16_to_cpu(config->wTotalLength) == len) /* read it all */
  503. return 1;
  504. if (le16_to_cpu(config->wTotalLength) >= TBUF_SIZE) /* max partial read */
  505. return 1;
  506. ERROR(tdev, "bogus config descriptor read size\n");
  507. return 0;
  508. }
  509. /* sanity test for standard requests working with usb_control_mesg() and some
  510. * of the utility functions which use it.
  511. *
  512. * this doesn't test how endpoint halts behave or data toggles get set, since
  513. * we won't do I/O to bulk/interrupt endpoints here (which is how to change
  514. * halt or toggle). toggle testing is impractical without support from hcds.
  515. *
  516. * this avoids failing devices linux would normally work with, by not testing
  517. * config/altsetting operations for devices that only support their defaults.
  518. * such devices rarely support those needless operations.
  519. *
  520. * NOTE that since this is a sanity test, it's not examining boundary cases
  521. * to see if usbcore, hcd, and device all behave right. such testing would
  522. * involve varied read sizes and other operation sequences.
  523. */
  524. static int ch9_postconfig(struct usbtest_dev *dev)
  525. {
  526. struct usb_interface *iface = dev->intf;
  527. struct usb_device *udev = interface_to_usbdev(iface);
  528. int i, alt, retval;
  529. /* [9.2.3] if there's more than one altsetting, we need to be able to
  530. * set and get each one. mostly trusts the descriptors from usbcore.
  531. */
  532. for (i = 0; i < iface->num_altsetting; i++) {
  533. /* 9.2.3 constrains the range here */
  534. alt = iface->altsetting[i].desc.bAlternateSetting;
  535. if (alt < 0 || alt >= iface->num_altsetting) {
  536. dev_err(&iface->dev,
  537. "invalid alt [%d].bAltSetting = %d\n",
  538. i, alt);
  539. }
  540. /* [real world] get/set unimplemented if there's only one */
  541. if (realworld && iface->num_altsetting == 1)
  542. continue;
  543. /* [9.4.10] set_interface */
  544. retval = set_altsetting(dev, alt);
  545. if (retval) {
  546. dev_err(&iface->dev, "can't set_interface = %d, %d\n",
  547. alt, retval);
  548. return retval;
  549. }
  550. /* [9.4.4] get_interface always works */
  551. retval = get_altsetting(dev);
  552. if (retval != alt) {
  553. dev_err(&iface->dev, "get alt should be %d, was %d\n",
  554. alt, retval);
  555. return (retval < 0) ? retval : -EDOM;
  556. }
  557. }
  558. /* [real world] get_config unimplemented if there's only one */
  559. if (!realworld || udev->descriptor.bNumConfigurations != 1) {
  560. int expected = udev->actconfig->desc.bConfigurationValue;
  561. /* [9.4.2] get_configuration always works
  562. * ... although some cheap devices (like one TI Hub I've got)
  563. * won't return config descriptors except before set_config.
  564. */
  565. retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
  566. USB_REQ_GET_CONFIGURATION,
  567. USB_DIR_IN | USB_RECIP_DEVICE,
  568. 0, 0, dev->buf, 1, USB_CTRL_GET_TIMEOUT);
  569. if (retval != 1 || dev->buf[0] != expected) {
  570. dev_err(&iface->dev, "get config --> %d %d (1 %d)\n",
  571. retval, dev->buf[0], expected);
  572. return (retval < 0) ? retval : -EDOM;
  573. }
  574. }
  575. /* there's always [9.4.3] a device descriptor [9.6.1] */
  576. retval = usb_get_descriptor(udev, USB_DT_DEVICE, 0,
  577. dev->buf, sizeof udev->descriptor);
  578. if (retval != sizeof udev->descriptor) {
  579. dev_err(&iface->dev, "dev descriptor --> %d\n", retval);
  580. return (retval < 0) ? retval : -EDOM;
  581. }
  582. /* there's always [9.4.3] at least one config descriptor [9.6.3] */
  583. for (i = 0; i < udev->descriptor.bNumConfigurations; i++) {
  584. retval = usb_get_descriptor(udev, USB_DT_CONFIG, i,
  585. dev->buf, TBUF_SIZE);
  586. if (!is_good_config(dev, retval)) {
  587. dev_err(&iface->dev,
  588. "config [%d] descriptor --> %d\n",
  589. i, retval);
  590. return (retval < 0) ? retval : -EDOM;
  591. }
  592. /* FIXME cross-checking udev->config[i] to make sure usbcore
  593. * parsed it right (etc) would be good testing paranoia
  594. */
  595. }
  596. /* and sometimes [9.2.6.6] speed dependent descriptors */
  597. if (le16_to_cpu(udev->descriptor.bcdUSB) == 0x0200) {
  598. struct usb_qualifier_descriptor *d = NULL;
  599. /* device qualifier [9.6.2] */
  600. retval = usb_get_descriptor(udev,
  601. USB_DT_DEVICE_QUALIFIER, 0, dev->buf,
  602. sizeof(struct usb_qualifier_descriptor));
  603. if (retval == -EPIPE) {
  604. if (udev->speed == USB_SPEED_HIGH) {
  605. dev_err(&iface->dev,
  606. "hs dev qualifier --> %d\n",
  607. retval);
  608. return (retval < 0) ? retval : -EDOM;
  609. }
  610. /* usb2.0 but not high-speed capable; fine */
  611. } else if (retval != sizeof(struct usb_qualifier_descriptor)) {
  612. dev_err(&iface->dev, "dev qualifier --> %d\n", retval);
  613. return (retval < 0) ? retval : -EDOM;
  614. } else
  615. d = (struct usb_qualifier_descriptor *) dev->buf;
  616. /* might not have [9.6.2] any other-speed configs [9.6.4] */
  617. if (d) {
  618. unsigned max = d->bNumConfigurations;
  619. for (i = 0; i < max; i++) {
  620. retval = usb_get_descriptor(udev,
  621. USB_DT_OTHER_SPEED_CONFIG, i,
  622. dev->buf, TBUF_SIZE);
  623. if (!is_good_config(dev, retval)) {
  624. dev_err(&iface->dev,
  625. "other speed config --> %d\n",
  626. retval);
  627. return (retval < 0) ? retval : -EDOM;
  628. }
  629. }
  630. }
  631. }
  632. /* FIXME fetch strings from at least the device descriptor */
  633. /* [9.4.5] get_status always works */
  634. retval = usb_get_status(udev, USB_RECIP_DEVICE, 0, dev->buf);
  635. if (retval != 2) {
  636. dev_err(&iface->dev, "get dev status --> %d\n", retval);
  637. return (retval < 0) ? retval : -EDOM;
  638. }
  639. /* FIXME configuration.bmAttributes says if we could try to set/clear
  640. * the device's remote wakeup feature ... if we can, test that here
  641. */
  642. retval = usb_get_status(udev, USB_RECIP_INTERFACE,
  643. iface->altsetting[0].desc.bInterfaceNumber, dev->buf);
  644. if (retval != 2) {
  645. dev_err(&iface->dev, "get interface status --> %d\n", retval);
  646. return (retval < 0) ? retval : -EDOM;
  647. }
  648. /* FIXME get status for each endpoint in the interface */
  649. return 0;
  650. }
  651. /*-------------------------------------------------------------------------*/
  652. /* use ch9 requests to test whether:
  653. * (a) queues work for control, keeping N subtests queued and
  654. * active (auto-resubmit) for M loops through the queue.
  655. * (b) protocol stalls (control-only) will autorecover.
  656. * it's not like bulk/intr; no halt clearing.
  657. * (c) short control reads are reported and handled.
  658. * (d) queues are always processed in-order
  659. */
  660. struct ctrl_ctx {
  661. spinlock_t lock;
  662. struct usbtest_dev *dev;
  663. struct completion complete;
  664. unsigned count;
  665. unsigned pending;
  666. int status;
  667. struct urb **urb;
  668. struct usbtest_param *param;
  669. int last;
  670. };
  671. #define NUM_SUBCASES 15 /* how many test subcases here? */
  672. struct subcase {
  673. struct usb_ctrlrequest setup;
  674. int number;
  675. int expected;
  676. };
  677. static void ctrl_complete(struct urb *urb)
  678. {
  679. struct ctrl_ctx *ctx = urb->context;
  680. struct usb_ctrlrequest *reqp;
  681. struct subcase *subcase;
  682. int status = urb->status;
  683. reqp = (struct usb_ctrlrequest *)urb->setup_packet;
  684. subcase = container_of(reqp, struct subcase, setup);
  685. spin_lock(&ctx->lock);
  686. ctx->count--;
  687. ctx->pending--;
  688. /* queue must transfer and complete in fifo order, unless
  689. * usb_unlink_urb() is used to unlink something not at the
  690. * physical queue head (not tested).
  691. */
  692. if (subcase->number > 0) {
  693. if ((subcase->number - ctx->last) != 1) {
  694. ERROR(ctx->dev,
  695. "subcase %d completed out of order, last %d\n",
  696. subcase->number, ctx->last);
  697. status = -EDOM;
  698. ctx->last = subcase->number;
  699. goto error;
  700. }
  701. }
  702. ctx->last = subcase->number;
  703. /* succeed or fault in only one way? */
  704. if (status == subcase->expected)
  705. status = 0;
  706. /* async unlink for cleanup? */
  707. else if (status != -ECONNRESET) {
  708. /* some faults are allowed, not required */
  709. if (subcase->expected > 0 && (
  710. ((status == -subcase->expected /* happened */
  711. || status == 0)))) /* didn't */
  712. status = 0;
  713. /* sometimes more than one fault is allowed */
  714. else if (subcase->number == 12 && status == -EPIPE)
  715. status = 0;
  716. else
  717. ERROR(ctx->dev, "subtest %d error, status %d\n",
  718. subcase->number, status);
  719. }
  720. /* unexpected status codes mean errors; ideally, in hardware */
  721. if (status) {
  722. error:
  723. if (ctx->status == 0) {
  724. int i;
  725. ctx->status = status;
  726. ERROR(ctx->dev, "control queue %02x.%02x, err %d, "
  727. "%d left, subcase %d, len %d/%d\n",
  728. reqp->bRequestType, reqp->bRequest,
  729. status, ctx->count, subcase->number,
  730. urb->actual_length,
  731. urb->transfer_buffer_length);
  732. /* FIXME this "unlink everything" exit route should
  733. * be a separate test case.
  734. */
  735. /* unlink whatever's still pending */
  736. for (i = 1; i < ctx->param->sglen; i++) {
  737. struct urb *u = ctx->urb[
  738. (i + subcase->number)
  739. % ctx->param->sglen];
  740. if (u == urb || !u->dev)
  741. continue;
  742. spin_unlock(&ctx->lock);
  743. status = usb_unlink_urb(u);
  744. spin_lock(&ctx->lock);
  745. switch (status) {
  746. case -EINPROGRESS:
  747. case -EBUSY:
  748. case -EIDRM:
  749. continue;
  750. default:
  751. ERROR(ctx->dev, "urb unlink --> %d\n",
  752. status);
  753. }
  754. }
  755. status = ctx->status;
  756. }
  757. }
  758. /* resubmit if we need to, else mark this as done */
  759. if ((status == 0) && (ctx->pending < ctx->count)) {
  760. status = usb_submit_urb(urb, GFP_ATOMIC);
  761. if (status != 0) {
  762. ERROR(ctx->dev,
  763. "can't resubmit ctrl %02x.%02x, err %d\n",
  764. reqp->bRequestType, reqp->bRequest, status);
  765. urb->dev = NULL;
  766. } else
  767. ctx->pending++;
  768. } else
  769. urb->dev = NULL;
  770. /* signal completion when nothing's queued */
  771. if (ctx->pending == 0)
  772. complete(&ctx->complete);
  773. spin_unlock(&ctx->lock);
  774. }
  775. static int
  776. test_ctrl_queue(struct usbtest_dev *dev, struct usbtest_param *param)
  777. {
  778. struct usb_device *udev = testdev_to_usbdev(dev);
  779. struct urb **urb;
  780. struct ctrl_ctx context;
  781. int i;
  782. if (param->sglen == 0 || param->iterations > UINT_MAX / param->sglen)
  783. return -EOPNOTSUPP;
  784. spin_lock_init(&context.lock);
  785. context.dev = dev;
  786. init_completion(&context.complete);
  787. context.count = param->sglen * param->iterations;
  788. context.pending = 0;
  789. context.status = -ENOMEM;
  790. context.param = param;
  791. context.last = -1;
  792. /* allocate and init the urbs we'll queue.
  793. * as with bulk/intr sglists, sglen is the queue depth; it also
  794. * controls which subtests run (more tests than sglen) or rerun.
  795. */
  796. urb = kcalloc(param->sglen, sizeof(struct urb *), GFP_KERNEL);
  797. if (!urb)
  798. return -ENOMEM;
  799. for (i = 0; i < param->sglen; i++) {
  800. int pipe = usb_rcvctrlpipe(udev, 0);
  801. unsigned len;
  802. struct urb *u;
  803. struct usb_ctrlrequest req;
  804. struct subcase *reqp;
  805. /* sign of this variable means:
  806. * -: tested code must return this (negative) error code
  807. * +: tested code may return this (negative too) error code
  808. */
  809. int expected = 0;
  810. /* requests here are mostly expected to succeed on any
  811. * device, but some are chosen to trigger protocol stalls
  812. * or short reads.
  813. */
  814. memset(&req, 0, sizeof req);
  815. req.bRequest = USB_REQ_GET_DESCRIPTOR;
  816. req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
  817. switch (i % NUM_SUBCASES) {
  818. case 0: /* get device descriptor */
  819. req.wValue = cpu_to_le16(USB_DT_DEVICE << 8);
  820. len = sizeof(struct usb_device_descriptor);
  821. break;
  822. case 1: /* get first config descriptor (only) */
  823. req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
  824. len = sizeof(struct usb_config_descriptor);
  825. break;
  826. case 2: /* get altsetting (OFTEN STALLS) */
  827. req.bRequest = USB_REQ_GET_INTERFACE;
  828. req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
  829. /* index = 0 means first interface */
  830. len = 1;
  831. expected = EPIPE;
  832. break;
  833. case 3: /* get interface status */
  834. req.bRequest = USB_REQ_GET_STATUS;
  835. req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
  836. /* interface 0 */
  837. len = 2;
  838. break;
  839. case 4: /* get device status */
  840. req.bRequest = USB_REQ_GET_STATUS;
  841. req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
  842. len = 2;
  843. break;
  844. case 5: /* get device qualifier (MAY STALL) */
  845. req.wValue = cpu_to_le16 (USB_DT_DEVICE_QUALIFIER << 8);
  846. len = sizeof(struct usb_qualifier_descriptor);
  847. if (udev->speed != USB_SPEED_HIGH)
  848. expected = EPIPE;
  849. break;
  850. case 6: /* get first config descriptor, plus interface */
  851. req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
  852. len = sizeof(struct usb_config_descriptor);
  853. len += sizeof(struct usb_interface_descriptor);
  854. break;
  855. case 7: /* get interface descriptor (ALWAYS STALLS) */
  856. req.wValue = cpu_to_le16 (USB_DT_INTERFACE << 8);
  857. /* interface == 0 */
  858. len = sizeof(struct usb_interface_descriptor);
  859. expected = -EPIPE;
  860. break;
  861. /* NOTE: two consecutive stalls in the queue here.
  862. * that tests fault recovery a bit more aggressively. */
  863. case 8: /* clear endpoint halt (MAY STALL) */
  864. req.bRequest = USB_REQ_CLEAR_FEATURE;
  865. req.bRequestType = USB_RECIP_ENDPOINT;
  866. /* wValue 0 == ep halt */
  867. /* wIndex 0 == ep0 (shouldn't halt!) */
  868. len = 0;
  869. pipe = usb_sndctrlpipe(udev, 0);
  870. expected = EPIPE;
  871. break;
  872. case 9: /* get endpoint status */
  873. req.bRequest = USB_REQ_GET_STATUS;
  874. req.bRequestType = USB_DIR_IN|USB_RECIP_ENDPOINT;
  875. /* endpoint 0 */
  876. len = 2;
  877. break;
  878. case 10: /* trigger short read (EREMOTEIO) */
  879. req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
  880. len = 1024;
  881. expected = -EREMOTEIO;
  882. break;
  883. /* NOTE: two consecutive _different_ faults in the queue. */
  884. case 11: /* get endpoint descriptor (ALWAYS STALLS) */
  885. req.wValue = cpu_to_le16(USB_DT_ENDPOINT << 8);
  886. /* endpoint == 0 */
  887. len = sizeof(struct usb_interface_descriptor);
  888. expected = EPIPE;
  889. break;
  890. /* NOTE: sometimes even a third fault in the queue! */
  891. case 12: /* get string 0 descriptor (MAY STALL) */
  892. req.wValue = cpu_to_le16(USB_DT_STRING << 8);
  893. /* string == 0, for language IDs */
  894. len = sizeof(struct usb_interface_descriptor);
  895. /* may succeed when > 4 languages */
  896. expected = EREMOTEIO; /* or EPIPE, if no strings */
  897. break;
  898. case 13: /* short read, resembling case 10 */
  899. req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
  900. /* last data packet "should" be DATA1, not DATA0 */
  901. len = 1024 - udev->descriptor.bMaxPacketSize0;
  902. expected = -EREMOTEIO;
  903. break;
  904. case 14: /* short read; try to fill the last packet */
  905. req.wValue = cpu_to_le16((USB_DT_DEVICE << 8) | 0);
  906. /* device descriptor size == 18 bytes */
  907. len = udev->descriptor.bMaxPacketSize0;
  908. if (udev->speed == USB_SPEED_SUPER)
  909. len = 512;
  910. switch (len) {
  911. case 8:
  912. len = 24;
  913. break;
  914. case 16:
  915. len = 32;
  916. break;
  917. }
  918. expected = -EREMOTEIO;
  919. break;
  920. default:
  921. ERROR(dev, "bogus number of ctrl queue testcases!\n");
  922. context.status = -EINVAL;
  923. goto cleanup;
  924. }
  925. req.wLength = cpu_to_le16(len);
  926. urb[i] = u = simple_alloc_urb(udev, pipe, len);
  927. if (!u)
  928. goto cleanup;
  929. reqp = kmalloc(sizeof *reqp, GFP_KERNEL);
  930. if (!reqp)
  931. goto cleanup;
  932. reqp->setup = req;
  933. reqp->number = i % NUM_SUBCASES;
  934. reqp->expected = expected;
  935. u->setup_packet = (char *) &reqp->setup;
  936. u->context = &context;
  937. u->complete = ctrl_complete;
  938. }
  939. /* queue the urbs */
  940. context.urb = urb;
  941. spin_lock_irq(&context.lock);
  942. for (i = 0; i < param->sglen; i++) {
  943. context.status = usb_submit_urb(urb[i], GFP_ATOMIC);
  944. if (context.status != 0) {
  945. ERROR(dev, "can't submit urb[%d], status %d\n",
  946. i, context.status);
  947. context.count = context.pending;
  948. break;
  949. }
  950. context.pending++;
  951. }
  952. spin_unlock_irq(&context.lock);
  953. /* FIXME set timer and time out; provide a disconnect hook */
  954. /* wait for the last one to complete */
  955. if (context.pending > 0)
  956. wait_for_completion(&context.complete);
  957. cleanup:
  958. for (i = 0; i < param->sglen; i++) {
  959. if (!urb[i])
  960. continue;
  961. urb[i]->dev = udev;
  962. kfree(urb[i]->setup_packet);
  963. simple_free_urb(urb[i]);
  964. }
  965. kfree(urb);
  966. return context.status;
  967. }
  968. #undef NUM_SUBCASES
  969. /*-------------------------------------------------------------------------*/
  970. static void unlink1_callback(struct urb *urb)
  971. {
  972. int status = urb->status;
  973. /* we "know" -EPIPE (stall) never happens */
  974. if (!status)
  975. status = usb_submit_urb(urb, GFP_ATOMIC);
  976. if (status) {
  977. urb->status = status;
  978. complete(urb->context);
  979. }
  980. }
  981. static int unlink1(struct usbtest_dev *dev, int pipe, int size, int async)
  982. {
  983. struct urb *urb;
  984. struct completion completion;
  985. int retval = 0;
  986. init_completion(&completion);
  987. urb = simple_alloc_urb(testdev_to_usbdev(dev), pipe, size);
  988. if (!urb)
  989. return -ENOMEM;
  990. urb->context = &completion;
  991. urb->complete = unlink1_callback;
  992. /* keep the endpoint busy. there are lots of hc/hcd-internal
  993. * states, and testing should get to all of them over time.
  994. *
  995. * FIXME want additional tests for when endpoint is STALLing
  996. * due to errors, or is just NAKing requests.
  997. */
  998. retval = usb_submit_urb(urb, GFP_KERNEL);
  999. if (retval != 0) {
  1000. dev_err(&dev->intf->dev, "submit fail %d\n", retval);
  1001. return retval;
  1002. }
  1003. /* unlinking that should always work. variable delay tests more
  1004. * hcd states and code paths, even with little other system load.
  1005. */
  1006. msleep(jiffies % (2 * INTERRUPT_RATE));
  1007. if (async) {
  1008. while (!completion_done(&completion)) {
  1009. retval = usb_unlink_urb(urb);
  1010. switch (retval) {
  1011. case -EBUSY:
  1012. case -EIDRM:
  1013. /* we can't unlink urbs while they're completing
  1014. * or if they've completed, and we haven't
  1015. * resubmitted. "normal" drivers would prevent
  1016. * resubmission, but since we're testing unlink
  1017. * paths, we can't.
  1018. */
  1019. ERROR(dev, "unlink retry\n");
  1020. continue;
  1021. case 0:
  1022. case -EINPROGRESS:
  1023. break;
  1024. default:
  1025. dev_err(&dev->intf->dev,
  1026. "unlink fail %d\n", retval);
  1027. return retval;
  1028. }
  1029. break;
  1030. }
  1031. } else
  1032. usb_kill_urb(urb);
  1033. wait_for_completion(&completion);
  1034. retval = urb->status;
  1035. simple_free_urb(urb);
  1036. if (async)
  1037. return (retval == -ECONNRESET) ? 0 : retval - 1000;
  1038. else
  1039. return (retval == -ENOENT || retval == -EPERM) ?
  1040. 0 : retval - 2000;
  1041. }
  1042. static int unlink_simple(struct usbtest_dev *dev, int pipe, int len)
  1043. {
  1044. int retval = 0;
  1045. /* test sync and async paths */
  1046. retval = unlink1(dev, pipe, len, 1);
  1047. if (!retval)
  1048. retval = unlink1(dev, pipe, len, 0);
  1049. return retval;
  1050. }
  1051. /*-------------------------------------------------------------------------*/
  1052. struct queued_ctx {
  1053. struct completion complete;
  1054. atomic_t pending;
  1055. unsigned num;
  1056. int status;
  1057. struct urb **urbs;
  1058. };
  1059. static void unlink_queued_callback(struct urb *urb)
  1060. {
  1061. int status = urb->status;
  1062. struct queued_ctx *ctx = urb->context;
  1063. if (ctx->status)
  1064. goto done;
  1065. if (urb == ctx->urbs[ctx->num - 4] || urb == ctx->urbs[ctx->num - 2]) {
  1066. if (status == -ECONNRESET)
  1067. goto done;
  1068. /* What error should we report if the URB completed normally? */
  1069. }
  1070. if (status != 0)
  1071. ctx->status = status;
  1072. done:
  1073. if (atomic_dec_and_test(&ctx->pending))
  1074. complete(&ctx->complete);
  1075. }
  1076. static int unlink_queued(struct usbtest_dev *dev, int pipe, unsigned num,
  1077. unsigned size)
  1078. {
  1079. struct queued_ctx ctx;
  1080. struct usb_device *udev = testdev_to_usbdev(dev);
  1081. void *buf;
  1082. dma_addr_t buf_dma;
  1083. int i;
  1084. int retval = -ENOMEM;
  1085. init_completion(&ctx.complete);
  1086. atomic_set(&ctx.pending, 1); /* One more than the actual value */
  1087. ctx.num = num;
  1088. ctx.status = 0;
  1089. buf = usb_alloc_coherent(udev, size, GFP_KERNEL, &buf_dma);
  1090. if (!buf)
  1091. return retval;
  1092. memset(buf, 0, size);
  1093. /* Allocate and init the urbs we'll queue */
  1094. ctx.urbs = kcalloc(num, sizeof(struct urb *), GFP_KERNEL);
  1095. if (!ctx.urbs)
  1096. goto free_buf;
  1097. for (i = 0; i < num; i++) {
  1098. ctx.urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
  1099. if (!ctx.urbs[i])
  1100. goto free_urbs;
  1101. usb_fill_bulk_urb(ctx.urbs[i], udev, pipe, buf, size,
  1102. unlink_queued_callback, &ctx);
  1103. ctx.urbs[i]->transfer_dma = buf_dma;
  1104. ctx.urbs[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
  1105. }
  1106. /* Submit all the URBs and then unlink URBs num - 4 and num - 2. */
  1107. for (i = 0; i < num; i++) {
  1108. atomic_inc(&ctx.pending);
  1109. retval = usb_submit_urb(ctx.urbs[i], GFP_KERNEL);
  1110. if (retval != 0) {
  1111. dev_err(&dev->intf->dev, "submit urbs[%d] fail %d\n",
  1112. i, retval);
  1113. atomic_dec(&ctx.pending);
  1114. ctx.status = retval;
  1115. break;
  1116. }
  1117. }
  1118. if (i == num) {
  1119. usb_unlink_urb(ctx.urbs[num - 4]);
  1120. usb_unlink_urb(ctx.urbs[num - 2]);
  1121. } else {
  1122. while (--i >= 0)
  1123. usb_unlink_urb(ctx.urbs[i]);
  1124. }
  1125. if (atomic_dec_and_test(&ctx.pending)) /* The extra count */
  1126. complete(&ctx.complete);
  1127. wait_for_completion(&ctx.complete);
  1128. retval = ctx.status;
  1129. free_urbs:
  1130. for (i = 0; i < num; i++)
  1131. usb_free_urb(ctx.urbs[i]);
  1132. kfree(ctx.urbs);
  1133. free_buf:
  1134. usb_free_coherent(udev, size, buf, buf_dma);
  1135. return retval;
  1136. }
  1137. /*-------------------------------------------------------------------------*/
  1138. static int verify_not_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
  1139. {
  1140. int retval;
  1141. u16 status;
  1142. /* shouldn't look or act halted */
  1143. retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
  1144. if (retval < 0) {
  1145. ERROR(tdev, "ep %02x couldn't get no-halt status, %d\n",
  1146. ep, retval);
  1147. return retval;
  1148. }
  1149. if (status != 0) {
  1150. ERROR(tdev, "ep %02x bogus status: %04x != 0\n", ep, status);
  1151. return -EINVAL;
  1152. }
  1153. retval = simple_io(tdev, urb, 1, 0, 0, __func__);
  1154. if (retval != 0)
  1155. return -EINVAL;
  1156. return 0;
  1157. }
  1158. static int verify_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
  1159. {
  1160. int retval;
  1161. u16 status;
  1162. /* should look and act halted */
  1163. retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
  1164. if (retval < 0) {
  1165. ERROR(tdev, "ep %02x couldn't get halt status, %d\n",
  1166. ep, retval);
  1167. return retval;
  1168. }
  1169. le16_to_cpus(&status);
  1170. if (status != 1) {
  1171. ERROR(tdev, "ep %02x bogus status: %04x != 1\n", ep, status);
  1172. return -EINVAL;
  1173. }
  1174. retval = simple_io(tdev, urb, 1, 0, -EPIPE, __func__);
  1175. if (retval != -EPIPE)
  1176. return -EINVAL;
  1177. retval = simple_io(tdev, urb, 1, 0, -EPIPE, "verify_still_halted");
  1178. if (retval != -EPIPE)
  1179. return -EINVAL;
  1180. return 0;
  1181. }
  1182. static int test_halt(struct usbtest_dev *tdev, int ep, struct urb *urb)
  1183. {
  1184. int retval;
  1185. /* shouldn't look or act halted now */
  1186. retval = verify_not_halted(tdev, ep, urb);
  1187. if (retval < 0)
  1188. return retval;
  1189. /* set halt (protocol test only), verify it worked */
  1190. retval = usb_control_msg(urb->dev, usb_sndctrlpipe(urb->dev, 0),
  1191. USB_REQ_SET_FEATURE, USB_RECIP_ENDPOINT,
  1192. USB_ENDPOINT_HALT, ep,
  1193. NULL, 0, USB_CTRL_SET_TIMEOUT);
  1194. if (retval < 0) {
  1195. ERROR(tdev, "ep %02x couldn't set halt, %d\n", ep, retval);
  1196. return retval;
  1197. }
  1198. retval = verify_halted(tdev, ep, urb);
  1199. if (retval < 0)
  1200. return retval;
  1201. /* clear halt (tests API + protocol), verify it worked */
  1202. retval = usb_clear_halt(urb->dev, urb->pipe);
  1203. if (retval < 0) {
  1204. ERROR(tdev, "ep %02x couldn't clear halt, %d\n", ep, retval);
  1205. return retval;
  1206. }
  1207. retval = verify_not_halted(tdev, ep, urb);
  1208. if (retval < 0)
  1209. return retval;
  1210. /* NOTE: could also verify SET_INTERFACE clear halts ... */
  1211. return 0;
  1212. }
  1213. static int halt_simple(struct usbtest_dev *dev)
  1214. {
  1215. int ep;
  1216. int retval = 0;
  1217. struct urb *urb;
  1218. urb = simple_alloc_urb(testdev_to_usbdev(dev), 0, 512);
  1219. if (urb == NULL)
  1220. return -ENOMEM;
  1221. if (dev->in_pipe) {
  1222. ep = usb_pipeendpoint(dev->in_pipe) | USB_DIR_IN;
  1223. urb->pipe = dev->in_pipe;
  1224. retval = test_halt(dev, ep, urb);
  1225. if (retval < 0)
  1226. goto done;
  1227. }
  1228. if (dev->out_pipe) {
  1229. ep = usb_pipeendpoint(dev->out_pipe);
  1230. urb->pipe = dev->out_pipe;
  1231. retval = test_halt(dev, ep, urb);
  1232. }
  1233. done:
  1234. simple_free_urb(urb);
  1235. return retval;
  1236. }
  1237. /*-------------------------------------------------------------------------*/
  1238. /* Control OUT tests use the vendor control requests from Intel's
  1239. * USB 2.0 compliance test device: write a buffer, read it back.
  1240. *
  1241. * Intel's spec only _requires_ that it work for one packet, which
  1242. * is pretty weak. Some HCDs place limits here; most devices will
  1243. * need to be able to handle more than one OUT data packet. We'll
  1244. * try whatever we're told to try.
  1245. */
  1246. static int ctrl_out(struct usbtest_dev *dev,
  1247. unsigned count, unsigned length, unsigned vary, unsigned offset)
  1248. {
  1249. unsigned i, j, len;
  1250. int retval;
  1251. u8 *buf;
  1252. char *what = "?";
  1253. struct usb_device *udev;
  1254. if (length < 1 || length > 0xffff || vary >= length)
  1255. return -EINVAL;
  1256. buf = kmalloc(length + offset, GFP_KERNEL);
  1257. if (!buf)
  1258. return -ENOMEM;
  1259. buf += offset;
  1260. udev = testdev_to_usbdev(dev);
  1261. len = length;
  1262. retval = 0;
  1263. /* NOTE: hardware might well act differently if we pushed it
  1264. * with lots back-to-back queued requests.
  1265. */
  1266. for (i = 0; i < count; i++) {
  1267. /* write patterned data */
  1268. for (j = 0; j < len; j++)
  1269. buf[j] = i + j;
  1270. retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
  1271. 0x5b, USB_DIR_OUT|USB_TYPE_VENDOR,
  1272. 0, 0, buf, len, USB_CTRL_SET_TIMEOUT);
  1273. if (retval != len) {
  1274. what = "write";
  1275. if (retval >= 0) {
  1276. ERROR(dev, "ctrl_out, wlen %d (expected %d)\n",
  1277. retval, len);
  1278. retval = -EBADMSG;
  1279. }
  1280. break;
  1281. }
  1282. /* read it back -- assuming nothing intervened!! */
  1283. retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
  1284. 0x5c, USB_DIR_IN|USB_TYPE_VENDOR,
  1285. 0, 0, buf, len, USB_CTRL_GET_TIMEOUT);
  1286. if (retval != len) {
  1287. what = "read";
  1288. if (retval >= 0) {
  1289. ERROR(dev, "ctrl_out, rlen %d (expected %d)\n",
  1290. retval, len);
  1291. retval = -EBADMSG;
  1292. }
  1293. break;
  1294. }
  1295. /* fail if we can't verify */
  1296. for (j = 0; j < len; j++) {
  1297. if (buf[j] != (u8) (i + j)) {
  1298. ERROR(dev, "ctrl_out, byte %d is %d not %d\n",
  1299. j, buf[j], (u8) i + j);
  1300. retval = -EBADMSG;
  1301. break;
  1302. }
  1303. }
  1304. if (retval < 0) {
  1305. what = "verify";
  1306. break;
  1307. }
  1308. len += vary;
  1309. /* [real world] the "zero bytes IN" case isn't really used.
  1310. * hardware can easily trip up in this weird case, since its
  1311. * status stage is IN, not OUT like other ep0in transfers.
  1312. */
  1313. if (len > length)
  1314. len = realworld ? 1 : 0;
  1315. }
  1316. if (retval < 0)
  1317. ERROR(dev, "ctrl_out %s failed, code %d, count %d\n",
  1318. what, retval, i);
  1319. kfree(buf - offset);
  1320. return retval;
  1321. }
  1322. /*-------------------------------------------------------------------------*/
  1323. /* ISO tests ... mimics common usage
  1324. * - buffer length is split into N packets (mostly maxpacket sized)
  1325. * - multi-buffers according to sglen
  1326. */
  1327. struct iso_context {
  1328. unsigned count;
  1329. unsigned pending;
  1330. spinlock_t lock;
  1331. struct completion done;
  1332. int submit_error;
  1333. unsigned long errors;
  1334. unsigned long packet_count;
  1335. struct usbtest_dev *dev;
  1336. };
  1337. static void iso_callback(struct urb *urb)
  1338. {
  1339. struct iso_context *ctx = urb->context;
  1340. spin_lock(&ctx->lock);
  1341. ctx->count--;
  1342. ctx->packet_count += urb->number_of_packets;
  1343. if (urb->error_count > 0)
  1344. ctx->errors += urb->error_count;
  1345. else if (urb->status != 0)
  1346. ctx->errors += urb->number_of_packets;
  1347. else if (urb->actual_length != urb->transfer_buffer_length)
  1348. ctx->errors++;
  1349. else if (check_guard_bytes(ctx->dev, urb) != 0)
  1350. ctx->errors++;
	if (urb->status == 0 && ctx->count > (ctx->pending - 1)
			&& !ctx->submit_error) {
		int status = usb_submit_urb(urb, GFP_ATOMIC);
		switch (status) {
		case 0:
			goto done;
		default:
			dev_err(&ctx->dev->intf->dev,
					"iso resubmit err %d\n",
					status);
			/* FALLTHROUGH */
		case -ENODEV:		/* disconnected */
		case -ESHUTDOWN:	/* endpoint disabled */
			ctx->submit_error = 1;
			break;
		}
	}

	ctx->pending--;
	if (ctx->pending == 0) {
		if (ctx->errors)
			dev_err(&ctx->dev->intf->dev,
				"iso test, %lu errors out of %lu\n",
				ctx->errors, ctx->packet_count);
		complete(&ctx->done);
	}
done:
	spin_unlock(&ctx->lock);
}

static struct urb *iso_alloc_urb(
	struct usb_device	*udev,
	int			pipe,
	struct usb_endpoint_descriptor	*desc,
	long			bytes,
	unsigned		offset
)
{
	struct urb	*urb;
	unsigned	i, maxp, packets;

	if (bytes < 0 || !desc)
		return NULL;
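
	/* wMaxPacketSize: bits 10..0 give the packet size; for high-speed
	 * high-bandwidth endpoints, bits 12..11 give the number of extra
	 * transactions per microframe, so the usable payload per interval
	 * is maxp times 1..3.
	 */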
	maxp = 0x7ff & usb_endpoint_maxp(desc);
	maxp *= 1 + (0x3 & (usb_endpoint_maxp(desc) >> 11));
	packets = DIV_ROUND_UP(bytes, maxp);

	urb = usb_alloc_urb(packets, GFP_KERNEL);
	if (!urb)
		return urb;
	urb->dev = udev;
	urb->pipe = pipe;

	urb->number_of_packets = packets;
	urb->transfer_buffer_length = bytes;
	urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
							GFP_KERNEL,
							&urb->transfer_dma);
	if (!urb->transfer_buffer) {
		usb_free_urb(urb);
		return NULL;
	}
	if (offset) {
		memset(urb->transfer_buffer, GUARD_BYTE, offset);
		urb->transfer_buffer += offset;
		urb->transfer_dma += offset;
	}
	/* For inbound transfers use guard byte so that test fails if
	   data not correctly copied */
	memset(urb->transfer_buffer,
			usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
			bytes);

	for (i = 0; i < packets; i++) {
		/* here, only the last packet will be short */
		urb->iso_frame_desc[i].length = min((unsigned) bytes, maxp);
		bytes -= urb->iso_frame_desc[i].length;

		urb->iso_frame_desc[i].offset = maxp * i;
	}

	urb->complete = iso_callback;
	/* urb->context = SET BY CALLER */
	urb->interval = 1 << (desc->bInterval - 1);
	urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
	return urb;
}

static int
test_iso_queue(struct usbtest_dev *dev, struct usbtest_param *param,
		int pipe, struct usb_endpoint_descriptor *desc, unsigned offset)
{
	struct iso_context	context;
	struct usb_device	*udev;
	unsigned		i;
	unsigned long		packets = 0;
	int			status = 0;
	struct urb		*urbs[10];	/* FIXME no limit */

	if (param->sglen > 10)
		return -EDOM;

	memset(&context, 0, sizeof context);
	context.count = param->iterations * param->sglen;
	context.dev = dev;
	init_completion(&context.done);
	spin_lock_init(&context.lock);

	memset(urbs, 0, sizeof urbs);
	udev = testdev_to_usbdev(dev);
	dev_info(&dev->intf->dev,
		"... iso period %d %sframes, wMaxPacket %04x\n",
		1 << (desc->bInterval - 1),
		(udev->speed == USB_SPEED_HIGH) ? "micro" : "",
		usb_endpoint_maxp(desc));

	for (i = 0; i < param->sglen; i++) {
		urbs[i] = iso_alloc_urb(udev, pipe, desc,
					param->length, offset);
		if (!urbs[i]) {
			status = -ENOMEM;
			goto fail;
		}
		packets += urbs[i]->number_of_packets;
		urbs[i]->context = &context;
	}
	packets *= param->iterations;
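
	/* bInterval is log2-encoded: the period is 1 << (bInterval - 1)
	 * (micro)frames, and high speed has 8 microframes per millisecond,
	 * hence the divide by 8 below.
	 */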
	dev_info(&dev->intf->dev,
		"... total %lu msec (%lu packets)\n",
		(packets * (1 << (desc->bInterval - 1)))
			/ ((udev->speed == USB_SPEED_HIGH) ? 8 : 1),
		packets);
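
	/* Queue every URB under the lock so iso_callback() sees a stable
	 * 'pending' count even while the first completions race in.
	 */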
	spin_lock_irq(&context.lock);
	for (i = 0; i < param->sglen; i++) {
		++context.pending;
		status = usb_submit_urb(urbs[i], GFP_ATOMIC);
		if (status < 0) {
			ERROR(dev, "submit iso[%d], error %d\n", i, status);
			if (i == 0) {
				spin_unlock_irq(&context.lock);
				goto fail;
			}

			simple_free_urb(urbs[i]);
			urbs[i] = NULL;
			context.pending--;
			context.submit_error = 1;
			break;
		}
	}
	spin_unlock_irq(&context.lock);

	wait_for_completion(&context.done);

	for (i = 0; i < param->sglen; i++) {
		if (urbs[i])
			simple_free_urb(urbs[i]);
	}

	/*
	 * Isochronous transfers are expected to fail sometimes. As an
	 * arbitrary limit, we will report an error if any submissions
	 * fail or if the transfer failure rate is > 10%.
	 */
	if (status != 0)
		;
	else if (context.submit_error)
		status = -EACCES;
	else if (context.errors > context.packet_count / 10)
		status = -EIO;
	return status;

fail:
	for (i = 0; i < param->sglen; i++) {
		if (urbs[i])
			simple_free_urb(urbs[i]);
	}
	return status;
}

static int test_unaligned_bulk(
	struct usbtest_dev *tdev,
	int pipe,
	unsigned length,
	int iterations,
	unsigned transfer_flags,
	const char *label)
{
	int retval;
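
	/* The trailing '1' asks usbtest_alloc_urb() for a transfer buffer
	 * starting at offset 1, i.e. a deliberately misaligned (odd)
	 * address; that is the whole point of the "odd addr" test cases.
	 */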
	struct urb *urb = usbtest_alloc_urb(
		testdev_to_usbdev(tdev), pipe, length, transfer_flags, 1);

	if (!urb)
		return -ENOMEM;

	retval = simple_io(tdev, urb, iterations, 0, 0, label);
	simple_free_urb(urb);
	return retval;
}

/*-------------------------------------------------------------------------*/

/* We only have this one interface to user space, through usbfs.
 * User mode code can scan usbfs to find N different devices (maybe on
 * different busses) to use when testing, and allocate one thread per
 * test. So discovery is simplified, and we have no device naming issues.
 *
 * Don't use these only as stress/load tests. Use them along with other
 * USB bus activity: plugging, unplugging, mousing, mp3 playback, video
 * capture, and so on. Run different tests at different times, in
 * different sequences. Nothing here should interact with other devices,
 * except indirectly by consuming USB bandwidth and CPU resources for test
 * threads and request completion. But the only way to know that for sure
 * is to test when HC queues are in use by many devices.
 *
 * WARNING: Because usbfs grabs udev->dev.sem before calling this ioctl(),
 * it locks out usbcore in certain code paths. Notably, if you disconnect
 * the device-under-test, khubd will block forever waiting for the
 * ioctl to complete ... so that usb_disconnect() can abort the pending
 * urbs and then call usbtest_disconnect(). To abort a test, you're best
 * off just killing the userspace task and waiting for it to exit.
 */
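
/* A rough sketch of the user-space side, essentially what the kernel's
 * testusb tool does (the device path and interface number below are only
 * placeholders):
 *
 *	struct usbtest_param param = {
 *		.test_num = 1, .iterations = 1000, .length = 512,
 *	};
 *	struct usbdevfs_ioctl wrapper = {
 *		.ifno = 0,
 *		.ioctl_code = USBTEST_REQUEST,
 *		.data = &param,
 *	};
 *	int fd = open("/dev/bus/usb/001/002", O_RDWR);
 *	ioctl(fd, USBDEVFS_IOCTL, &wrapper);
 */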
static int
usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
{
	struct usbtest_dev	*dev = usb_get_intfdata(intf);
	struct usb_device	*udev = testdev_to_usbdev(dev);
	struct usbtest_param	*param = buf;
	int			retval = -EOPNOTSUPP;
	struct urb		*urb;
	struct scatterlist	*sg;
	struct usb_sg_request	req;
	struct timeval		start;
	unsigned		i;

	/* FIXME USBDEVFS_CONNECTINFO doesn't say how fast the device is. */

	pattern = mod_pattern;

	if (code != USBTEST_REQUEST)
		return -EOPNOTSUPP;

	if (param->iterations <= 0)
		return -EINVAL;

	if (mutex_lock_interruptible(&dev->lock))
		return -ERESTARTSYS;

	/* FIXME: What if a system sleep starts while a test is running? */

	/* some devices, like ez-usb default devices, need a non-default
	 * altsetting to have any active endpoints. some tests change
	 * altsettings; force a default so most tests don't need to check.
	 */
	if (dev->info->alt >= 0) {
		int res;

		if (intf->altsetting->desc.bInterfaceNumber) {
			mutex_unlock(&dev->lock);
			return -ENODEV;
		}
		res = set_altsetting(dev, dev->info->alt);
		if (res) {
			dev_err(&intf->dev,
					"set altsetting to %d failed, %d\n",
					dev->info->alt, res);
			mutex_unlock(&dev->lock);
			return res;
		}
	}

	/*
	 * Just a bunch of test cases that every HCD is expected to handle.
	 *
	 * Some may need specific firmware, though it'd be good to have
	 * one firmware image to handle all the test cases.
	 *
	 * FIXME add more tests! cancel requests, verify the data, control
	 * queueing, concurrent read+write threads, and so on.
	 */
	do_gettimeofday(&start);
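
	/* Each case below checks that the endpoints and parameters it needs
	 * are available and otherwise just breaks out, leaving retval at
	 * -EOPNOTSUPP so an unsupported test is reported as such.
	 */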
	switch (param->test_num) {

	case 0:
		dev_info(&intf->dev, "TEST 0: NOP\n");
		retval = 0;
		break;

	/* Simple non-queued bulk I/O tests */
	case 1:
		if (dev->out_pipe == 0)
			break;
		dev_info(&intf->dev,
				"TEST 1: write %d bytes %u times\n",
				param->length, param->iterations);
		urb = simple_alloc_urb(udev, dev->out_pipe, param->length);
		if (!urb) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE: bulk sink (maybe accepts short writes) */
		retval = simple_io(dev, urb, param->iterations, 0, 0, "test1");
		simple_free_urb(urb);
		break;
	case 2:
		if (dev->in_pipe == 0)
			break;
		dev_info(&intf->dev,
				"TEST 2: read %d bytes %u times\n",
				param->length, param->iterations);
		urb = simple_alloc_urb(udev, dev->in_pipe, param->length);
		if (!urb) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE: bulk source (maybe generates short writes) */
		retval = simple_io(dev, urb, param->iterations, 0, 0, "test2");
		simple_free_urb(urb);
		break;
	case 3:
		if (dev->out_pipe == 0 || param->vary == 0)
			break;
		dev_info(&intf->dev,
				"TEST 3: write/%d 0..%d bytes %u times\n",
				param->vary, param->length, param->iterations);
		urb = simple_alloc_urb(udev, dev->out_pipe, param->length);
		if (!urb) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE: bulk sink (maybe accepts short writes) */
		retval = simple_io(dev, urb, param->iterations, param->vary,
				0, "test3");
		simple_free_urb(urb);
		break;
	case 4:
		if (dev->in_pipe == 0 || param->vary == 0)
			break;
		dev_info(&intf->dev,
				"TEST 4: read/%d 0..%d bytes %u times\n",
				param->vary, param->length, param->iterations);
		urb = simple_alloc_urb(udev, dev->in_pipe, param->length);
		if (!urb) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE: bulk source (maybe generates short writes) */
		retval = simple_io(dev, urb, param->iterations, param->vary,
				0, "test4");
		simple_free_urb(urb);
		break;

	/* Queued bulk I/O tests */
	case 5:
		if (dev->out_pipe == 0 || param->sglen == 0)
			break;
		dev_info(&intf->dev,
			"TEST 5: write %d sglists %d entries of %d bytes\n",
				param->iterations,
				param->sglen, param->length);
		sg = alloc_sglist(param->sglen, param->length, 0);
		if (!sg) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE: bulk sink (maybe accepts short writes) */
		retval = perform_sglist(dev, param->iterations, dev->out_pipe,
				&req, sg, param->sglen);
		free_sglist(sg, param->sglen);
		break;

	case 6:
		if (dev->in_pipe == 0 || param->sglen == 0)
			break;
		dev_info(&intf->dev,
			"TEST 6: read %d sglists %d entries of %d bytes\n",
				param->iterations,
				param->sglen, param->length);
		sg = alloc_sglist(param->sglen, param->length, 0);
		if (!sg) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE: bulk source (maybe generates short writes) */
		retval = perform_sglist(dev, param->iterations, dev->in_pipe,
				&req, sg, param->sglen);
		free_sglist(sg, param->sglen);
		break;
	case 7:
		if (dev->out_pipe == 0 || param->sglen == 0 || param->vary == 0)
			break;
		dev_info(&intf->dev,
			"TEST 7: write/%d %d sglists %d entries 0..%d bytes\n",
				param->vary, param->iterations,
				param->sglen, param->length);
		sg = alloc_sglist(param->sglen, param->length, param->vary);
		if (!sg) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE: bulk sink (maybe accepts short writes) */
		retval = perform_sglist(dev, param->iterations, dev->out_pipe,
				&req, sg, param->sglen);
		free_sglist(sg, param->sglen);
		break;
	case 8:
		if (dev->in_pipe == 0 || param->sglen == 0 || param->vary == 0)
			break;
		dev_info(&intf->dev,
			"TEST 8: read/%d %d sglists %d entries 0..%d bytes\n",
				param->vary, param->iterations,
				param->sglen, param->length);
		sg = alloc_sglist(param->sglen, param->length, param->vary);
		if (!sg) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE: bulk source (maybe generates short writes) */
		retval = perform_sglist(dev, param->iterations, dev->in_pipe,
				&req, sg, param->sglen);
		free_sglist(sg, param->sglen);
		break;

	/* non-queued sanity tests for control (chapter 9 subset) */
	case 9:
		retval = 0;
		dev_info(&intf->dev,
			"TEST 9: ch9 (subset) control tests, %d times\n",
				param->iterations);
		for (i = param->iterations; retval == 0 && i--; /* NOP */)
			retval = ch9_postconfig(dev);
		if (retval)
			dev_err(&intf->dev, "ch9 subset failed, "
					"iterations left %d\n", i);
		break;

	/* queued control messaging */
	case 10:
		retval = 0;
		dev_info(&intf->dev,
				"TEST 10: queue %d control calls, %d times\n",
				param->sglen,
				param->iterations);
		retval = test_ctrl_queue(dev, param);
		break;

	/* simple non-queued unlinks (ring with one urb) */
	case 11:
		if (dev->in_pipe == 0 || !param->length)
			break;
		retval = 0;
		dev_info(&intf->dev, "TEST 11: unlink %d reads of %d\n",
				param->iterations, param->length);
		for (i = param->iterations; retval == 0 && i--; /* NOP */)
			retval = unlink_simple(dev, dev->in_pipe,
						param->length);
		if (retval)
			dev_err(&intf->dev, "unlink reads failed %d, "
				"iterations left %d\n", retval, i);
		break;
	case 12:
		if (dev->out_pipe == 0 || !param->length)
			break;
		retval = 0;
		dev_info(&intf->dev, "TEST 12: unlink %d writes of %d\n",
				param->iterations, param->length);
		for (i = param->iterations; retval == 0 && i--; /* NOP */)
			retval = unlink_simple(dev, dev->out_pipe,
						param->length);
		if (retval)
			dev_err(&intf->dev, "unlink writes failed %d, "
				"iterations left %d\n", retval, i);
		break;

	/* ep halt tests */
	case 13:
		if (dev->out_pipe == 0 && dev->in_pipe == 0)
			break;
		retval = 0;
		dev_info(&intf->dev, "TEST 13: set/clear %d halts\n",
				param->iterations);
		for (i = param->iterations; retval == 0 && i--; /* NOP */)
			retval = halt_simple(dev);
		if (retval)
			ERROR(dev, "halts failed, iterations left %d\n", i);
		break;
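
	/* Cases 14 and 21 exercise ctrl_out(), which writes a byte pattern
	 * with a vendor OUT request (0x5b) and reads it back with a vendor
	 * IN request (0x5c) to verify it.
	 */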
	/* control write tests */
	case 14:
		if (!dev->info->ctrl_out)
			break;
		dev_info(&intf->dev, "TEST 14: %d ep0out, %d..%d vary %d\n",
				param->iterations,
				realworld ? 1 : 0, param->length,
				param->vary);
		retval = ctrl_out(dev, param->iterations,
				param->length, param->vary, 0);
		break;

	/* iso write tests */
	case 15:
		if (dev->out_iso_pipe == 0 || param->sglen == 0)
			break;
		dev_info(&intf->dev,
			"TEST 15: write %d iso, %d entries of %d bytes\n",
				param->iterations,
				param->sglen, param->length);
		/* FIRMWARE: iso sink */
		retval = test_iso_queue(dev, param,
				dev->out_iso_pipe, dev->iso_out, 0);
		break;

	/* iso read tests */
	case 16:
		if (dev->in_iso_pipe == 0 || param->sglen == 0)
			break;
		dev_info(&intf->dev,
			"TEST 16: read %d iso, %d entries of %d bytes\n",
				param->iterations,
				param->sglen, param->length);
		/* FIRMWARE: iso source */
		retval = test_iso_queue(dev, param,
				dev->in_iso_pipe, dev->iso_in, 0);
		break;

	/* FIXME scatterlist cancel (needs helper thread) */

	/* Tests for bulk I/O using DMA mapping by core and odd address */
	case 17:
		if (dev->out_pipe == 0)
			break;
		dev_info(&intf->dev,
			"TEST 17: write odd addr %d bytes %u times core map\n",
			param->length, param->iterations);
		retval = test_unaligned_bulk(
				dev, dev->out_pipe,
				param->length, param->iterations,
				0, "test17");
		break;

	case 18:
		if (dev->in_pipe == 0)
			break;
		dev_info(&intf->dev,
			"TEST 18: read odd addr %d bytes %u times core map\n",
			param->length, param->iterations);
		retval = test_unaligned_bulk(
				dev, dev->in_pipe,
				param->length, param->iterations,
				0, "test18");
		break;

	/* Tests for bulk I/O using premapped coherent buffer and odd address */
	case 19:
		if (dev->out_pipe == 0)
			break;
		dev_info(&intf->dev,
			"TEST 19: write odd addr %d bytes %u times premapped\n",
			param->length, param->iterations);
		retval = test_unaligned_bulk(
				dev, dev->out_pipe,
				param->length, param->iterations,
				URB_NO_TRANSFER_DMA_MAP, "test19");
		break;

	case 20:
		if (dev->in_pipe == 0)
			break;
		dev_info(&intf->dev,
			"TEST 20: read odd addr %d bytes %u times premapped\n",
			param->length, param->iterations);
		retval = test_unaligned_bulk(
				dev, dev->in_pipe,
				param->length, param->iterations,
				URB_NO_TRANSFER_DMA_MAP, "test20");
		break;

	/* control write tests with unaligned buffer */
	case 21:
		if (!dev->info->ctrl_out)
			break;
		dev_info(&intf->dev,
				"TEST 21: %d ep0out odd addr, %d..%d vary %d\n",
				param->iterations,
				realworld ? 1 : 0, param->length,
				param->vary);
		retval = ctrl_out(dev, param->iterations,
				param->length, param->vary, 1);
		break;

	/* unaligned iso tests */
	case 22:
		if (dev->out_iso_pipe == 0 || param->sglen == 0)
			break;
		dev_info(&intf->dev,
			"TEST 22: write %d iso odd, %d entries of %d bytes\n",
				param->iterations,
				param->sglen, param->length);
		retval = test_iso_queue(dev, param,
				dev->out_iso_pipe, dev->iso_out, 1);
		break;

	case 23:
		if (dev->in_iso_pipe == 0 || param->sglen == 0)
			break;
		dev_info(&intf->dev,
			"TEST 23: read %d iso odd, %d entries of %d bytes\n",
				param->iterations,
				param->sglen, param->length);
		retval = test_iso_queue(dev, param,
				dev->in_iso_pipe, dev->iso_in, 1);
		break;

	/* unlink URBs from a bulk-OUT queue */
	case 24:
		if (dev->out_pipe == 0 || !param->length || param->sglen < 4)
			break;
		retval = 0;
		dev_info(&intf->dev, "TEST 24: unlink from %d queues of "
				"%d %d-byte writes\n",
				param->iterations, param->sglen, param->length);
		for (i = param->iterations; retval == 0 && i > 0; --i) {
			retval = unlink_queued(dev, dev->out_pipe,
						param->sglen, param->length);
			if (retval) {
				dev_err(&intf->dev,
					"unlink queued writes failed %d, "
					"iterations left %d\n", retval, i);
				break;
			}
		}
		break;

	}
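
	/* Report elapsed wall-clock time back to user space; normalize the
	 * microseconds field when the subtraction borrows.
	 */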
	do_gettimeofday(&param->duration);
	param->duration.tv_sec -= start.tv_sec;
	param->duration.tv_usec -= start.tv_usec;
	if (param->duration.tv_usec < 0) {
		param->duration.tv_usec += 1000 * 1000;
		param->duration.tv_sec -= 1;
	}
	mutex_unlock(&dev->lock);
	return retval;
}

/*-------------------------------------------------------------------------*/

static unsigned force_interrupt;
module_param(force_interrupt, uint, 0);
MODULE_PARM_DESC(force_interrupt, "0 = test default; else interrupt");

#ifdef GENERIC
static unsigned short vendor;
module_param(vendor, ushort, 0);
MODULE_PARM_DESC(vendor, "vendor code (from usb-if)");

static unsigned short product;
module_param(product, ushort, 0);
MODULE_PARM_DESC(product, "product code (from vendor)");
#endif

static int
usbtest_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
	struct usb_device	*udev;
	struct usbtest_dev	*dev;
	struct usbtest_info	*info;
	char			*rtest, *wtest;
	char			*irtest, *iwtest;

	udev = interface_to_usbdev(intf);

#ifdef GENERIC
	/* specify devices by module parameters? */
	if (id->match_flags == 0) {
		/* vendor match required, product match optional */
		if (!vendor || le16_to_cpu(udev->descriptor.idVendor) != (u16)vendor)
			return -ENODEV;
		if (product && le16_to_cpu(udev->descriptor.idProduct) != (u16)product)
			return -ENODEV;
		dev_info(&intf->dev, "matched module params, "
					"vend=0x%04x prod=0x%04x\n",
				le16_to_cpu(udev->descriptor.idVendor),
				le16_to_cpu(udev->descriptor.idProduct));
	}
#endif

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	info = (struct usbtest_info *) id->driver_info;
	dev->info = info;
	mutex_init(&dev->lock);

	dev->intf = intf;

	/* cacheline-aligned scratch for i/o */
	dev->buf = kmalloc(TBUF_SIZE, GFP_KERNEL);
	if (dev->buf == NULL) {
		kfree(dev);
		return -ENOMEM;
	}

	/* NOTE this doesn't yet test the handful of differences that are
	 * visible with high speed interrupts: bigger maxpacket (1K) and
	 * "high bandwidth" modes (up to 3 packets/uframe).
	 */
	rtest = wtest = "";
	irtest = iwtest = "";
	if (force_interrupt || udev->speed == USB_SPEED_LOW) {
		if (info->ep_in) {
			dev->in_pipe = usb_rcvintpipe(udev, info->ep_in);
			rtest = " intr-in";
		}
		if (info->ep_out) {
			dev->out_pipe = usb_sndintpipe(udev, info->ep_out);
			wtest = " intr-out";
		}
	} else {
		if (info->autoconf) {
			int status;

			status = get_endpoints(dev, intf);
			if (status < 0) {
				WARNING(dev, "couldn't get endpoints, %d\n",
						status);
				/* don't leak dev or its scratch buffer */
				kfree(dev->buf);
				kfree(dev);
				return status;
			}
			/* may find bulk or ISO pipes */
		} else {
			if (info->ep_in)
				dev->in_pipe = usb_rcvbulkpipe(udev,
							info->ep_in);
			if (info->ep_out)
				dev->out_pipe = usb_sndbulkpipe(udev,
							info->ep_out);
		}
		if (dev->in_pipe)
			rtest = " bulk-in";
		if (dev->out_pipe)
			wtest = " bulk-out";
		if (dev->in_iso_pipe)
			irtest = " iso-in";
		if (dev->out_iso_pipe)
			iwtest = " iso-out";
	}

	usb_set_intfdata(intf, dev);
	dev_info(&intf->dev, "%s\n", info->name);
	dev_info(&intf->dev, "%s {control%s%s%s%s%s} tests%s\n",
			usb_speed_string(udev->speed),
			info->ctrl_out ? " in/out" : "",
			rtest, wtest,
			irtest, iwtest,
			info->alt >= 0 ? " (+alt)" : "");
	return 0;
}

static int usbtest_suspend(struct usb_interface *intf, pm_message_t message)
{
	return 0;
}

static int usbtest_resume(struct usb_interface *intf)
{
	return 0;
}

static void usbtest_disconnect(struct usb_interface *intf)
{
	struct usbtest_dev *dev = usb_get_intfdata(intf);

	usb_set_intfdata(intf, NULL);
	dev_dbg(&intf->dev, "disconnect\n");
	kfree(dev);
}

/* Basic testing only needs a device that can source or sink bulk traffic.
 * Any device can test control transfers (default with GENERIC binding).
 *
 * Several entries work with the default EP0 implementation that's built
 * into EZ-USB chips. There's a default vendor ID which can be overridden
 * by (very) small config EEPROMS, but otherwise all these devices act
 * identically until firmware is loaded: only EP0 works. It turns out
 * to be easy to make other endpoints work, without modifying that EP0
 * behavior. For now, we expect that kind of firmware.
 */

/* an21xx or fx versions of ez-usb */
static struct usbtest_info ez1_info = {
	.name		= "EZ-USB device",
	.ep_in		= 2,
	.ep_out		= 2,
	.alt		= 1,
};

/* fx2 version of ez-usb */
static struct usbtest_info ez2_info = {
	.name		= "FX2 device",
	.ep_in		= 6,
	.ep_out		= 2,
	.alt		= 1,
};

/* ezusb family device with dedicated usb test firmware */
static struct usbtest_info fw_info = {
	.name		= "usb test device",
	.ep_in		= 2,
	.ep_out		= 2,
	.alt		= 1,
	.autoconf	= 1,	/* iso and ctrl_out need autoconf */
	.ctrl_out	= 1,
	.iso		= 1,	/* iso_ep's are #8 in/out */
};

/* peripheral running Linux and 'zero.c' test firmware, or
 * its user-mode cousin. different versions of this use
 * different hardware with the same vendor/product codes.
 * host side MUST rely on the endpoint descriptors.
 */
static struct usbtest_info gz_info = {
	.name		= "Linux gadget zero",
	.autoconf	= 1,
	.ctrl_out	= 1,
	.alt		= 0,
};

static struct usbtest_info um_info = {
	.name		= "Linux user mode test driver",
	.autoconf	= 1,
	.alt		= -1,
};

static struct usbtest_info um2_info = {
	.name		= "Linux user mode ISO test driver",
	.autoconf	= 1,
	.iso		= 1,
	.alt		= -1,
};

#ifdef IBOT2
/* this is a nice source of high speed bulk data;
 * uses an FX2, with firmware provided in the device
 */
static struct usbtest_info ibot2_info = {
	.name		= "iBOT2 webcam",
	.ep_in		= 2,
	.alt		= -1,
};
#endif

#ifdef GENERIC
/* we can use any device to test control traffic */
static struct usbtest_info generic_info = {
	.name		= "Generic USB device",
	.alt		= -1,
};
#endif

static const struct usb_device_id id_table[] = {

	/*-------------------------------------------------------------*/

	/* EZ-USB devices which download firmware to replace (or in our
	 * case augment) the default device implementation.
	 */

	/* generic EZ-USB FX controller */
	{ USB_DEVICE(0x0547, 0x2235),
		.driver_info = (unsigned long) &ez1_info,
	},

	/* CY3671 development board with EZ-USB FX */
	{ USB_DEVICE(0x0547, 0x0080),
		.driver_info = (unsigned long) &ez1_info,
	},

	/* generic EZ-USB FX2 controller (or development board) */
	{ USB_DEVICE(0x04b4, 0x8613),
		.driver_info = (unsigned long) &ez2_info,
	},

	/* re-enumerated usb test device firmware */
	{ USB_DEVICE(0xfff0, 0xfff0),
		.driver_info = (unsigned long) &fw_info,
	},

	/* "Gadget Zero" firmware runs under Linux */
	{ USB_DEVICE(0x0525, 0xa4a0),
		.driver_info = (unsigned long) &gz_info,
	},

	/* so does a user-mode variant */
	{ USB_DEVICE(0x0525, 0xa4a4),
		.driver_info = (unsigned long) &um_info,
	},

	/* ... and a user-mode variant that talks iso */
	{ USB_DEVICE(0x0525, 0xa4a3),
		.driver_info = (unsigned long) &um2_info,
	},

#ifdef KEYSPAN_19Qi
	/* Keyspan 19qi uses an21xx (original EZ-USB) */
	/* this does not coexist with the real Keyspan 19qi driver! */
	{ USB_DEVICE(0x06cd, 0x010b),
		.driver_info = (unsigned long) &ez1_info,
	},
#endif

	/*-------------------------------------------------------------*/

#ifdef IBOT2
	/* iBOT2 makes a nice source of high speed bulk-in data */
	/* this does not coexist with a real iBOT2 driver! */
	{ USB_DEVICE(0x0b62, 0x0059),
		.driver_info = (unsigned long) &ibot2_info,
	},
#endif

	/*-------------------------------------------------------------*/

#ifdef GENERIC
	/* module params can specify devices to use for control tests */
	{ .driver_info = (unsigned long) &generic_info, },
#endif

	/*-------------------------------------------------------------*/

	{ }
};
MODULE_DEVICE_TABLE(usb, id_table);

static struct usb_driver usbtest_driver = {
	.name		= "usbtest",
	.id_table	= id_table,
	.probe		= usbtest_probe,
	.unlocked_ioctl	= usbtest_ioctl,
	.disconnect	= usbtest_disconnect,
	.suspend	= usbtest_suspend,
	.resume		= usbtest_resume,
};

/*-------------------------------------------------------------------------*/

static int __init usbtest_init(void)
{
#ifdef GENERIC
	if (vendor)
		pr_debug("params: vend=0x%04x prod=0x%04x\n", vendor, product);
#endif
	return usb_register(&usbtest_driver);
}
module_init(usbtest_init);

static void __exit usbtest_exit(void)
{
	usb_deregister(&usbtest_driver);
}
module_exit(usbtest_exit);

MODULE_DESCRIPTION("USB Core/HCD Testing Driver");
MODULE_LICENSE("GPL");