/* usbtest.c */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/mutex.h>
#include <linux/usb.h>

/*-------------------------------------------------------------------------*/

/* FIXME make these public somewhere; usbdevfs.h? */
struct usbtest_param {
	/* inputs */
	unsigned		test_num;	/* 0..(TEST_CASES-1) */
	unsigned		iterations;
	unsigned		length;
	unsigned		vary;
	unsigned		sglen;

	/* outputs */
	struct timeval		duration;
};
#define USBTEST_REQUEST	_IOWR('U', 100, struct usbtest_param)
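
/* Illustrative sketch (not part of the driver): user space reaches this
 * ioctl through usbfs, by wrapping USBTEST_REQUEST in USBDEVFS_IOCTL on
 * the device node the test interface is bound to -- roughly what the
 * testusb utility does.  The values and path below are assumptions made
 * for clarity, not required settings.
 *
 *	struct usbtest_param param = {
 *		.test_num = 4, .iterations = 1000, .length = 512,
 *	};
 *	struct usbdevfs_ioctl wrapper = {
 *		.ifno = 0,			// interface under test
 *		.ioctl_code = USBTEST_REQUEST,
 *		.data = &param,
 *	};
 *	int fd = open("/dev/bus/usb/001/002", O_RDWR);	// hypothetical node
 *	ioctl(fd, USBDEVFS_IOCTL, &wrapper);
 */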

/*-------------------------------------------------------------------------*/

#define	GENERIC		/* let probe() bind using module params */

/* Some devices that can be used for testing will have "real" drivers.
 * Entries for those need to be enabled here by hand, after disabling
 * that "real" driver.
 */
//#define	IBOT2		/* grab iBOT2 webcams */
//#define	KEYSPAN_19Qi	/* grab un-renumerated serial adapter */

/*-------------------------------------------------------------------------*/

struct usbtest_info {
	const char		*name;
	u8			ep_in;		/* bulk/intr source */
	u8			ep_out;		/* bulk/intr sink */
	unsigned		autoconf:1;
	unsigned		ctrl_out:1;
	unsigned		iso:1;		/* try iso in/out */
	int			alt;
};

/* this is accessed only through usbfs ioctl calls.
 * one ioctl to issue a test ... one lock per device.
 * tests create other threads if they need them.
 * urbs and buffers are allocated dynamically,
 * and data generated deterministically.
 */
struct usbtest_dev {
	struct usb_interface	*intf;
	struct usbtest_info	*info;
	int			in_pipe;
	int			out_pipe;
	int			in_iso_pipe;
	int			out_iso_pipe;
	struct usb_endpoint_descriptor	*iso_in, *iso_out;
	struct mutex		lock;

#define TBUF_SIZE	256
	u8			*buf;
};

static struct usb_device *testdev_to_usbdev(struct usbtest_dev *test)
{
	return interface_to_usbdev(test->intf);
}

/* set up all urbs so they can be used with either bulk or interrupt */
#define	INTERRUPT_RATE		1	/* msec/transfer */

#define ERROR(tdev, fmt, args...) \
	dev_err(&(tdev)->intf->dev , fmt , ## args)
#define WARNING(tdev, fmt, args...) \
	dev_warn(&(tdev)->intf->dev , fmt , ## args)

#define GUARD_BYTE	0xA5

/*-------------------------------------------------------------------------*/

static int
get_endpoints(struct usbtest_dev *dev, struct usb_interface *intf)
{
	int				tmp;
	struct usb_host_interface	*alt;
	struct usb_host_endpoint	*in, *out;
	struct usb_host_endpoint	*iso_in, *iso_out;
	struct usb_device		*udev;

	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
		unsigned	ep;

		in = out = NULL;
		iso_in = iso_out = NULL;
		alt = intf->altsetting + tmp;

		/* take the first altsetting with in-bulk + out-bulk;
		 * ignore other endpoints and altsettings.
		 */
		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
			struct usb_host_endpoint	*e;

			e = alt->endpoint + ep;
			switch (e->desc.bmAttributes) {
			case USB_ENDPOINT_XFER_BULK:
				break;
			case USB_ENDPOINT_XFER_ISOC:
				if (dev->info->iso)
					goto try_iso;
				/* FALLTHROUGH */
			default:
				continue;
			}
			if (usb_endpoint_dir_in(&e->desc)) {
				if (!in)
					in = e;
			} else {
				if (!out)
					out = e;
			}
			continue;
try_iso:
			if (usb_endpoint_dir_in(&e->desc)) {
				if (!iso_in)
					iso_in = e;
			} else {
				if (!iso_out)
					iso_out = e;
			}
		}
		if ((in && out) || iso_in || iso_out)
			goto found;
	}
	return -EINVAL;

found:
	udev = testdev_to_usbdev(dev);
	if (alt->desc.bAlternateSetting != 0) {
		tmp = usb_set_interface(udev,
				alt->desc.bInterfaceNumber,
				alt->desc.bAlternateSetting);
		if (tmp < 0)
			return tmp;
	}

	if (in) {
		dev->in_pipe = usb_rcvbulkpipe(udev,
			in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
		dev->out_pipe = usb_sndbulkpipe(udev,
			out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
	}
	if (iso_in) {
		dev->iso_in = &iso_in->desc;
		dev->in_iso_pipe = usb_rcvisocpipe(udev,
				iso_in->desc.bEndpointAddress
					& USB_ENDPOINT_NUMBER_MASK);
	}

	if (iso_out) {
		dev->iso_out = &iso_out->desc;
		dev->out_iso_pipe = usb_sndisocpipe(udev,
				iso_out->desc.bEndpointAddress
					& USB_ENDPOINT_NUMBER_MASK);
	}
	return 0;
}

/*-------------------------------------------------------------------------*/

/* Support for testing basic non-queued I/O streams.
 *
 * These just package urbs as requests that can be easily canceled.
 * Each urb's data buffer is dynamically allocated; callers can fill
 * them with non-zero test data (or test for it) when appropriate.
 */

static void simple_callback(struct urb *urb)
{
	complete(urb->context);
}

static struct urb *usbtest_alloc_urb(
	struct usb_device	*udev,
	int			pipe,
	unsigned long		bytes,
	unsigned		transfer_flags,
	unsigned		offset)
{
	struct urb		*urb;

	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb)
		return urb;
	usb_fill_bulk_urb(urb, udev, pipe, NULL, bytes, simple_callback, NULL);
	urb->interval = (udev->speed == USB_SPEED_HIGH)
			? (INTERRUPT_RATE << 3)
			: INTERRUPT_RATE;
	urb->transfer_flags = transfer_flags;
	if (usb_pipein(pipe))
		urb->transfer_flags |= URB_SHORT_NOT_OK;

	if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
		urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
			GFP_KERNEL, &urb->transfer_dma);
	else
		urb->transfer_buffer = kmalloc(bytes + offset, GFP_KERNEL);
	if (!urb->transfer_buffer) {
		usb_free_urb(urb);
		return NULL;
	}

	/* To test unaligned transfers add an offset and fill the
	 * unused memory with a guard value */
	if (offset) {
		memset(urb->transfer_buffer, GUARD_BYTE, offset);
		urb->transfer_buffer += offset;
		if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
			urb->transfer_dma += offset;
	}

	/* For inbound transfers use guard byte so that test fails if
	 * data not correctly copied */
	memset(urb->transfer_buffer,
			usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
			bytes);
	return urb;
}
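
/* Buffer layout produced by usbtest_alloc_urb(): an 'offset'-byte prefix of
 * GUARD_BYTE followed by the payload.  check_guard_bytes() later verifies
 * that prefix, so the unaligned-transfer tests catch anything written
 * outside the payload.  simple_alloc_urb() below is the aligned
 * (offset == 0) flavour used by most tests.
 */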

static struct urb *simple_alloc_urb(
	struct usb_device	*udev,
	int			pipe,
	unsigned long		bytes)
{
	return usbtest_alloc_urb(udev, pipe, bytes, URB_NO_TRANSFER_DMA_MAP, 0);
}

static unsigned pattern;
static unsigned mod_pattern;
module_param_named(pattern, mod_pattern, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(mod_pattern, "i/o pattern (0 == zeroes)");
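
/* Usage sketch: the mod63 pattern can be selected at module load time, e.g.
 *	modprobe usbtest pattern=1
 * or later through the writable sysfs parameter named by
 * module_param_named() above.
 */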

static inline void simple_fill_buf(struct urb *urb)
{
	unsigned	i;
	u8		*buf = urb->transfer_buffer;
	unsigned	len = urb->transfer_buffer_length;

	switch (pattern) {
	default:
		/* FALLTHROUGH */
	case 0:
		memset(buf, 0, len);
		break;
	case 1:			/* mod63 */
		for (i = 0; i < len; i++)
			*buf++ = (u8) (i % 63);
		break;
	}
}

static inline unsigned long buffer_offset(void *buf)
{
	return (unsigned long)buf & (ARCH_KMALLOC_MINALIGN - 1);
}

static int check_guard_bytes(struct usbtest_dev *tdev, struct urb *urb)
{
	u8 *buf = urb->transfer_buffer;
	u8 *guard = buf - buffer_offset(buf);
	unsigned i;

	for (i = 0; guard < buf; i++, guard++) {
		if (*guard != GUARD_BYTE) {
			ERROR(tdev, "guard byte[%d] %d (not %d)\n",
				i, *guard, GUARD_BYTE);
			return -EINVAL;
		}
	}
	return 0;
}

static int simple_check_buf(struct usbtest_dev *tdev, struct urb *urb)
{
	unsigned	i;
	u8		expected;
	u8		*buf = urb->transfer_buffer;
	unsigned	len = urb->actual_length;

	int ret = check_guard_bytes(tdev, urb);
	if (ret)
		return ret;

	for (i = 0; i < len; i++, buf++) {
		switch (pattern) {
		/* all-zeroes has no synchronization issues */
		case 0:
			expected = 0;
			break;
		/* mod63 stays in sync with short-terminated transfers,
		 * or otherwise when host and gadget agree on how large
		 * each usb transfer request should be.  resync is done
		 * with set_interface or set_config.
		 */
		case 1:			/* mod63 */
			expected = i % 63;
			break;
		/* always fail unsupported patterns */
		default:
			expected = !*buf;
			break;
		}
		if (*buf == expected)
			continue;
		ERROR(tdev, "buf[%d] = %d (not %d)\n", i, *buf, expected);
		return -EINVAL;
	}
	return 0;
}

static void simple_free_urb(struct urb *urb)
{
	unsigned long offset = buffer_offset(urb->transfer_buffer);

	if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
		usb_free_coherent(
			urb->dev,
			urb->transfer_buffer_length + offset,
			urb->transfer_buffer - offset,
			urb->transfer_dma - offset);
	else
		kfree(urb->transfer_buffer - offset);
	usb_free_urb(urb);
}

static int simple_io(
	struct usbtest_dev	*tdev,
	struct urb		*urb,
	int			iterations,
	int			vary,
	int			expected,
	const char		*label
)
{
	struct usb_device	*udev = urb->dev;
	int			max = urb->transfer_buffer_length;
	struct completion	completion;
	int			retval = 0;

	urb->context = &completion;
	while (retval == 0 && iterations-- > 0) {
		init_completion(&completion);
		if (usb_pipeout(urb->pipe)) {
			simple_fill_buf(urb);
			urb->transfer_flags |= URB_ZERO_PACKET;
		}
		retval = usb_submit_urb(urb, GFP_KERNEL);
		if (retval != 0)
			break;

		/* NOTE: no timeouts; can't be broken out of by interrupt */
		wait_for_completion(&completion);
		retval = urb->status;
		urb->dev = udev;
		if (retval == 0 && usb_pipein(urb->pipe))
			retval = simple_check_buf(tdev, urb);

		if (vary) {
			int	len = urb->transfer_buffer_length;

			len += vary;
			len %= max;
			if (len == 0)
				len = (vary < max) ? vary : max;
			urb->transfer_buffer_length = len;
		}

		/* FIXME if endpoint halted, clear halt (and log) */
	}
	urb->transfer_buffer_length = max;

	if (expected != retval)
		dev_err(&udev->dev,
			"%s failed, iterations left %d, status %d (not %d)\n",
				label, iterations, retval, expected);
	return retval;
}

/*-------------------------------------------------------------------------*/

/* We use scatterlist primitives to test queued I/O.
 * Yes, this also tests the scatterlist primitives.
 */

static void free_sglist(struct scatterlist *sg, int nents)
{
	unsigned	i;

	if (!sg)
		return;
	for (i = 0; i < nents; i++) {
		if (!sg_page(&sg[i]))
			continue;
		kfree(sg_virt(&sg[i]));
	}
	kfree(sg);
}

static struct scatterlist *
alloc_sglist(int nents, int max, int vary)
{
	struct scatterlist	*sg;
	unsigned		i;
	unsigned		size = max;

	sg = kmalloc_array(nents, sizeof *sg, GFP_KERNEL);
	if (!sg)
		return NULL;
	sg_init_table(sg, nents);

	for (i = 0; i < nents; i++) {
		char		*buf;
		unsigned	j;

		buf = kzalloc(size, GFP_KERNEL);
		if (!buf) {
			free_sglist(sg, i);
			return NULL;
		}

		/* kmalloc pages are always physically contiguous! */
		sg_set_buf(&sg[i], buf, size);

		switch (pattern) {
		case 0:
			/* already zeroed */
			break;
		case 1:
			for (j = 0; j < size; j++)
				*buf++ = (u8) (j % 63);
			break;
		}

		if (vary) {
			size += vary;
			size %= max;
			if (size == 0)
				size = (vary < max) ? vary : max;
		}
	}

	return sg;
}

static int perform_sglist(
	struct usbtest_dev	*tdev,
	unsigned		iterations,
	int			pipe,
	struct usb_sg_request	*req,
	struct scatterlist	*sg,
	int			nents
)
{
	struct usb_device	*udev = testdev_to_usbdev(tdev);
	int			retval = 0;

	while (retval == 0 && iterations-- > 0) {
		retval = usb_sg_init(req, udev, pipe,
				(udev->speed == USB_SPEED_HIGH)
					? (INTERRUPT_RATE << 3)
					: INTERRUPT_RATE,
				sg, nents, 0, GFP_KERNEL);

		if (retval)
			break;
		usb_sg_wait(req);
		retval = req->status;

		/* FIXME check resulting data pattern */

		/* FIXME if endpoint halted, clear halt (and log) */
	}

	/* FIXME for unlink or fault handling tests, don't report
	 * failure if retval is as we expected ...
	 */
	if (retval)
		ERROR(tdev, "perform_sglist failed, "
				"iterations left %d, status %d\n",
				iterations, retval);
	return retval;
}

/*-------------------------------------------------------------------------*/

/* unqueued control message testing
 *
 * there's a nice set of device functional requirements in chapter 9 of the
 * usb 2.0 spec, which we can apply to ANY device, even ones that don't use
 * special test firmware.
 *
 * we know the device is configured (or suspended) by the time it's visible
 * through usbfs.  we can't change that, so we won't test enumeration (which
 * worked 'well enough' to get here, this time), power management (ditto),
 * or remote wakeup (which needs human interaction).
 */

static unsigned realworld = 1;
module_param(realworld, uint, 0);
MODULE_PARM_DESC(realworld, "clear to demand stricter spec compliance");
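
/* Usage sketch: loading with "modprobe usbtest realworld=0" enables the
 * stricter chapter 9 checks that this flag guards in the tests below.
 */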

static int get_altsetting(struct usbtest_dev *dev)
{
	struct usb_interface	*iface = dev->intf;
	struct usb_device	*udev = interface_to_usbdev(iface);
	int			retval;

	retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			USB_REQ_GET_INTERFACE, USB_DIR_IN|USB_RECIP_INTERFACE,
			0, iface->altsetting[0].desc.bInterfaceNumber,
			dev->buf, 1, USB_CTRL_GET_TIMEOUT);
	switch (retval) {
	case 1:
		return dev->buf[0];
	case 0:
		retval = -ERANGE;
		/* FALLTHROUGH */
	default:
		return retval;
	}
}

static int set_altsetting(struct usbtest_dev *dev, int alternate)
{
	struct usb_interface	*iface = dev->intf;
	struct usb_device	*udev;

	if (alternate < 0 || alternate >= 256)
		return -EINVAL;

	udev = interface_to_usbdev(iface);
	return usb_set_interface(udev,
			iface->altsetting[0].desc.bInterfaceNumber,
			alternate);
}

static int is_good_config(struct usbtest_dev *tdev, int len)
{
	struct usb_config_descriptor	*config;

	if (len < sizeof *config)
		return 0;
	config = (struct usb_config_descriptor *) tdev->buf;

	switch (config->bDescriptorType) {
	case USB_DT_CONFIG:
	case USB_DT_OTHER_SPEED_CONFIG:
		if (config->bLength != 9) {
			ERROR(tdev, "bogus config descriptor length\n");
			return 0;
		}
		/* this bit 'must be 1' but often isn't */
		if (!realworld && !(config->bmAttributes & 0x80)) {
			ERROR(tdev, "high bit of config attributes not set\n");
			return 0;
		}
		if (config->bmAttributes & 0x1f) {	/* reserved == 0 */
			ERROR(tdev, "reserved config bits set\n");
			return 0;
		}
		break;
	default:
		return 0;
	}

	if (le16_to_cpu(config->wTotalLength) == len)	/* read it all */
		return 1;
	if (le16_to_cpu(config->wTotalLength) >= TBUF_SIZE)	/* max partial read */
		return 1;
	ERROR(tdev, "bogus config descriptor read size\n");
	return 0;
}

/* sanity test for standard requests working with usb_control_msg() and some
 * of the utility functions which use it.
 *
 * this doesn't test how endpoint halts behave or data toggles get set, since
 * we won't do I/O to bulk/interrupt endpoints here (which is how to change
 * halt or toggle).  toggle testing is impractical without support from hcds.
 *
 * this avoids failing devices linux would normally work with, by not testing
 * config/altsetting operations for devices that only support their defaults.
 * such devices rarely support those needless operations.
 *
 * NOTE that since this is a sanity test, it's not examining boundary cases
 * to see if usbcore, hcd, and device all behave right.  such testing would
 * involve varied read sizes and other operation sequences.
 */
static int ch9_postconfig(struct usbtest_dev *dev)
{
	struct usb_interface	*iface = dev->intf;
	struct usb_device	*udev = interface_to_usbdev(iface);
	int			i, alt, retval;

	/* [9.2.3] if there's more than one altsetting, we need to be able to
	 * set and get each one.  mostly trusts the descriptors from usbcore.
	 */
	for (i = 0; i < iface->num_altsetting; i++) {

		/* 9.2.3 constrains the range here */
		alt = iface->altsetting[i].desc.bAlternateSetting;
		if (alt < 0 || alt >= iface->num_altsetting) {
			dev_err(&iface->dev,
					"invalid alt [%d].bAltSetting = %d\n",
					i, alt);
		}

		/* [real world] get/set unimplemented if there's only one */
		if (realworld && iface->num_altsetting == 1)
			continue;

		/* [9.4.10] set_interface */
		retval = set_altsetting(dev, alt);
		if (retval) {
			dev_err(&iface->dev, "can't set_interface = %d, %d\n",
					alt, retval);
			return retval;
		}

		/* [9.4.4] get_interface always works */
		retval = get_altsetting(dev);
		if (retval != alt) {
			dev_err(&iface->dev, "get alt should be %d, was %d\n",
					alt, retval);
			return (retval < 0) ? retval : -EDOM;
		}
	}

	/* [real world] get_config unimplemented if there's only one */
	if (!realworld || udev->descriptor.bNumConfigurations != 1) {
		int	expected = udev->actconfig->desc.bConfigurationValue;

		/* [9.4.2] get_configuration always works
		 * ... although some cheap devices (like one TI Hub I've got)
		 * won't return config descriptors except before set_config.
		 */
		retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
				USB_REQ_GET_CONFIGURATION,
				USB_DIR_IN | USB_RECIP_DEVICE,
				0, 0, dev->buf, 1, USB_CTRL_GET_TIMEOUT);
		if (retval != 1 || dev->buf[0] != expected) {
			dev_err(&iface->dev, "get config --> %d %d (1 %d)\n",
				retval, dev->buf[0], expected);
			return (retval < 0) ? retval : -EDOM;
		}
	}

	/* there's always [9.4.3] a device descriptor [9.6.1] */
	retval = usb_get_descriptor(udev, USB_DT_DEVICE, 0,
			dev->buf, sizeof udev->descriptor);
	if (retval != sizeof udev->descriptor) {
		dev_err(&iface->dev, "dev descriptor --> %d\n", retval);
		return (retval < 0) ? retval : -EDOM;
	}

	/* there's always [9.4.3] at least one config descriptor [9.6.3] */
	for (i = 0; i < udev->descriptor.bNumConfigurations; i++) {
		retval = usb_get_descriptor(udev, USB_DT_CONFIG, i,
				dev->buf, TBUF_SIZE);
		if (!is_good_config(dev, retval)) {
			dev_err(&iface->dev,
					"config [%d] descriptor --> %d\n",
					i, retval);
			return (retval < 0) ? retval : -EDOM;
		}

		/* FIXME cross-checking udev->config[i] to make sure usbcore
		 * parsed it right (etc) would be good testing paranoia
		 */
	}

	/* and sometimes [9.2.6.6] speed dependent descriptors */
	if (le16_to_cpu(udev->descriptor.bcdUSB) == 0x0200) {
		struct usb_qualifier_descriptor *d = NULL;

		/* device qualifier [9.6.2] */
		retval = usb_get_descriptor(udev,
				USB_DT_DEVICE_QUALIFIER, 0, dev->buf,
				sizeof(struct usb_qualifier_descriptor));
		if (retval == -EPIPE) {
			if (udev->speed == USB_SPEED_HIGH) {
				dev_err(&iface->dev,
						"hs dev qualifier --> %d\n",
						retval);
				return (retval < 0) ? retval : -EDOM;
			}
			/* usb2.0 but not high-speed capable; fine */
		} else if (retval != sizeof(struct usb_qualifier_descriptor)) {
			dev_err(&iface->dev, "dev qualifier --> %d\n", retval);
			return (retval < 0) ? retval : -EDOM;
		} else
			d = (struct usb_qualifier_descriptor *) dev->buf;

		/* might not have [9.6.2] any other-speed configs [9.6.4] */
		if (d) {
			unsigned max = d->bNumConfigurations;
			for (i = 0; i < max; i++) {
				retval = usb_get_descriptor(udev,
					USB_DT_OTHER_SPEED_CONFIG, i,
					dev->buf, TBUF_SIZE);
				if (!is_good_config(dev, retval)) {
					dev_err(&iface->dev,
						"other speed config --> %d\n",
						retval);
					return (retval < 0) ? retval : -EDOM;
				}
			}
		}
	}
	/* FIXME fetch strings from at least the device descriptor */

	/* [9.4.5] get_status always works */
	retval = usb_get_status(udev, USB_RECIP_DEVICE, 0, dev->buf);
	if (retval != 2) {
		dev_err(&iface->dev, "get dev status --> %d\n", retval);
		return (retval < 0) ? retval : -EDOM;
	}

	/* FIXME configuration.bmAttributes says if we could try to set/clear
	 * the device's remote wakeup feature ... if we can, test that here
	 */

	retval = usb_get_status(udev, USB_RECIP_INTERFACE,
			iface->altsetting[0].desc.bInterfaceNumber, dev->buf);
	if (retval != 2) {
		dev_err(&iface->dev, "get interface status --> %d\n", retval);
		return (retval < 0) ? retval : -EDOM;
	}
	/* FIXME get status for each endpoint in the interface */

	return 0;
}

/*-------------------------------------------------------------------------*/

/* use ch9 requests to test whether:
 *   (a) queues work for control, keeping N subtests queued and
 *       active (auto-resubmit) for M loops through the queue.
 *   (b) protocol stalls (control-only) will autorecover.
 *       it's not like bulk/intr; no halt clearing.
 *   (c) short control reads are reported and handled.
 *   (d) queues are always processed in-order
 */

struct ctrl_ctx {
	spinlock_t		lock;
	struct usbtest_dev	*dev;
	struct completion	complete;
	unsigned		count;
	unsigned		pending;
	int			status;
	struct urb		**urb;
	struct usbtest_param	*param;
	int			last;
};

#define NUM_SUBCASES	15	/* how many test subcases here? */

struct subcase {
	struct usb_ctrlrequest	setup;
	int			number;
	int			expected;
};

static void ctrl_complete(struct urb *urb)
{
	struct ctrl_ctx		*ctx = urb->context;
	struct usb_ctrlrequest	*reqp;
	struct subcase		*subcase;
	int			status = urb->status;

	reqp = (struct usb_ctrlrequest *)urb->setup_packet;
	subcase = container_of(reqp, struct subcase, setup);

	spin_lock(&ctx->lock);
	ctx->count--;
	ctx->pending--;

	/* queue must transfer and complete in fifo order, unless
	 * usb_unlink_urb() is used to unlink something not at the
	 * physical queue head (not tested).
	 */
	if (subcase->number > 0) {
		if ((subcase->number - ctx->last) != 1) {
			ERROR(ctx->dev,
				"subcase %d completed out of order, last %d\n",
				subcase->number, ctx->last);
			status = -EDOM;
			ctx->last = subcase->number;
			goto error;
		}
	}
	ctx->last = subcase->number;

	/* succeed or fault in only one way? */
	if (status == subcase->expected)
		status = 0;

	/* async unlink for cleanup? */
	else if (status != -ECONNRESET) {

		/* some faults are allowed, not required */
		if (subcase->expected > 0 && (
			  ((status == -subcase->expected	/* happened */
			   || status == 0))))			/* didn't */
			status = 0;
		/* sometimes more than one fault is allowed */
		else if (subcase->number == 12 && status == -EPIPE)
			status = 0;
		else
			ERROR(ctx->dev, "subtest %d error, status %d\n",
					subcase->number, status);
	}

	/* unexpected status codes mean errors; ideally, in hardware */
	if (status) {
error:
		if (ctx->status == 0) {
			int		i;

			ctx->status = status;
			ERROR(ctx->dev, "control queue %02x.%02x, err %d, "
					"%d left, subcase %d, len %d/%d\n",
					reqp->bRequestType, reqp->bRequest,
					status, ctx->count, subcase->number,
					urb->actual_length,
					urb->transfer_buffer_length);

			/* FIXME this "unlink everything" exit route should
			 * be a separate test case.
			 */

			/* unlink whatever's still pending */
			for (i = 1; i < ctx->param->sglen; i++) {
				struct urb	*u = ctx->urb[
							(i + subcase->number)
							% ctx->param->sglen];

				if (u == urb || !u->dev)
					continue;
				spin_unlock(&ctx->lock);
				status = usb_unlink_urb(u);
				spin_lock(&ctx->lock);
				switch (status) {
				case -EINPROGRESS:
				case -EBUSY:
				case -EIDRM:
					continue;
				default:
					ERROR(ctx->dev, "urb unlink --> %d\n",
							status);
				}
			}
			status = ctx->status;
		}
	}

	/* resubmit if we need to, else mark this as done */
	if ((status == 0) && (ctx->pending < ctx->count)) {
		status = usb_submit_urb(urb, GFP_ATOMIC);
		if (status != 0) {
			ERROR(ctx->dev,
				"can't resubmit ctrl %02x.%02x, err %d\n",
				reqp->bRequestType, reqp->bRequest, status);
			urb->dev = NULL;
		} else
			ctx->pending++;
	} else
		urb->dev = NULL;

	/* signal completion when nothing's queued */
	if (ctx->pending == 0)
		complete(&ctx->complete);
	spin_unlock(&ctx->lock);
}

static int
test_ctrl_queue(struct usbtest_dev *dev, struct usbtest_param *param)
{
	struct usb_device	*udev = testdev_to_usbdev(dev);
	struct urb		**urb;
	struct ctrl_ctx		context;
	int			i;

	if (param->sglen == 0 || param->iterations > UINT_MAX / param->sglen)
		return -EOPNOTSUPP;

	spin_lock_init(&context.lock);
	context.dev = dev;
	init_completion(&context.complete);
	context.count = param->sglen * param->iterations;
	context.pending = 0;
	context.status = -ENOMEM;
	context.param = param;
	context.last = -1;

	/* allocate and init the urbs we'll queue.
	 * as with bulk/intr sglists, sglen is the queue depth; it also
	 * controls which subtests run (more tests than sglen) or rerun.
	 */
	urb = kcalloc(param->sglen, sizeof(struct urb *), GFP_KERNEL);
	if (!urb)
		return -ENOMEM;
	for (i = 0; i < param->sglen; i++) {
		int			pipe = usb_rcvctrlpipe(udev, 0);
		unsigned		len;
		struct urb		*u;
		struct usb_ctrlrequest	req;
		struct subcase		*reqp;

		/* sign of this variable means:
		 *  -: tested code must return this (negative) error code
		 *  +: tested code may return this (negative too) error code
		 */
		int			expected = 0;

		/* requests here are mostly expected to succeed on any
		 * device, but some are chosen to trigger protocol stalls
		 * or short reads.
		 */
		memset(&req, 0, sizeof req);
		req.bRequest = USB_REQ_GET_DESCRIPTOR;
		req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;

		switch (i % NUM_SUBCASES) {
		case 0:		/* get device descriptor */
			req.wValue = cpu_to_le16(USB_DT_DEVICE << 8);
			len = sizeof(struct usb_device_descriptor);
			break;
		case 1:		/* get first config descriptor (only) */
			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
			len = sizeof(struct usb_config_descriptor);
			break;
		case 2:		/* get altsetting (OFTEN STALLS) */
			req.bRequest = USB_REQ_GET_INTERFACE;
			req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
			/* index = 0 means first interface */
			len = 1;
			expected = EPIPE;
			break;
		case 3:		/* get interface status */
			req.bRequest = USB_REQ_GET_STATUS;
			req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
			/* interface 0 */
			len = 2;
			break;
		case 4:		/* get device status */
			req.bRequest = USB_REQ_GET_STATUS;
			req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
			len = 2;
			break;
		case 5:		/* get device qualifier (MAY STALL) */
			req.wValue = cpu_to_le16(USB_DT_DEVICE_QUALIFIER << 8);
			len = sizeof(struct usb_qualifier_descriptor);
			if (udev->speed != USB_SPEED_HIGH)
				expected = EPIPE;
			break;
		case 6:		/* get first config descriptor, plus interface */
			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
			len = sizeof(struct usb_config_descriptor);
			len += sizeof(struct usb_interface_descriptor);
			break;
		case 7:		/* get interface descriptor (ALWAYS STALLS) */
			req.wValue = cpu_to_le16(USB_DT_INTERFACE << 8);
			/* interface == 0 */
			len = sizeof(struct usb_interface_descriptor);
			expected = -EPIPE;
			break;
		/* NOTE: two consecutive stalls in the queue here.
		 * that tests fault recovery a bit more aggressively. */
		case 8:		/* clear endpoint halt (MAY STALL) */
			req.bRequest = USB_REQ_CLEAR_FEATURE;
			req.bRequestType = USB_RECIP_ENDPOINT;
			/* wValue 0 == ep halt */
			/* wIndex 0 == ep0 (shouldn't halt!) */
			len = 0;
			pipe = usb_sndctrlpipe(udev, 0);
			expected = EPIPE;
			break;
		case 9:		/* get endpoint status */
			req.bRequest = USB_REQ_GET_STATUS;
			req.bRequestType = USB_DIR_IN|USB_RECIP_ENDPOINT;
			/* endpoint 0 */
			len = 2;
			break;
		case 10:	/* trigger short read (EREMOTEIO) */
			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
			len = 1024;
			expected = -EREMOTEIO;
			break;
		/* NOTE: two consecutive _different_ faults in the queue. */
		case 11:	/* get endpoint descriptor (ALWAYS STALLS) */
			req.wValue = cpu_to_le16(USB_DT_ENDPOINT << 8);
			/* endpoint == 0 */
			len = sizeof(struct usb_interface_descriptor);
			expected = EPIPE;
			break;
		/* NOTE: sometimes even a third fault in the queue! */
		case 12:	/* get string 0 descriptor (MAY STALL) */
			req.wValue = cpu_to_le16(USB_DT_STRING << 8);
			/* string == 0, for language IDs */
			len = sizeof(struct usb_interface_descriptor);
			/* may succeed when > 4 languages */
			expected = EREMOTEIO;	/* or EPIPE, if no strings */
			break;
		case 13:	/* short read, resembling case 10 */
			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
			/* last data packet "should" be DATA1, not DATA0 */
			if (udev->speed == USB_SPEED_SUPER)
				len = 1024 - 512;
			else
				len = 1024 - udev->descriptor.bMaxPacketSize0;
			expected = -EREMOTEIO;
			break;
		case 14:	/* short read; try to fill the last packet */
			req.wValue = cpu_to_le16((USB_DT_DEVICE << 8) | 0);
			/* device descriptor size == 18 bytes */
			len = udev->descriptor.bMaxPacketSize0;
			if (udev->speed == USB_SPEED_SUPER)
				len = 512;
			switch (len) {
			case 8:
				len = 24;
				break;
			case 16:
				len = 32;
				break;
			}
			expected = -EREMOTEIO;
			break;
		default:
			ERROR(dev, "bogus number of ctrl queue testcases!\n");
			context.status = -EINVAL;
			goto cleanup;
		}
		req.wLength = cpu_to_le16(len);
		urb[i] = u = simple_alloc_urb(udev, pipe, len);
		if (!u)
			goto cleanup;

		reqp = kmalloc(sizeof *reqp, GFP_KERNEL);
		if (!reqp)
			goto cleanup;
		reqp->setup = req;
		reqp->number = i % NUM_SUBCASES;
		reqp->expected = expected;
		u->setup_packet = (char *) &reqp->setup;
		u->context = &context;
		u->complete = ctrl_complete;
	}

	/* queue the urbs */
	context.urb = urb;
	spin_lock_irq(&context.lock);
	for (i = 0; i < param->sglen; i++) {
		context.status = usb_submit_urb(urb[i], GFP_ATOMIC);
		if (context.status != 0) {
			ERROR(dev, "can't submit urb[%d], status %d\n",
					i, context.status);
			context.count = context.pending;
			break;
		}
		context.pending++;
	}
	spin_unlock_irq(&context.lock);

	/* FIXME set timer and time out; provide a disconnect hook */

	/* wait for the last one to complete */
	if (context.pending > 0)
		wait_for_completion(&context.complete);

cleanup:
	for (i = 0; i < param->sglen; i++) {
		if (!urb[i])
			continue;
		urb[i]->dev = udev;
		kfree(urb[i]->setup_packet);
		simple_free_urb(urb[i]);
	}
	kfree(urb);
	return context.status;
}
#undef NUM_SUBCASES

/*-------------------------------------------------------------------------*/

static void unlink1_callback(struct urb *urb)
{
	int	status = urb->status;

	/* we "know" -EPIPE (stall) never happens */
	if (!status)
		status = usb_submit_urb(urb, GFP_ATOMIC);
	if (status) {
		urb->status = status;
		complete(urb->context);
	}
}
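
/* unlink1() below reports unexpected URB status by offsetting it: an async
 * unlink should finish with -ECONNRESET (otherwise retval - 1000 is
 * returned), while the synchronous usb_kill_urb() path should finish with
 * -ENOENT or -EPERM (otherwise retval - 2000), so callers can tell which
 * path misbehaved.
 */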

static int unlink1(struct usbtest_dev *dev, int pipe, int size, int async)
{
	struct urb		*urb;
	struct completion	completion;
	int			retval = 0;

	init_completion(&completion);
	urb = simple_alloc_urb(testdev_to_usbdev(dev), pipe, size);
	if (!urb)
		return -ENOMEM;
	urb->context = &completion;
	urb->complete = unlink1_callback;

	/* keep the endpoint busy.  there are lots of hc/hcd-internal
	 * states, and testing should get to all of them over time.
	 *
	 * FIXME want additional tests for when endpoint is STALLing
	 * due to errors, or is just NAKing requests.
	 */
	retval = usb_submit_urb(urb, GFP_KERNEL);
	if (retval != 0) {
		dev_err(&dev->intf->dev, "submit fail %d\n", retval);
		return retval;
	}

	/* unlinking that should always work.  variable delay tests more
	 * hcd states and code paths, even with little other system load.
	 */
	msleep(jiffies % (2 * INTERRUPT_RATE));
	if (async) {
		while (!completion_done(&completion)) {
			retval = usb_unlink_urb(urb);
			switch (retval) {
			case -EBUSY:
			case -EIDRM:
				/* we can't unlink urbs while they're completing
				 * or if they've completed, and we haven't
				 * resubmitted. "normal" drivers would prevent
				 * resubmission, but since we're testing unlink
				 * paths, we can't.
				 */
				ERROR(dev, "unlink retry\n");
				continue;
			case 0:
			case -EINPROGRESS:
				break;
			default:
				dev_err(&dev->intf->dev,
					"unlink fail %d\n", retval);
				return retval;
			}
			break;
		}
	} else
		usb_kill_urb(urb);

	wait_for_completion(&completion);
	retval = urb->status;
	simple_free_urb(urb);

	if (async)
		return (retval == -ECONNRESET) ? 0 : retval - 1000;
	else
		return (retval == -ENOENT || retval == -EPERM) ?
				0 : retval - 2000;
}

static int unlink_simple(struct usbtest_dev *dev, int pipe, int len)
{
	int			retval = 0;

	/* test sync and async paths */
	retval = unlink1(dev, pipe, len, 1);
	if (!retval)
		retval = unlink1(dev, pipe, len, 0);
	return retval;
}

/*-------------------------------------------------------------------------*/

struct queued_ctx {
	struct completion	complete;
	atomic_t		pending;
	unsigned		num;
	int			status;
	struct urb		**urbs;
};

static void unlink_queued_callback(struct urb *urb)
{
	int			status = urb->status;
	struct queued_ctx	*ctx = urb->context;

	if (ctx->status)
		goto done;
	if (urb == ctx->urbs[ctx->num - 4] || urb == ctx->urbs[ctx->num - 2]) {
		if (status == -ECONNRESET)
			goto done;
		/* What error should we report if the URB completed normally? */
	}
	if (status != 0)
		ctx->status = status;

done:
	if (atomic_dec_and_test(&ctx->pending))
		complete(&ctx->complete);
}

static int unlink_queued(struct usbtest_dev *dev, int pipe, unsigned num,
		unsigned size)
{
	struct queued_ctx	ctx;
	struct usb_device	*udev = testdev_to_usbdev(dev);
	void			*buf;
	dma_addr_t		buf_dma;
	int			i;
	int			retval = -ENOMEM;

	init_completion(&ctx.complete);
	atomic_set(&ctx.pending, 1);	/* One more than the actual value */
	ctx.num = num;
	ctx.status = 0;

	buf = usb_alloc_coherent(udev, size, GFP_KERNEL, &buf_dma);
	if (!buf)
		return retval;
	memset(buf, 0, size);

	/* Allocate and init the urbs we'll queue */
	ctx.urbs = kcalloc(num, sizeof(struct urb *), GFP_KERNEL);
	if (!ctx.urbs)
		goto free_buf;
	for (i = 0; i < num; i++) {
		ctx.urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
		if (!ctx.urbs[i])
			goto free_urbs;
		usb_fill_bulk_urb(ctx.urbs[i], udev, pipe, buf, size,
				unlink_queued_callback, &ctx);
		ctx.urbs[i]->transfer_dma = buf_dma;
		ctx.urbs[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
	}

	/* Submit all the URBs and then unlink URBs num - 4 and num - 2. */
	for (i = 0; i < num; i++) {
		atomic_inc(&ctx.pending);
		retval = usb_submit_urb(ctx.urbs[i], GFP_KERNEL);
		if (retval != 0) {
			dev_err(&dev->intf->dev, "submit urbs[%d] fail %d\n",
					i, retval);
			atomic_dec(&ctx.pending);
			ctx.status = retval;
			break;
		}
	}
	if (i == num) {
		usb_unlink_urb(ctx.urbs[num - 4]);
		usb_unlink_urb(ctx.urbs[num - 2]);
	} else {
		while (--i >= 0)
			usb_unlink_urb(ctx.urbs[i]);
	}

	if (atomic_dec_and_test(&ctx.pending))	/* The extra count */
		complete(&ctx.complete);
	wait_for_completion(&ctx.complete);
	retval = ctx.status;

free_urbs:
	for (i = 0; i < num; i++)
		usb_free_urb(ctx.urbs[i]);
	kfree(ctx.urbs);
free_buf:
	usb_free_coherent(udev, size, buf, buf_dma);
	return retval;
}

/*-------------------------------------------------------------------------*/

static int verify_not_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
{
	int	retval;
	u16	status;

	/* shouldn't look or act halted */
	retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
	if (retval < 0) {
		ERROR(tdev, "ep %02x couldn't get no-halt status, %d\n",
				ep, retval);
		return retval;
	}
	if (status != 0) {
		ERROR(tdev, "ep %02x bogus status: %04x != 0\n", ep, status);
		return -EINVAL;
	}
	retval = simple_io(tdev, urb, 1, 0, 0, __func__);
	if (retval != 0)
		return -EINVAL;
	return 0;
}

static int verify_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
{
	int	retval;
	u16	status;

	/* should look and act halted */
	retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
	if (retval < 0) {
		ERROR(tdev, "ep %02x couldn't get halt status, %d\n",
				ep, retval);
		return retval;
	}
	le16_to_cpus(&status);
	if (status != 1) {
		ERROR(tdev, "ep %02x bogus status: %04x != 1\n", ep, status);
		return -EINVAL;
	}
	retval = simple_io(tdev, urb, 1, 0, -EPIPE, __func__);
	if (retval != -EPIPE)
		return -EINVAL;
	retval = simple_io(tdev, urb, 1, 0, -EPIPE, "verify_still_halted");
	if (retval != -EPIPE)
		return -EINVAL;
	return 0;
}

static int test_halt(struct usbtest_dev *tdev, int ep, struct urb *urb)
{
	int	retval;

	/* shouldn't look or act halted now */
	retval = verify_not_halted(tdev, ep, urb);
	if (retval < 0)
		return retval;

	/* set halt (protocol test only), verify it worked */
	retval = usb_control_msg(urb->dev, usb_sndctrlpipe(urb->dev, 0),
			USB_REQ_SET_FEATURE, USB_RECIP_ENDPOINT,
			USB_ENDPOINT_HALT, ep,
			NULL, 0, USB_CTRL_SET_TIMEOUT);
	if (retval < 0) {
		ERROR(tdev, "ep %02x couldn't set halt, %d\n", ep, retval);
		return retval;
	}
	retval = verify_halted(tdev, ep, urb);
	if (retval < 0)
		return retval;

	/* clear halt (tests API + protocol), verify it worked */
	retval = usb_clear_halt(urb->dev, urb->pipe);
	if (retval < 0) {
		ERROR(tdev, "ep %02x couldn't clear halt, %d\n", ep, retval);
		return retval;
	}
	retval = verify_not_halted(tdev, ep, urb);
	if (retval < 0)
		return retval;

	/* NOTE: could also verify SET_INTERFACE clear halts ... */
	return 0;
}

static int halt_simple(struct usbtest_dev *dev)
{
	int			ep;
	int			retval = 0;
	struct urb		*urb;
	struct usb_device	*udev = testdev_to_usbdev(dev);

	if (udev->speed == USB_SPEED_SUPER)
		urb = simple_alloc_urb(udev, 0, 1024);
	else
		urb = simple_alloc_urb(udev, 0, 512);
	if (urb == NULL)
		return -ENOMEM;

	if (dev->in_pipe) {
		ep = usb_pipeendpoint(dev->in_pipe) | USB_DIR_IN;
		urb->pipe = dev->in_pipe;
		retval = test_halt(dev, ep, urb);
		if (retval < 0)
			goto done;
	}

	if (dev->out_pipe) {
		ep = usb_pipeendpoint(dev->out_pipe);
		urb->pipe = dev->out_pipe;
		retval = test_halt(dev, ep, urb);
	}
done:
	simple_free_urb(urb);
	return retval;
}

/*-------------------------------------------------------------------------*/

/* Control OUT tests use the vendor control requests from Intel's
 * USB 2.0 compliance test device:  write a buffer, read it back.
 *
 * Intel's spec only _requires_ that it work for one packet, which
 * is pretty weak.  Some HCDs place limits here; most devices will
 * need to be able to handle more than one OUT data packet.  We'll
 * try whatever we're told to try.
 */
static int ctrl_out(struct usbtest_dev *dev,
		unsigned count, unsigned length, unsigned vary, unsigned offset)
{
	unsigned		i, j, len;
	int			retval;
	u8			*buf;
	char			*what = "?";
	struct usb_device	*udev;

	if (length < 1 || length > 0xffff || vary >= length)
		return -EINVAL;

	buf = kmalloc(length + offset, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	buf += offset;
	udev = testdev_to_usbdev(dev);
	len = length;
	retval = 0;

	/* NOTE: hardware might well act differently if we pushed it
	 * with lots back-to-back queued requests.
	 */
	for (i = 0; i < count; i++) {
		/* write patterned data */
		for (j = 0; j < len; j++)
			buf[j] = i + j;
		retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				0x5b, USB_DIR_OUT|USB_TYPE_VENDOR,
				0, 0, buf, len, USB_CTRL_SET_TIMEOUT);
		if (retval != len) {
			what = "write";
			if (retval >= 0) {
				ERROR(dev, "ctrl_out, wlen %d (expected %d)\n",
						retval, len);
				retval = -EBADMSG;
			}
			break;
		}

		/* read it back -- assuming nothing intervened!! */
		retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
				0x5c, USB_DIR_IN|USB_TYPE_VENDOR,
				0, 0, buf, len, USB_CTRL_GET_TIMEOUT);
		if (retval != len) {
			what = "read";
			if (retval >= 0) {
				ERROR(dev, "ctrl_out, rlen %d (expected %d)\n",
						retval, len);
				retval = -EBADMSG;
			}
			break;
		}

		/* fail if we can't verify */
		for (j = 0; j < len; j++) {
			if (buf[j] != (u8) (i + j)) {
				ERROR(dev, "ctrl_out, byte %d is %d not %d\n",
						j, buf[j], (u8) i + j);
				retval = -EBADMSG;
				break;
			}
		}
		if (retval < 0) {
			what = "verify";
			break;
		}

		len += vary;

		/* [real world] the "zero bytes IN" case isn't really used.
		 * hardware can easily trip up in this weird case, since its
		 * status stage is IN, not OUT like other ep0in transfers.
		 */
		if (len > length)
			len = realworld ? 1 : 0;
	}

	if (retval < 0)
		ERROR(dev, "ctrl_out %s failed, code %d, count %d\n",
			what, retval, i);

	kfree(buf - offset);
	return retval;
}

/*-------------------------------------------------------------------------*/

/* ISO tests ... mimics common usage
 *  - buffer length is split into N packets (mostly maxpacket sized)
 *  - multi-buffers according to sglen
 */

struct iso_context {
	unsigned		count;
	unsigned		pending;
	spinlock_t		lock;
	struct completion	done;
	int			submit_error;
	unsigned long		errors;
	unsigned long		packet_count;
	struct usbtest_dev	*dev;
};

static void iso_callback(struct urb *urb)
{
	struct iso_context	*ctx = urb->context;

	spin_lock(&ctx->lock);
	ctx->count--;

	ctx->packet_count += urb->number_of_packets;
	if (urb->error_count > 0)
		ctx->errors += urb->error_count;
	else if (urb->status != 0)
		ctx->errors += urb->number_of_packets;
	else if (urb->actual_length != urb->transfer_buffer_length)
		ctx->errors++;
	else if (check_guard_bytes(ctx->dev, urb) != 0)
		ctx->errors++;

	if (urb->status == 0 && ctx->count > (ctx->pending - 1)
			&& !ctx->submit_error) {
		int status = usb_submit_urb(urb, GFP_ATOMIC);
		switch (status) {
		case 0:
			goto done;
		default:
			dev_err(&ctx->dev->intf->dev,
					"iso resubmit err %d\n",
					status);
			/* FALLTHROUGH */
		case -ENODEV:			/* disconnected */
		case -ESHUTDOWN:		/* endpoint disabled */
			ctx->submit_error = 1;
			break;
		}
	}

	ctx->pending--;
	if (ctx->pending == 0) {
		if (ctx->errors)
			dev_err(&ctx->dev->intf->dev,
				"iso test, %lu errors out of %lu\n",
				ctx->errors, ctx->packet_count);
		complete(&ctx->done);
	}
done:
	spin_unlock(&ctx->lock);
}

static struct urb *iso_alloc_urb(
	struct usb_device	*udev,
	int			pipe,
	struct usb_endpoint_descriptor	*desc,
	long			bytes,
	unsigned		offset
)
{
	struct urb		*urb;
	unsigned		i, maxp, packets;

	if (bytes < 0 || !desc)
		return NULL;
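	/* bits 10:0 of wMaxPacketSize give the packet size, bits 12:11 the
	 * number of extra high-bandwidth transactions per microframe, so
	 * e.g. 0x1400 means 3 * 1024 = 3072 bytes per service interval
	 */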
	maxp = 0x7ff & usb_endpoint_maxp(desc);
	maxp *= 1 + (0x3 & (usb_endpoint_maxp(desc) >> 11));
	packets = DIV_ROUND_UP(bytes, maxp);

	urb = usb_alloc_urb(packets, GFP_KERNEL);
	if (!urb)
		return urb;
	urb->dev = udev;
	urb->pipe = pipe;

	urb->number_of_packets = packets;
	urb->transfer_buffer_length = bytes;
	urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
							GFP_KERNEL,
							&urb->transfer_dma);
	if (!urb->transfer_buffer) {
		usb_free_urb(urb);
		return NULL;
	}
	if (offset) {
		memset(urb->transfer_buffer, GUARD_BYTE, offset);
		urb->transfer_buffer += offset;
		urb->transfer_dma += offset;
	}
	/* For inbound transfers use guard byte so that test fails if
	 * data not correctly copied */
	memset(urb->transfer_buffer,
			usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
			bytes);

	for (i = 0; i < packets; i++) {
		/* here, only the last packet will be short */
		urb->iso_frame_desc[i].length = min((unsigned) bytes, maxp);
		bytes -= urb->iso_frame_desc[i].length;

		urb->iso_frame_desc[i].offset = maxp * i;
	}

	urb->complete = iso_callback;
	/* urb->context = SET BY CALLER */
	urb->interval = 1 << (desc->bInterval - 1);
	urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
	return urb;
}

static int
test_iso_queue(struct usbtest_dev *dev, struct usbtest_param *param,
		int pipe, struct usb_endpoint_descriptor *desc, unsigned offset)
{
	struct iso_context	context;
	struct usb_device	*udev;
	unsigned		i;
	unsigned long		packets = 0;
	int			status = 0;
	struct urb		*urbs[10];	/* FIXME no limit */

	if (param->sglen > 10)
		return -EDOM;

	memset(&context, 0, sizeof context);
	context.count = param->iterations * param->sglen;
	context.dev = dev;
	init_completion(&context.done);
	spin_lock_init(&context.lock);

	memset(urbs, 0, sizeof urbs);
	udev = testdev_to_usbdev(dev);
	dev_info(&dev->intf->dev,
		"... iso period %d %sframes, wMaxPacket %04x\n",
		1 << (desc->bInterval - 1),
		(udev->speed == USB_SPEED_HIGH) ? "micro" : "",
		usb_endpoint_maxp(desc));

	for (i = 0; i < param->sglen; i++) {
		urbs[i] = iso_alloc_urb(udev, pipe, desc,
					param->length, offset);
		if (!urbs[i]) {
			status = -ENOMEM;
			goto fail;
		}
		packets += urbs[i]->number_of_packets;
		urbs[i]->context = &context;
	}
	packets *= param->iterations;
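
	/* bInterval counts frames (1 msec) at full/low speed but
	 * microframes (1/8 msec) at high speed, hence the divide by 8
	 */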
	dev_info(&dev->intf->dev,
		"... total %lu msec (%lu packets)\n",
		(packets * (1 << (desc->bInterval - 1)))
			/ ((udev->speed == USB_SPEED_HIGH) ? 8 : 1),
		packets);

	spin_lock_irq(&context.lock);
	for (i = 0; i < param->sglen; i++) {
		++context.pending;
		status = usb_submit_urb(urbs[i], GFP_ATOMIC);
		if (status < 0) {
			ERROR(dev, "submit iso[%d], error %d\n", i, status);
			if (i == 0) {
				spin_unlock_irq(&context.lock);
				goto fail;
			}

			simple_free_urb(urbs[i]);
			urbs[i] = NULL;
			context.pending--;
			context.submit_error = 1;
			break;
		}
	}
	spin_unlock_irq(&context.lock);

	wait_for_completion(&context.done);

	for (i = 0; i < param->sglen; i++) {
		if (urbs[i])
			simple_free_urb(urbs[i]);
	}
	/*
	 * Isochronous transfers are expected to fail sometimes. As an
	 * arbitrary limit, we will report an error if any submissions
	 * fail or if the transfer failure rate is > 10%.
	 */
	if (status != 0)
		;
	else if (context.submit_error)
		status = -EACCES;
	else if (context.errors > context.packet_count / 10)
		status = -EIO;
	return status;

fail:
	for (i = 0; i < param->sglen; i++) {
		if (urbs[i])
			simple_free_urb(urbs[i]);
	}
	return status;
}

static int test_unaligned_bulk(
	struct usbtest_dev *tdev,
	int pipe,
	unsigned length,
	int iterations,
	unsigned transfer_flags,
	const char *label)
{
	int retval;
	struct urb *urb = usbtest_alloc_urb(
		testdev_to_usbdev(tdev), pipe, length, transfer_flags, 1);

	if (!urb)
		return -ENOMEM;

	retval = simple_io(tdev, urb, iterations, 0, 0, label);
	simple_free_urb(urb);
	return retval;
}

/*-------------------------------------------------------------------------*/

/* We only have this one interface to user space, through usbfs.
 * User mode code can scan usbfs to find N different devices (maybe on
 * different busses) to use when testing, and allocate one thread per
 * test. So discovery is simplified, and we have no device naming issues.
 *
 * Don't use these only as stress/load tests. Use them along with
 * other USB bus activity: plugging, unplugging, mousing, mp3 playback,
 * video capture, and so on. Run different tests at different times, in
 * different sequences. Nothing here should interact with other devices,
 * except indirectly by consuming USB bandwidth and CPU resources for test
 * threads and request completion. But the only way to know that for sure
 * is to test when HC queues are in use by many devices.
 *
 * WARNING: Because usbfs grabs udev->dev.sem before calling this ioctl(),
 * it locks out usbcore in certain code paths. Notably, if you disconnect
 * the device-under-test, khubd will block forever waiting for the
 * ioctl to complete ... so that usb_disconnect() can abort the pending
 * urbs and then call usbtest_disconnect(). To abort a test, you're best
 * off just killing the userspace task and waiting for it to exit.
 */
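
/* Minimal user-space sketch (not part of this driver) of how a test is
 * kicked off through usbfs: wrap the usbtest parameters in a
 * usbdevfs_ioctl aimed at this interface and block in ioctl() until the
 * test finishes.  The device path, interface number, and error handling
 * are assumptions kept small for illustration, and the sketch assumes
 * <linux/usbdevice_fs.h> plus the usbtest_param definition are in scope;
 * the kernel's testusb tool does this for real.
 *
 *	struct usbtest_param param = {
 *		.test_num	= 1,
 *		.iterations	= 1000,
 *		.length		= 512,
 *	};
 *	struct usbdevfs_ioctl wrapper = {
 *		.ifno		= 0,
 *		.ioctl_code	= USBTEST_REQUEST,
 *		.data		= &param,
 *	};
 *	int fd = open("/dev/bus/usb/BBB/DDD", O_RDWR);
 *
 *	if (fd < 0 || ioctl(fd, USBDEVFS_IOCTL, &wrapper) < 0)
 *		perror("usbtest");
 */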

static int
usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
{
	struct usbtest_dev	*dev = usb_get_intfdata(intf);
	struct usb_device	*udev = testdev_to_usbdev(dev);
	struct usbtest_param	*param = buf;
	int			retval = -EOPNOTSUPP;
	struct urb		*urb;
	struct scatterlist	*sg;
	struct usb_sg_request	req;
	struct timeval		start;
	unsigned		i;

	/* FIXME USBDEVFS_CONNECTINFO doesn't say how fast the device is. */

	pattern = mod_pattern;

	if (code != USBTEST_REQUEST)
		return -EOPNOTSUPP;

	if (param->iterations <= 0)
		return -EINVAL;

	if (mutex_lock_interruptible(&dev->lock))
		return -ERESTARTSYS;

	/* FIXME: What if a system sleep starts while a test is running? */

	/* some devices, like ez-usb default devices, need a non-default
	 * altsetting to have any active endpoints. some tests change
	 * altsettings; force a default so most tests don't need to check.
	 */
	if (dev->info->alt >= 0) {
		int	res;

		if (intf->altsetting->desc.bInterfaceNumber) {
			mutex_unlock(&dev->lock);
			return -ENODEV;
		}
		res = set_altsetting(dev, dev->info->alt);
		if (res) {
			dev_err(&intf->dev,
					"set altsetting to %d failed, %d\n",
					dev->info->alt, res);
			mutex_unlock(&dev->lock);
			return res;
		}
	}

	/*
	 * Just a bunch of test cases that every HCD is expected to handle.
	 *
	 * Some may need specific firmware, though it'd be good to have
	 * one firmware image to handle all the test cases.
	 *
	 * FIXME add more tests! cancel requests, verify the data, control
	 * queueing, concurrent read+write threads, and so on.
	 */
	do_gettimeofday(&start);
	switch (param->test_num) {

	case 0:
		dev_info(&intf->dev, "TEST 0: NOP\n");
		retval = 0;
		break;

	/* Simple non-queued bulk I/O tests */
	case 1:
		if (dev->out_pipe == 0)
			break;
		dev_info(&intf->dev,
				"TEST 1: write %d bytes %u times\n",
				param->length, param->iterations);
		urb = simple_alloc_urb(udev, dev->out_pipe, param->length);
		if (!urb) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE: bulk sink (maybe accepts short writes) */
		retval = simple_io(dev, urb, param->iterations, 0, 0, "test1");
		simple_free_urb(urb);
		break;
	case 2:
		if (dev->in_pipe == 0)
			break;
		dev_info(&intf->dev,
				"TEST 2: read %d bytes %u times\n",
				param->length, param->iterations);
		urb = simple_alloc_urb(udev, dev->in_pipe, param->length);
		if (!urb) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE: bulk source (maybe generates short writes) */
		retval = simple_io(dev, urb, param->iterations, 0, 0, "test2");
		simple_free_urb(urb);
		break;
	case 3:
		if (dev->out_pipe == 0 || param->vary == 0)
			break;
		dev_info(&intf->dev,
				"TEST 3: write/%d 0..%d bytes %u times\n",
				param->vary, param->length, param->iterations);
		urb = simple_alloc_urb(udev, dev->out_pipe, param->length);
		if (!urb) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE: bulk sink (maybe accepts short writes) */
		retval = simple_io(dev, urb, param->iterations, param->vary,
				0, "test3");
		simple_free_urb(urb);
		break;
	case 4:
		if (dev->in_pipe == 0 || param->vary == 0)
			break;
		dev_info(&intf->dev,
				"TEST 4: read/%d 0..%d bytes %u times\n",
				param->vary, param->length, param->iterations);
		urb = simple_alloc_urb(udev, dev->in_pipe, param->length);
		if (!urb) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE: bulk source (maybe generates short writes) */
		retval = simple_io(dev, urb, param->iterations, param->vary,
				0, "test4");
		simple_free_urb(urb);
		break;

	/* Queued bulk I/O tests */
	case 5:
		if (dev->out_pipe == 0 || param->sglen == 0)
			break;
		dev_info(&intf->dev,
			"TEST 5: write %d sglists %d entries of %d bytes\n",
				param->iterations,
				param->sglen, param->length);
		sg = alloc_sglist(param->sglen, param->length, 0);
		if (!sg) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE: bulk sink (maybe accepts short writes) */
		retval = perform_sglist(dev, param->iterations, dev->out_pipe,
				&req, sg, param->sglen);
		free_sglist(sg, param->sglen);
		break;

	case 6:
		if (dev->in_pipe == 0 || param->sglen == 0)
			break;
		dev_info(&intf->dev,
			"TEST 6: read %d sglists %d entries of %d bytes\n",
				param->iterations,
				param->sglen, param->length);
		sg = alloc_sglist(param->sglen, param->length, 0);
		if (!sg) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE: bulk source (maybe generates short writes) */
		retval = perform_sglist(dev, param->iterations, dev->in_pipe,
				&req, sg, param->sglen);
		free_sglist(sg, param->sglen);
		break;
	case 7:
		if (dev->out_pipe == 0 || param->sglen == 0 || param->vary == 0)
			break;
		dev_info(&intf->dev,
			"TEST 7: write/%d %d sglists %d entries 0..%d bytes\n",
				param->vary, param->iterations,
				param->sglen, param->length);
		sg = alloc_sglist(param->sglen, param->length, param->vary);
		if (!sg) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE: bulk sink (maybe accepts short writes) */
		retval = perform_sglist(dev, param->iterations, dev->out_pipe,
				&req, sg, param->sglen);
		free_sglist(sg, param->sglen);
		break;
	case 8:
		if (dev->in_pipe == 0 || param->sglen == 0 || param->vary == 0)
			break;
		dev_info(&intf->dev,
			"TEST 8: read/%d %d sglists %d entries 0..%d bytes\n",
				param->vary, param->iterations,
				param->sglen, param->length);
		sg = alloc_sglist(param->sglen, param->length, param->vary);
		if (!sg) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE: bulk source (maybe generates short writes) */
		retval = perform_sglist(dev, param->iterations, dev->in_pipe,
				&req, sg, param->sglen);
		free_sglist(sg, param->sglen);
		break;

	/* non-queued sanity tests for control (chapter 9 subset) */
	case 9:
		retval = 0;
		dev_info(&intf->dev,
			"TEST 9: ch9 (subset) control tests, %d times\n",
				param->iterations);
		for (i = param->iterations; retval == 0 && i--; /* NOP */)
			retval = ch9_postconfig(dev);
		if (retval)
			dev_err(&intf->dev, "ch9 subset failed, "
					"iterations left %d\n", i);
		break;

	/* queued control messaging */
	case 10:
		retval = 0;
		dev_info(&intf->dev,
				"TEST 10: queue %d control calls, %d times\n",
				param->sglen,
				param->iterations);
		retval = test_ctrl_queue(dev, param);
		break;

	/* simple non-queued unlinks (ring with one urb) */
	case 11:
		if (dev->in_pipe == 0 || !param->length)
			break;
		retval = 0;
		dev_info(&intf->dev, "TEST 11: unlink %d reads of %d\n",
				param->iterations, param->length);
		for (i = param->iterations; retval == 0 && i--; /* NOP */)
			retval = unlink_simple(dev, dev->in_pipe,
						param->length);
		if (retval)
			dev_err(&intf->dev, "unlink reads failed %d, "
				"iterations left %d\n", retval, i);
		break;
	case 12:
		if (dev->out_pipe == 0 || !param->length)
			break;
		retval = 0;
		dev_info(&intf->dev, "TEST 12: unlink %d writes of %d\n",
				param->iterations, param->length);
		for (i = param->iterations; retval == 0 && i--; /* NOP */)
			retval = unlink_simple(dev, dev->out_pipe,
						param->length);
		if (retval)
			dev_err(&intf->dev, "unlink writes failed %d, "
				"iterations left %d\n", retval, i);
		break;

	/* ep halt tests */
	case 13:
		if (dev->out_pipe == 0 && dev->in_pipe == 0)
			break;
		retval = 0;
		dev_info(&intf->dev, "TEST 13: set/clear %d halts\n",
				param->iterations);
		for (i = param->iterations; retval == 0 && i--; /* NOP */)
			retval = halt_simple(dev);

		if (retval)
			ERROR(dev, "halts failed, iterations left %d\n", i);
		break;

	/* control write tests */
	case 14:
		if (!dev->info->ctrl_out)
			break;
		dev_info(&intf->dev, "TEST 14: %d ep0out, %d..%d vary %d\n",
				param->iterations,
				realworld ? 1 : 0, param->length,
				param->vary);
		retval = ctrl_out(dev, param->iterations,
				param->length, param->vary, 0);
		break;

	/* iso write tests */
	case 15:
		if (dev->out_iso_pipe == 0 || param->sglen == 0)
			break;
		dev_info(&intf->dev,
			"TEST 15: write %d iso, %d entries of %d bytes\n",
				param->iterations,
				param->sglen, param->length);
		/* FIRMWARE: iso sink */
		retval = test_iso_queue(dev, param,
				dev->out_iso_pipe, dev->iso_out, 0);
		break;

	/* iso read tests */
	case 16:
		if (dev->in_iso_pipe == 0 || param->sglen == 0)
			break;
		dev_info(&intf->dev,
			"TEST 16: read %d iso, %d entries of %d bytes\n",
				param->iterations,
				param->sglen, param->length);
		/* FIRMWARE: iso source */
		retval = test_iso_queue(dev, param,
				dev->in_iso_pipe, dev->iso_in, 0);
		break;

	/* FIXME scatterlist cancel (needs helper thread) */

	/* Tests for bulk I/O using DMA mapping by core and odd address */
	case 17:
		if (dev->out_pipe == 0)
			break;
		dev_info(&intf->dev,
			"TEST 17: write odd addr %d bytes %u times core map\n",
			param->length, param->iterations);

		retval = test_unaligned_bulk(
				dev, dev->out_pipe,
				param->length, param->iterations,
				0, "test17");
		break;

	case 18:
		if (dev->in_pipe == 0)
			break;
		dev_info(&intf->dev,
			"TEST 18: read odd addr %d bytes %u times core map\n",
			param->length, param->iterations);

		retval = test_unaligned_bulk(
				dev, dev->in_pipe,
				param->length, param->iterations,
				0, "test18");
		break;

	/* Tests for bulk I/O using premapped coherent buffer and odd address */
	case 19:
		if (dev->out_pipe == 0)
			break;
		dev_info(&intf->dev,
			"TEST 19: write odd addr %d bytes %u times premapped\n",
			param->length, param->iterations);

		retval = test_unaligned_bulk(
				dev, dev->out_pipe,
				param->length, param->iterations,
				URB_NO_TRANSFER_DMA_MAP, "test19");
		break;

	case 20:
		if (dev->in_pipe == 0)
			break;
		dev_info(&intf->dev,
			"TEST 20: read odd addr %d bytes %u times premapped\n",
			param->length, param->iterations);

		retval = test_unaligned_bulk(
				dev, dev->in_pipe,
				param->length, param->iterations,
				URB_NO_TRANSFER_DMA_MAP, "test20");
		break;

	/* control write tests with unaligned buffer */
	case 21:
		if (!dev->info->ctrl_out)
			break;
		dev_info(&intf->dev,
				"TEST 21: %d ep0out odd addr, %d..%d vary %d\n",
				param->iterations,
				realworld ? 1 : 0, param->length,
				param->vary);
		retval = ctrl_out(dev, param->iterations,
				param->length, param->vary, 1);
		break;

	/* unaligned iso tests */
	case 22:
		if (dev->out_iso_pipe == 0 || param->sglen == 0)
			break;
		dev_info(&intf->dev,
			"TEST 22: write %d iso odd, %d entries of %d bytes\n",
				param->iterations,
				param->sglen, param->length);
		retval = test_iso_queue(dev, param,
				dev->out_iso_pipe, dev->iso_out, 1);
		break;

	case 23:
		if (dev->in_iso_pipe == 0 || param->sglen == 0)
			break;
		dev_info(&intf->dev,
			"TEST 23: read %d iso odd, %d entries of %d bytes\n",
				param->iterations,
				param->sglen, param->length);
		retval = test_iso_queue(dev, param,
				dev->in_iso_pipe, dev->iso_in, 1);
		break;

	/* unlink URBs from a bulk-OUT queue */
	case 24:
		if (dev->out_pipe == 0 || !param->length || param->sglen < 4)
			break;
		retval = 0;
		dev_info(&intf->dev, "TEST 24: unlink from %d queues of "
				"%d %d-byte writes\n",
				param->iterations, param->sglen, param->length);
		for (i = param->iterations; retval == 0 && i > 0; --i) {
			retval = unlink_queued(dev, dev->out_pipe,
						param->sglen, param->length);
			if (retval) {
				dev_err(&intf->dev,
					"unlink queued writes failed %d, "
					"iterations left %d\n", retval, i);
				break;
			}
		}
		break;

	}
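
	/* report elapsed wall-clock time to user space; borrow a second
	 * from tv_sec whenever the microsecond difference goes negative
	 */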
	do_gettimeofday(&param->duration);
	param->duration.tv_sec -= start.tv_sec;
	param->duration.tv_usec -= start.tv_usec;
	if (param->duration.tv_usec < 0) {
		param->duration.tv_usec += 1000 * 1000;
		param->duration.tv_sec -= 1;
	}
	mutex_unlock(&dev->lock);
	return retval;
}

/*-------------------------------------------------------------------------*/

static unsigned force_interrupt;
module_param(force_interrupt, uint, 0);
MODULE_PARM_DESC(force_interrupt, "0 = test default; else interrupt");

#ifdef GENERIC
static unsigned short vendor;
module_param(vendor, ushort, 0);
MODULE_PARM_DESC(vendor, "vendor code (from usb-if)");

static unsigned short product;
module_param(product, ushort, 0);
MODULE_PARM_DESC(product, "product code (from vendor)");
#endif
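
/* Example (GENERIC builds only): these module parameters let usbtest
 * bind to an arbitrary device for control-only testing.  The IDs below
 * are placeholders; substitute the device you actually want to test:
 *
 *	modprobe usbtest vendor=0x1234 product=0x5678
 */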

static int
usbtest_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
	struct usb_device	*udev;
	struct usbtest_dev	*dev;
	struct usbtest_info	*info;
	char			*rtest, *wtest;
	char			*irtest, *iwtest;

	udev = interface_to_usbdev(intf);

#ifdef GENERIC
	/* specify devices by module parameters? */
	if (id->match_flags == 0) {
		/* vendor match required, product match optional */
		if (!vendor || le16_to_cpu(udev->descriptor.idVendor) != (u16)vendor)
			return -ENODEV;
		if (product && le16_to_cpu(udev->descriptor.idProduct) != (u16)product)
			return -ENODEV;
		dev_info(&intf->dev, "matched module params, "
					"vend=0x%04x prod=0x%04x\n",
				le16_to_cpu(udev->descriptor.idVendor),
				le16_to_cpu(udev->descriptor.idProduct));
	}
#endif

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	info = (struct usbtest_info *) id->driver_info;
	dev->info = info;
	mutex_init(&dev->lock);

	dev->intf = intf;

	/* cacheline-aligned scratch for i/o */
	dev->buf = kmalloc(TBUF_SIZE, GFP_KERNEL);
	if (dev->buf == NULL) {
		kfree(dev);
		return -ENOMEM;
	}

	/* NOTE this doesn't yet test the handful of differences that are
	 * visible with high speed interrupts: bigger maxpacket (1K) and
	 * "high bandwidth" modes (up to 3 packets/uframe).
	 */
	rtest = wtest = "";
	irtest = iwtest = "";
	if (force_interrupt || udev->speed == USB_SPEED_LOW) {
		if (info->ep_in) {
			dev->in_pipe = usb_rcvintpipe(udev, info->ep_in);
			rtest = " intr-in";
		}
		if (info->ep_out) {
			dev->out_pipe = usb_sndintpipe(udev, info->ep_out);
			wtest = " intr-out";
		}
	} else {
		if (info->autoconf) {
			int status;

			status = get_endpoints(dev, intf);
			if (status < 0) {
				WARNING(dev, "couldn't get endpoints, %d\n",
						status);
				kfree(dev->buf);
				kfree(dev);
				return status;
			}
			/* may find bulk or ISO pipes */
		} else {
			if (info->ep_in)
				dev->in_pipe = usb_rcvbulkpipe(udev,
							info->ep_in);
			if (info->ep_out)
				dev->out_pipe = usb_sndbulkpipe(udev,
							info->ep_out);
		}
		if (dev->in_pipe)
			rtest = " bulk-in";
		if (dev->out_pipe)
			wtest = " bulk-out";
		if (dev->in_iso_pipe)
			irtest = " iso-in";
		if (dev->out_iso_pipe)
			iwtest = " iso-out";
	}

	usb_set_intfdata(intf, dev);
	dev_info(&intf->dev, "%s\n", info->name);
	dev_info(&intf->dev, "%s {control%s%s%s%s%s} tests%s\n",
			usb_speed_string(udev->speed),
			info->ctrl_out ? " in/out" : "",
			rtest, wtest,
			irtest, iwtest,
			info->alt >= 0 ? " (+alt)" : "");
	return 0;
}

static int usbtest_suspend(struct usb_interface *intf, pm_message_t message)
{
	return 0;
}

static int usbtest_resume(struct usb_interface *intf)
{
	return 0;
}

static void usbtest_disconnect(struct usb_interface *intf)
{
	struct usbtest_dev	*dev = usb_get_intfdata(intf);

	usb_set_intfdata(intf, NULL);
	dev_dbg(&intf->dev, "disconnect\n");
	kfree(dev);
}

/* Basic testing only needs a device that can source or sink bulk traffic.
 * Any device can test control transfers (default with GENERIC binding).
 *
 * Several entries work with the default EP0 implementation that's built
 * into EZ-USB chips. There's a default vendor ID which can be overridden
 * by (very) small config EEPROMS, but otherwise all these devices act
 * identically until firmware is loaded: only EP0 works. It turns out
 * to be easy to make other endpoints work, without modifying that EP0
 * behavior. For now, we expect that kind of firmware.
 */

/* an21xx or fx versions of ez-usb */
static struct usbtest_info ez1_info = {
	.name		= "EZ-USB device",
	.ep_in		= 2,
	.ep_out		= 2,
	.alt		= 1,
};

/* fx2 version of ez-usb */
static struct usbtest_info ez2_info = {
	.name		= "FX2 device",
	.ep_in		= 6,
	.ep_out		= 2,
	.alt		= 1,
};

/* ezusb family device with dedicated usb test firmware */
static struct usbtest_info fw_info = {
	.name		= "usb test device",
	.ep_in		= 2,
	.ep_out		= 2,
	.alt		= 1,
	.autoconf	= 1,		/* iso and ctrl_out need autoconf */
	.ctrl_out	= 1,
	.iso		= 1,		/* iso_ep's are #8 in/out */
};

/* peripheral running Linux and 'zero.c' test firmware, or
 * its user-mode cousin. different versions of this use
 * different hardware with the same vendor/product codes.
 * host side MUST rely on the endpoint descriptors.
 */
static struct usbtest_info gz_info = {
	.name		= "Linux gadget zero",
	.autoconf	= 1,
	.ctrl_out	= 1,
	.alt		= 0,
};

static struct usbtest_info um_info = {
	.name		= "Linux user mode test driver",
	.autoconf	= 1,
	.alt		= -1,
};

static struct usbtest_info um2_info = {
	.name		= "Linux user mode ISO test driver",
	.autoconf	= 1,
	.iso		= 1,
	.alt		= -1,
};

#ifdef IBOT2
/* this is a nice source of high speed bulk data;
 * uses an FX2, with firmware provided in the device
 */
static struct usbtest_info ibot2_info = {
	.name		= "iBOT2 webcam",
	.ep_in		= 2,
	.alt		= -1,
};
#endif

#ifdef GENERIC
/* we can use any device to test control traffic */
static struct usbtest_info generic_info = {
	.name		= "Generic USB device",
	.alt		= -1,
};
#endif

static const struct usb_device_id id_table[] = {

	/*-------------------------------------------------------------*/

	/* EZ-USB devices which download firmware to replace (or in our
	 * case augment) the default device implementation.
	 */

	/* generic EZ-USB FX controller */
	{ USB_DEVICE(0x0547, 0x2235),
		.driver_info = (unsigned long) &ez1_info,
	},

	/* CY3671 development board with EZ-USB FX */
	{ USB_DEVICE(0x0547, 0x0080),
		.driver_info = (unsigned long) &ez1_info,
	},

	/* generic EZ-USB FX2 controller (or development board) */
	{ USB_DEVICE(0x04b4, 0x8613),
		.driver_info = (unsigned long) &ez2_info,
	},

	/* re-enumerated usb test device firmware */
	{ USB_DEVICE(0xfff0, 0xfff0),
		.driver_info = (unsigned long) &fw_info,
	},

	/* "Gadget Zero" firmware runs under Linux */
	{ USB_DEVICE(0x0525, 0xa4a0),
		.driver_info = (unsigned long) &gz_info,
	},

	/* so does a user-mode variant */
	{ USB_DEVICE(0x0525, 0xa4a4),
		.driver_info = (unsigned long) &um_info,
	},

	/* ... and a user-mode variant that talks iso */
	{ USB_DEVICE(0x0525, 0xa4a3),
		.driver_info = (unsigned long) &um2_info,
	},

#ifdef KEYSPAN_19Qi
	/* Keyspan 19qi uses an21xx (original EZ-USB) */
	/* this does not coexist with the real Keyspan 19qi driver! */
	{ USB_DEVICE(0x06cd, 0x010b),
		.driver_info = (unsigned long) &ez1_info,
	},
#endif

	/*-------------------------------------------------------------*/

#ifdef IBOT2
	/* iBOT2 makes a nice source of high speed bulk-in data */
	/* this does not coexist with a real iBOT2 driver! */
	{ USB_DEVICE(0x0b62, 0x0059),
		.driver_info = (unsigned long) &ibot2_info,
	},
#endif

	/*-------------------------------------------------------------*/

#ifdef GENERIC
	/* module params can specify devices to use for control tests */
	{ .driver_info = (unsigned long) &generic_info, },
#endif

	/*-------------------------------------------------------------*/

	{ }
};
MODULE_DEVICE_TABLE(usb, id_table);

static struct usb_driver usbtest_driver = {
	.name =		"usbtest",
	.id_table =	id_table,
	.probe =	usbtest_probe,
	.unlocked_ioctl = usbtest_ioctl,
	.disconnect =	usbtest_disconnect,
	.suspend =	usbtest_suspend,
	.resume =	usbtest_resume,
};

/*-------------------------------------------------------------------------*/

static int __init usbtest_init(void)
{
#ifdef GENERIC
	if (vendor)
		pr_debug("params: vend=0x%04x prod=0x%04x\n", vendor, product);
#endif
	return usb_register(&usbtest_driver);
}
module_init(usbtest_init);

static void __exit usbtest_exit(void)
{
	usb_deregister(&usbtest_driver);
}
module_exit(usbtest_exit);

MODULE_DESCRIPTION("USB Core/HCD Testing Driver");
MODULE_LICENSE("GPL");