usbtest.c 65 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
0552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083208420852086208720882089209020912092209320942095209620972098209921002101210221032104210521062107210821092110211121122113211421152116211721182119212021212122212321242125212621272128212921302131213221332134213521362137213821392140214121422143214421452146214721482149215021512152215321542155215621572158215921602161216221632164216521662167216821692170217121722173217421752176217721782179218021812182218321842185218621872188218921902191219221932194219521962197219821992200220122022203220422052206220722082209221022112212221322142215221622172218221922202221222222232224222522262227222822292230223122322233223422352236223722382239224022412242224322442245224622472248224922502251225222532254225522562257225822592260226122622263226422652266226722682269227022712272227322742275227622772278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533
  1. #include <linux/kernel.h>
  2. #include <linux/errno.h>
  3. #include <linux/init.h>
  4. #include <linux/slab.h>
  5. #include <linux/mm.h>
  6. #include <linux/module.h>
  7. #include <linux/moduleparam.h>
  8. #include <linux/scatterlist.h>
  9. #include <linux/mutex.h>
  10. #include <linux/usb.h>
  11. /*-------------------------------------------------------------------------*/
  12. /* FIXME make these public somewhere; usbdevfs.h? */
  13. struct usbtest_param {
  14. /* inputs */
  15. unsigned test_num; /* 0..(TEST_CASES-1) */
  16. unsigned iterations;
  17. unsigned length;
  18. unsigned vary;
  19. unsigned sglen;
  20. /* outputs */
  21. struct timeval duration;
  22. };
  23. #define USBTEST_REQUEST _IOWR('U', 100, struct usbtest_param)
  24. /*-------------------------------------------------------------------------*/
  25. #define GENERIC /* let probe() bind using module params */
  26. /* Some devices that can be used for testing will have "real" drivers.
  27. * Entries for those need to be enabled here by hand, after disabling
  28. * that "real" driver.
  29. */
  30. //#define IBOT2 /* grab iBOT2 webcams */
  31. //#define KEYSPAN_19Qi /* grab un-renumerated serial adapter */
  32. /*-------------------------------------------------------------------------*/
  33. struct usbtest_info {
  34. const char *name;
  35. u8 ep_in; /* bulk/intr source */
  36. u8 ep_out; /* bulk/intr sink */
  37. unsigned autoconf:1;
  38. unsigned ctrl_out:1;
  39. unsigned iso:1; /* try iso in/out */
  40. int alt;
  41. };
  42. /* this is accessed only through usbfs ioctl calls.
  43. * one ioctl to issue a test ... one lock per device.
  44. * tests create other threads if they need them.
  45. * urbs and buffers are allocated dynamically,
  46. * and data generated deterministically.
  47. */
  48. struct usbtest_dev {
  49. struct usb_interface *intf;
  50. struct usbtest_info *info;
  51. int in_pipe;
  52. int out_pipe;
  53. int in_iso_pipe;
  54. int out_iso_pipe;
  55. struct usb_endpoint_descriptor *iso_in, *iso_out;
  56. struct mutex lock;
  57. #define TBUF_SIZE 256
  58. u8 *buf;
  59. };
  60. static struct usb_device *testdev_to_usbdev(struct usbtest_dev *test)
  61. {
  62. return interface_to_usbdev(test->intf);
  63. }
  64. /* set up all urbs so they can be used with either bulk or interrupt */
  65. #define INTERRUPT_RATE 1 /* msec/transfer */
  66. #define ERROR(tdev, fmt, args...) \
  67. dev_err(&(tdev)->intf->dev , fmt , ## args)
  68. #define WARNING(tdev, fmt, args...) \
  69. dev_warn(&(tdev)->intf->dev , fmt , ## args)
  70. #define GUARD_BYTE 0xA5
  71. /*-------------------------------------------------------------------------*/
  72. static int
  73. get_endpoints(struct usbtest_dev *dev, struct usb_interface *intf)
  74. {
  75. int tmp;
  76. struct usb_host_interface *alt;
  77. struct usb_host_endpoint *in, *out;
  78. struct usb_host_endpoint *iso_in, *iso_out;
  79. struct usb_device *udev;
  80. for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
  81. unsigned ep;
  82. in = out = NULL;
  83. iso_in = iso_out = NULL;
  84. alt = intf->altsetting + tmp;
  85. /* take the first altsetting with in-bulk + out-bulk;
  86. * ignore other endpoints and altsettings.
  87. */
  88. for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
  89. struct usb_host_endpoint *e;
  90. e = alt->endpoint + ep;
  91. switch (e->desc.bmAttributes) {
  92. case USB_ENDPOINT_XFER_BULK:
  93. break;
  94. case USB_ENDPOINT_XFER_ISOC:
  95. if (dev->info->iso)
  96. goto try_iso;
  97. /* FALLTHROUGH */
  98. default:
  99. continue;
  100. }
  101. if (usb_endpoint_dir_in(&e->desc)) {
  102. if (!in)
  103. in = e;
  104. } else {
  105. if (!out)
  106. out = e;
  107. }
  108. continue;
  109. try_iso:
  110. if (usb_endpoint_dir_in(&e->desc)) {
  111. if (!iso_in)
  112. iso_in = e;
  113. } else {
  114. if (!iso_out)
  115. iso_out = e;
  116. }
  117. }
  118. if ((in && out) || iso_in || iso_out)
  119. goto found;
  120. }
  121. return -EINVAL;
  122. found:
  123. udev = testdev_to_usbdev(dev);
  124. if (alt->desc.bAlternateSetting != 0) {
  125. tmp = usb_set_interface(udev,
  126. alt->desc.bInterfaceNumber,
  127. alt->desc.bAlternateSetting);
  128. if (tmp < 0)
  129. return tmp;
  130. }
  131. if (in) {
  132. dev->in_pipe = usb_rcvbulkpipe(udev,
  133. in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
  134. dev->out_pipe = usb_sndbulkpipe(udev,
  135. out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
  136. }
  137. if (iso_in) {
  138. dev->iso_in = &iso_in->desc;
  139. dev->in_iso_pipe = usb_rcvisocpipe(udev,
  140. iso_in->desc.bEndpointAddress
  141. & USB_ENDPOINT_NUMBER_MASK);
  142. }
  143. if (iso_out) {
  144. dev->iso_out = &iso_out->desc;
  145. dev->out_iso_pipe = usb_sndisocpipe(udev,
  146. iso_out->desc.bEndpointAddress
  147. & USB_ENDPOINT_NUMBER_MASK);
  148. }
  149. return 0;
  150. }
  151. /*-------------------------------------------------------------------------*/
  152. /* Support for testing basic non-queued I/O streams.
  153. *
  154. * These just package urbs as requests that can be easily canceled.
  155. * Each urb's data buffer is dynamically allocated; callers can fill
  156. * them with non-zero test data (or test for it) when appropriate.
  157. */
  158. static void simple_callback(struct urb *urb)
  159. {
  160. complete(urb->context);
  161. }
  162. static struct urb *usbtest_alloc_urb(
  163. struct usb_device *udev,
  164. int pipe,
  165. unsigned long bytes,
  166. unsigned transfer_flags,
  167. unsigned offset)
  168. {
  169. struct urb *urb;
  170. urb = usb_alloc_urb(0, GFP_KERNEL);
  171. if (!urb)
  172. return urb;
  173. usb_fill_bulk_urb(urb, udev, pipe, NULL, bytes, simple_callback, NULL);
  174. urb->interval = (udev->speed == USB_SPEED_HIGH)
  175. ? (INTERRUPT_RATE << 3)
  176. : INTERRUPT_RATE;
  177. urb->transfer_flags = transfer_flags;
  178. if (usb_pipein(pipe))
  179. urb->transfer_flags |= URB_SHORT_NOT_OK;
  180. if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
  181. urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
  182. GFP_KERNEL, &urb->transfer_dma);
  183. else
  184. urb->transfer_buffer = kmalloc(bytes + offset, GFP_KERNEL);
  185. if (!urb->transfer_buffer) {
  186. usb_free_urb(urb);
  187. return NULL;
  188. }
  189. /* To test unaligned transfers add an offset and fill the
  190. unused memory with a guard value */
  191. if (offset) {
  192. memset(urb->transfer_buffer, GUARD_BYTE, offset);
  193. urb->transfer_buffer += offset;
  194. if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
  195. urb->transfer_dma += offset;
  196. }
  197. /* For inbound transfers use guard byte so that test fails if
  198. data not correctly copied */
  199. memset(urb->transfer_buffer,
  200. usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
  201. bytes);
  202. return urb;
  203. }
  204. static struct urb *simple_alloc_urb(
  205. struct usb_device *udev,
  206. int pipe,
  207. unsigned long bytes)
  208. {
  209. return usbtest_alloc_urb(udev, pipe, bytes, URB_NO_TRANSFER_DMA_MAP, 0);
  210. }
  211. static unsigned pattern;
  212. static unsigned mod_pattern;
  213. module_param_named(pattern, mod_pattern, uint, S_IRUGO | S_IWUSR);
  214. MODULE_PARM_DESC(mod_pattern, "i/o pattern (0 == zeroes)");
  215. static inline void simple_fill_buf(struct urb *urb)
  216. {
  217. unsigned i;
  218. u8 *buf = urb->transfer_buffer;
  219. unsigned len = urb->transfer_buffer_length;
  220. switch (pattern) {
  221. default:
  222. /* FALLTHROUGH */
  223. case 0:
  224. memset(buf, 0, len);
  225. break;
  226. case 1: /* mod63 */
  227. for (i = 0; i < len; i++)
  228. *buf++ = (u8) (i % 63);
  229. break;
  230. }
  231. }
  232. static inline unsigned long buffer_offset(void *buf)
  233. {
  234. return (unsigned long)buf & (ARCH_KMALLOC_MINALIGN - 1);
  235. }
  236. static int check_guard_bytes(struct usbtest_dev *tdev, struct urb *urb)
  237. {
  238. u8 *buf = urb->transfer_buffer;
  239. u8 *guard = buf - buffer_offset(buf);
  240. unsigned i;
  241. for (i = 0; guard < buf; i++, guard++) {
  242. if (*guard != GUARD_BYTE) {
  243. ERROR(tdev, "guard byte[%d] %d (not %d)\n",
  244. i, *guard, GUARD_BYTE);
  245. return -EINVAL;
  246. }
  247. }
  248. return 0;
  249. }
  250. static int simple_check_buf(struct usbtest_dev *tdev, struct urb *urb)
  251. {
  252. unsigned i;
  253. u8 expected;
  254. u8 *buf = urb->transfer_buffer;
  255. unsigned len = urb->actual_length;
  256. int ret = check_guard_bytes(tdev, urb);
  257. if (ret)
  258. return ret;
  259. for (i = 0; i < len; i++, buf++) {
  260. switch (pattern) {
  261. /* all-zeroes has no synchronization issues */
  262. case 0:
  263. expected = 0;
  264. break;
  265. /* mod63 stays in sync with short-terminated transfers,
  266. * or otherwise when host and gadget agree on how large
  267. * each usb transfer request should be. resync is done
  268. * with set_interface or set_config.
  269. */
  270. case 1: /* mod63 */
  271. expected = i % 63;
  272. break;
  273. /* always fail unsupported patterns */
  274. default:
  275. expected = !*buf;
  276. break;
  277. }
  278. if (*buf == expected)
  279. continue;
  280. ERROR(tdev, "buf[%d] = %d (not %d)\n", i, *buf, expected);
  281. return -EINVAL;
  282. }
  283. return 0;
  284. }
  285. static void simple_free_urb(struct urb *urb)
  286. {
  287. unsigned long offset = buffer_offset(urb->transfer_buffer);
  288. if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
  289. usb_free_coherent(
  290. urb->dev,
  291. urb->transfer_buffer_length + offset,
  292. urb->transfer_buffer - offset,
  293. urb->transfer_dma - offset);
  294. else
  295. kfree(urb->transfer_buffer - offset);
  296. usb_free_urb(urb);
  297. }
  298. static int simple_io(
  299. struct usbtest_dev *tdev,
  300. struct urb *urb,
  301. int iterations,
  302. int vary,
  303. int expected,
  304. const char *label
  305. )
  306. {
  307. struct usb_device *udev = urb->dev;
  308. int max = urb->transfer_buffer_length;
  309. struct completion completion;
  310. int retval = 0;
  311. urb->context = &completion;
  312. while (retval == 0 && iterations-- > 0) {
  313. init_completion(&completion);
  314. if (usb_pipeout(urb->pipe)) {
  315. simple_fill_buf(urb);
  316. urb->transfer_flags |= URB_ZERO_PACKET;
  317. }
  318. retval = usb_submit_urb(urb, GFP_KERNEL);
  319. if (retval != 0)
  320. break;
  321. /* NOTE: no timeouts; can't be broken out of by interrupt */
  322. wait_for_completion(&completion);
  323. retval = urb->status;
  324. urb->dev = udev;
  325. if (retval == 0 && usb_pipein(urb->pipe))
  326. retval = simple_check_buf(tdev, urb);
  327. if (vary) {
  328. int len = urb->transfer_buffer_length;
  329. len += vary;
  330. len %= max;
  331. if (len == 0)
  332. len = (vary < max) ? vary : max;
  333. urb->transfer_buffer_length = len;
  334. }
  335. /* FIXME if endpoint halted, clear halt (and log) */
  336. }
  337. urb->transfer_buffer_length = max;
  338. if (expected != retval)
  339. dev_err(&udev->dev,
  340. "%s failed, iterations left %d, status %d (not %d)\n",
  341. label, iterations, retval, expected);
  342. return retval;
  343. }
  344. /*-------------------------------------------------------------------------*/
  345. /* We use scatterlist primitives to test queued I/O.
  346. * Yes, this also tests the scatterlist primitives.
  347. */
  348. static void free_sglist(struct scatterlist *sg, int nents)
  349. {
  350. unsigned i;
  351. if (!sg)
  352. return;
  353. for (i = 0; i < nents; i++) {
  354. if (!sg_page(&sg[i]))
  355. continue;
  356. kfree(sg_virt(&sg[i]));
  357. }
  358. kfree(sg);
  359. }
  360. static struct scatterlist *
  361. alloc_sglist(int nents, int max, int vary)
  362. {
  363. struct scatterlist *sg;
  364. unsigned i;
  365. unsigned size = max;
  366. if (max == 0)
  367. return NULL;
  368. sg = kmalloc_array(nents, sizeof *sg, GFP_KERNEL);
  369. if (!sg)
  370. return NULL;
  371. sg_init_table(sg, nents);
  372. for (i = 0; i < nents; i++) {
  373. char *buf;
  374. unsigned j;
  375. buf = kzalloc(size, GFP_KERNEL);
  376. if (!buf) {
  377. free_sglist(sg, i);
  378. return NULL;
  379. }
  380. /* kmalloc pages are always physically contiguous! */
  381. sg_set_buf(&sg[i], buf, size);
  382. switch (pattern) {
  383. case 0:
  384. /* already zeroed */
  385. break;
  386. case 1:
  387. for (j = 0; j < size; j++)
  388. *buf++ = (u8) (j % 63);
  389. break;
  390. }
  391. if (vary) {
  392. size += vary;
  393. size %= max;
  394. if (size == 0)
  395. size = (vary < max) ? vary : max;
  396. }
  397. }
  398. return sg;
  399. }
  400. static int perform_sglist(
  401. struct usbtest_dev *tdev,
  402. unsigned iterations,
  403. int pipe,
  404. struct usb_sg_request *req,
  405. struct scatterlist *sg,
  406. int nents
  407. )
  408. {
  409. struct usb_device *udev = testdev_to_usbdev(tdev);
  410. int retval = 0;
  411. while (retval == 0 && iterations-- > 0) {
  412. retval = usb_sg_init(req, udev, pipe,
  413. (udev->speed == USB_SPEED_HIGH)
  414. ? (INTERRUPT_RATE << 3)
  415. : INTERRUPT_RATE,
  416. sg, nents, 0, GFP_KERNEL);
  417. if (retval)
  418. break;
  419. usb_sg_wait(req);
  420. retval = req->status;
  421. /* FIXME check resulting data pattern */
  422. /* FIXME if endpoint halted, clear halt (and log) */
  423. }
  424. /* FIXME for unlink or fault handling tests, don't report
  425. * failure if retval is as we expected ...
  426. */
  427. if (retval)
  428. ERROR(tdev, "perform_sglist failed, "
  429. "iterations left %d, status %d\n",
  430. iterations, retval);
  431. return retval;
  432. }
  433. /*-------------------------------------------------------------------------*/
  434. /* unqueued control message testing
  435. *
  436. * there's a nice set of device functional requirements in chapter 9 of the
  437. * usb 2.0 spec, which we can apply to ANY device, even ones that don't use
  438. * special test firmware.
  439. *
  440. * we know the device is configured (or suspended) by the time it's visible
  441. * through usbfs. we can't change that, so we won't test enumeration (which
  442. * worked 'well enough' to get here, this time), power management (ditto),
  443. * or remote wakeup (which needs human interaction).
  444. */
  445. static unsigned realworld = 1;
  446. module_param(realworld, uint, 0);
  447. MODULE_PARM_DESC(realworld, "clear to demand stricter spec compliance");
  448. static int get_altsetting(struct usbtest_dev *dev)
  449. {
  450. struct usb_interface *iface = dev->intf;
  451. struct usb_device *udev = interface_to_usbdev(iface);
  452. int retval;
  453. retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
  454. USB_REQ_GET_INTERFACE, USB_DIR_IN|USB_RECIP_INTERFACE,
  455. 0, iface->altsetting[0].desc.bInterfaceNumber,
  456. dev->buf, 1, USB_CTRL_GET_TIMEOUT);
  457. switch (retval) {
  458. case 1:
  459. return dev->buf[0];
  460. case 0:
  461. retval = -ERANGE;
  462. /* FALLTHROUGH */
  463. default:
  464. return retval;
  465. }
  466. }
  467. static int set_altsetting(struct usbtest_dev *dev, int alternate)
  468. {
  469. struct usb_interface *iface = dev->intf;
  470. struct usb_device *udev;
  471. if (alternate < 0 || alternate >= 256)
  472. return -EINVAL;
  473. udev = interface_to_usbdev(iface);
  474. return usb_set_interface(udev,
  475. iface->altsetting[0].desc.bInterfaceNumber,
  476. alternate);
  477. }
  478. static int is_good_config(struct usbtest_dev *tdev, int len)
  479. {
  480. struct usb_config_descriptor *config;
  481. if (len < sizeof *config)
  482. return 0;
  483. config = (struct usb_config_descriptor *) tdev->buf;
  484. switch (config->bDescriptorType) {
  485. case USB_DT_CONFIG:
  486. case USB_DT_OTHER_SPEED_CONFIG:
  487. if (config->bLength != 9) {
  488. ERROR(tdev, "bogus config descriptor length\n");
  489. return 0;
  490. }
  491. /* this bit 'must be 1' but often isn't */
  492. if (!realworld && !(config->bmAttributes & 0x80)) {
  493. ERROR(tdev, "high bit of config attributes not set\n");
  494. return 0;
  495. }
  496. if (config->bmAttributes & 0x1f) { /* reserved == 0 */
  497. ERROR(tdev, "reserved config bits set\n");
  498. return 0;
  499. }
  500. break;
  501. default:
  502. return 0;
  503. }
  504. if (le16_to_cpu(config->wTotalLength) == len) /* read it all */
  505. return 1;
  506. if (le16_to_cpu(config->wTotalLength) >= TBUF_SIZE) /* max partial read */
  507. return 1;
  508. ERROR(tdev, "bogus config descriptor read size\n");
  509. return 0;
  510. }
  511. /* sanity test for standard requests working with usb_control_mesg() and some
  512. * of the utility functions which use it.
  513. *
  514. * this doesn't test how endpoint halts behave or data toggles get set, since
  515. * we won't do I/O to bulk/interrupt endpoints here (which is how to change
  516. * halt or toggle). toggle testing is impractical without support from hcds.
  517. *
  518. * this avoids failing devices linux would normally work with, by not testing
  519. * config/altsetting operations for devices that only support their defaults.
  520. * such devices rarely support those needless operations.
  521. *
  522. * NOTE that since this is a sanity test, it's not examining boundary cases
  523. * to see if usbcore, hcd, and device all behave right. such testing would
  524. * involve varied read sizes and other operation sequences.
  525. */
  526. static int ch9_postconfig(struct usbtest_dev *dev)
  527. {
  528. struct usb_interface *iface = dev->intf;
  529. struct usb_device *udev = interface_to_usbdev(iface);
  530. int i, alt, retval;
  531. /* [9.2.3] if there's more than one altsetting, we need to be able to
  532. * set and get each one. mostly trusts the descriptors from usbcore.
  533. */
  534. for (i = 0; i < iface->num_altsetting; i++) {
  535. /* 9.2.3 constrains the range here */
  536. alt = iface->altsetting[i].desc.bAlternateSetting;
  537. if (alt < 0 || alt >= iface->num_altsetting) {
  538. dev_err(&iface->dev,
  539. "invalid alt [%d].bAltSetting = %d\n",
  540. i, alt);
  541. }
  542. /* [real world] get/set unimplemented if there's only one */
  543. if (realworld && iface->num_altsetting == 1)
  544. continue;
  545. /* [9.4.10] set_interface */
  546. retval = set_altsetting(dev, alt);
  547. if (retval) {
  548. dev_err(&iface->dev, "can't set_interface = %d, %d\n",
  549. alt, retval);
  550. return retval;
  551. }
  552. /* [9.4.4] get_interface always works */
  553. retval = get_altsetting(dev);
  554. if (retval != alt) {
  555. dev_err(&iface->dev, "get alt should be %d, was %d\n",
  556. alt, retval);
  557. return (retval < 0) ? retval : -EDOM;
  558. }
  559. }
  560. /* [real world] get_config unimplemented if there's only one */
  561. if (!realworld || udev->descriptor.bNumConfigurations != 1) {
  562. int expected = udev->actconfig->desc.bConfigurationValue;
  563. /* [9.4.2] get_configuration always works
  564. * ... although some cheap devices (like one TI Hub I've got)
  565. * won't return config descriptors except before set_config.
  566. */
  567. retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
  568. USB_REQ_GET_CONFIGURATION,
  569. USB_DIR_IN | USB_RECIP_DEVICE,
  570. 0, 0, dev->buf, 1, USB_CTRL_GET_TIMEOUT);
  571. if (retval != 1 || dev->buf[0] != expected) {
  572. dev_err(&iface->dev, "get config --> %d %d (1 %d)\n",
  573. retval, dev->buf[0], expected);
  574. return (retval < 0) ? retval : -EDOM;
  575. }
  576. }
  577. /* there's always [9.4.3] a device descriptor [9.6.1] */
  578. retval = usb_get_descriptor(udev, USB_DT_DEVICE, 0,
  579. dev->buf, sizeof udev->descriptor);
  580. if (retval != sizeof udev->descriptor) {
  581. dev_err(&iface->dev, "dev descriptor --> %d\n", retval);
  582. return (retval < 0) ? retval : -EDOM;
  583. }
  584. /* there's always [9.4.3] at least one config descriptor [9.6.3] */
  585. for (i = 0; i < udev->descriptor.bNumConfigurations; i++) {
  586. retval = usb_get_descriptor(udev, USB_DT_CONFIG, i,
  587. dev->buf, TBUF_SIZE);
  588. if (!is_good_config(dev, retval)) {
  589. dev_err(&iface->dev,
  590. "config [%d] descriptor --> %d\n",
  591. i, retval);
  592. return (retval < 0) ? retval : -EDOM;
  593. }
  594. /* FIXME cross-checking udev->config[i] to make sure usbcore
  595. * parsed it right (etc) would be good testing paranoia
  596. */
  597. }
  598. /* and sometimes [9.2.6.6] speed dependent descriptors */
  599. if (le16_to_cpu(udev->descriptor.bcdUSB) == 0x0200) {
  600. struct usb_qualifier_descriptor *d = NULL;
  601. /* device qualifier [9.6.2] */
  602. retval = usb_get_descriptor(udev,
  603. USB_DT_DEVICE_QUALIFIER, 0, dev->buf,
  604. sizeof(struct usb_qualifier_descriptor));
  605. if (retval == -EPIPE) {
  606. if (udev->speed == USB_SPEED_HIGH) {
  607. dev_err(&iface->dev,
  608. "hs dev qualifier --> %d\n",
  609. retval);
  610. return (retval < 0) ? retval : -EDOM;
  611. }
  612. /* usb2.0 but not high-speed capable; fine */
  613. } else if (retval != sizeof(struct usb_qualifier_descriptor)) {
  614. dev_err(&iface->dev, "dev qualifier --> %d\n", retval);
  615. return (retval < 0) ? retval : -EDOM;
  616. } else
  617. d = (struct usb_qualifier_descriptor *) dev->buf;
  618. /* might not have [9.6.2] any other-speed configs [9.6.4] */
  619. if (d) {
  620. unsigned max = d->bNumConfigurations;
  621. for (i = 0; i < max; i++) {
  622. retval = usb_get_descriptor(udev,
  623. USB_DT_OTHER_SPEED_CONFIG, i,
  624. dev->buf, TBUF_SIZE);
  625. if (!is_good_config(dev, retval)) {
  626. dev_err(&iface->dev,
  627. "other speed config --> %d\n",
  628. retval);
  629. return (retval < 0) ? retval : -EDOM;
  630. }
  631. }
  632. }
  633. }
  634. /* FIXME fetch strings from at least the device descriptor */
  635. /* [9.4.5] get_status always works */
  636. retval = usb_get_status(udev, USB_RECIP_DEVICE, 0, dev->buf);
  637. if (retval != 2) {
  638. dev_err(&iface->dev, "get dev status --> %d\n", retval);
  639. return (retval < 0) ? retval : -EDOM;
  640. }
  641. /* FIXME configuration.bmAttributes says if we could try to set/clear
  642. * the device's remote wakeup feature ... if we can, test that here
  643. */
  644. retval = usb_get_status(udev, USB_RECIP_INTERFACE,
  645. iface->altsetting[0].desc.bInterfaceNumber, dev->buf);
  646. if (retval != 2) {
  647. dev_err(&iface->dev, "get interface status --> %d\n", retval);
  648. return (retval < 0) ? retval : -EDOM;
  649. }
  650. /* FIXME get status for each endpoint in the interface */
  651. return 0;
  652. }
  653. /*-------------------------------------------------------------------------*/
  654. /* use ch9 requests to test whether:
  655. * (a) queues work for control, keeping N subtests queued and
  656. * active (auto-resubmit) for M loops through the queue.
  657. * (b) protocol stalls (control-only) will autorecover.
  658. * it's not like bulk/intr; no halt clearing.
  659. * (c) short control reads are reported and handled.
  660. * (d) queues are always processed in-order
  661. */
  662. struct ctrl_ctx {
  663. spinlock_t lock;
  664. struct usbtest_dev *dev;
  665. struct completion complete;
  666. unsigned count;
  667. unsigned pending;
  668. int status;
  669. struct urb **urb;
  670. struct usbtest_param *param;
  671. int last;
  672. };
  673. #define NUM_SUBCASES 15 /* how many test subcases here? */
  674. struct subcase {
  675. struct usb_ctrlrequest setup;
  676. int number;
  677. int expected;
  678. };
  679. static void ctrl_complete(struct urb *urb)
  680. {
  681. struct ctrl_ctx *ctx = urb->context;
  682. struct usb_ctrlrequest *reqp;
  683. struct subcase *subcase;
  684. int status = urb->status;
  685. reqp = (struct usb_ctrlrequest *)urb->setup_packet;
  686. subcase = container_of(reqp, struct subcase, setup);
  687. spin_lock(&ctx->lock);
  688. ctx->count--;
  689. ctx->pending--;
  690. /* queue must transfer and complete in fifo order, unless
  691. * usb_unlink_urb() is used to unlink something not at the
  692. * physical queue head (not tested).
  693. */
  694. if (subcase->number > 0) {
  695. if ((subcase->number - ctx->last) != 1) {
  696. ERROR(ctx->dev,
  697. "subcase %d completed out of order, last %d\n",
  698. subcase->number, ctx->last);
  699. status = -EDOM;
  700. ctx->last = subcase->number;
  701. goto error;
  702. }
  703. }
  704. ctx->last = subcase->number;
  705. /* succeed or fault in only one way? */
  706. if (status == subcase->expected)
  707. status = 0;
  708. /* async unlink for cleanup? */
  709. else if (status != -ECONNRESET) {
  710. /* some faults are allowed, not required */
  711. if (subcase->expected > 0 && (
  712. ((status == -subcase->expected /* happened */
  713. || status == 0)))) /* didn't */
  714. status = 0;
  715. /* sometimes more than one fault is allowed */
  716. else if (subcase->number == 12 && status == -EPIPE)
  717. status = 0;
  718. else
  719. ERROR(ctx->dev, "subtest %d error, status %d\n",
  720. subcase->number, status);
  721. }
  722. /* unexpected status codes mean errors; ideally, in hardware */
  723. if (status) {
  724. error:
  725. if (ctx->status == 0) {
  726. int i;
  727. ctx->status = status;
  728. ERROR(ctx->dev, "control queue %02x.%02x, err %d, "
  729. "%d left, subcase %d, len %d/%d\n",
  730. reqp->bRequestType, reqp->bRequest,
  731. status, ctx->count, subcase->number,
  732. urb->actual_length,
  733. urb->transfer_buffer_length);
  734. /* FIXME this "unlink everything" exit route should
  735. * be a separate test case.
  736. */
  737. /* unlink whatever's still pending */
  738. for (i = 1; i < ctx->param->sglen; i++) {
  739. struct urb *u = ctx->urb[
  740. (i + subcase->number)
  741. % ctx->param->sglen];
  742. if (u == urb || !u->dev)
  743. continue;
  744. spin_unlock(&ctx->lock);
  745. status = usb_unlink_urb(u);
  746. spin_lock(&ctx->lock);
  747. switch (status) {
  748. case -EINPROGRESS:
  749. case -EBUSY:
  750. case -EIDRM:
  751. continue;
  752. default:
  753. ERROR(ctx->dev, "urb unlink --> %d\n",
  754. status);
  755. }
  756. }
  757. status = ctx->status;
  758. }
  759. }
  760. /* resubmit if we need to, else mark this as done */
  761. if ((status == 0) && (ctx->pending < ctx->count)) {
  762. status = usb_submit_urb(urb, GFP_ATOMIC);
  763. if (status != 0) {
  764. ERROR(ctx->dev,
  765. "can't resubmit ctrl %02x.%02x, err %d\n",
  766. reqp->bRequestType, reqp->bRequest, status);
  767. urb->dev = NULL;
  768. } else
  769. ctx->pending++;
  770. } else
  771. urb->dev = NULL;
  772. /* signal completion when nothing's queued */
  773. if (ctx->pending == 0)
  774. complete(&ctx->complete);
  775. spin_unlock(&ctx->lock);
  776. }
  777. static int
  778. test_ctrl_queue(struct usbtest_dev *dev, struct usbtest_param *param)
  779. {
  780. struct usb_device *udev = testdev_to_usbdev(dev);
  781. struct urb **urb;
  782. struct ctrl_ctx context;
  783. int i;
  784. if (param->sglen == 0 || param->iterations > UINT_MAX / param->sglen)
  785. return -EOPNOTSUPP;
  786. spin_lock_init(&context.lock);
  787. context.dev = dev;
  788. init_completion(&context.complete);
  789. context.count = param->sglen * param->iterations;
  790. context.pending = 0;
  791. context.status = -ENOMEM;
  792. context.param = param;
  793. context.last = -1;
  794. /* allocate and init the urbs we'll queue.
  795. * as with bulk/intr sglists, sglen is the queue depth; it also
  796. * controls which subtests run (more tests than sglen) or rerun.
  797. */
  798. urb = kcalloc(param->sglen, sizeof(struct urb *), GFP_KERNEL);
  799. if (!urb)
  800. return -ENOMEM;
  801. for (i = 0; i < param->sglen; i++) {
  802. int pipe = usb_rcvctrlpipe(udev, 0);
  803. unsigned len;
  804. struct urb *u;
  805. struct usb_ctrlrequest req;
  806. struct subcase *reqp;
  807. /* sign of this variable means:
  808. * -: tested code must return this (negative) error code
  809. * +: tested code may return this (negative too) error code
  810. */
  811. int expected = 0;
  812. /* requests here are mostly expected to succeed on any
  813. * device, but some are chosen to trigger protocol stalls
  814. * or short reads.
  815. */
  816. memset(&req, 0, sizeof req);
  817. req.bRequest = USB_REQ_GET_DESCRIPTOR;
  818. req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
  819. switch (i % NUM_SUBCASES) {
  820. case 0: /* get device descriptor */
  821. req.wValue = cpu_to_le16(USB_DT_DEVICE << 8);
  822. len = sizeof(struct usb_device_descriptor);
  823. break;
  824. case 1: /* get first config descriptor (only) */
  825. req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
  826. len = sizeof(struct usb_config_descriptor);
  827. break;
  828. case 2: /* get altsetting (OFTEN STALLS) */
  829. req.bRequest = USB_REQ_GET_INTERFACE;
  830. req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
  831. /* index = 0 means first interface */
  832. len = 1;
  833. expected = EPIPE;
  834. break;
  835. case 3: /* get interface status */
  836. req.bRequest = USB_REQ_GET_STATUS;
  837. req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
  838. /* interface 0 */
  839. len = 2;
  840. break;
  841. case 4: /* get device status */
  842. req.bRequest = USB_REQ_GET_STATUS;
  843. req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
  844. len = 2;
  845. break;
  846. case 5: /* get device qualifier (MAY STALL) */
  847. req.wValue = cpu_to_le16 (USB_DT_DEVICE_QUALIFIER << 8);
  848. len = sizeof(struct usb_qualifier_descriptor);
  849. if (udev->speed != USB_SPEED_HIGH)
  850. expected = EPIPE;
  851. break;
  852. case 6: /* get first config descriptor, plus interface */
  853. req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
  854. len = sizeof(struct usb_config_descriptor);
  855. len += sizeof(struct usb_interface_descriptor);
  856. break;
  857. case 7: /* get interface descriptor (ALWAYS STALLS) */
  858. req.wValue = cpu_to_le16 (USB_DT_INTERFACE << 8);
  859. /* interface == 0 */
  860. len = sizeof(struct usb_interface_descriptor);
  861. expected = -EPIPE;
  862. break;
  863. /* NOTE: two consecutive stalls in the queue here.
  864. * that tests fault recovery a bit more aggressively. */
  865. case 8: /* clear endpoint halt (MAY STALL) */
  866. req.bRequest = USB_REQ_CLEAR_FEATURE;
  867. req.bRequestType = USB_RECIP_ENDPOINT;
  868. /* wValue 0 == ep halt */
  869. /* wIndex 0 == ep0 (shouldn't halt!) */
  870. len = 0;
  871. pipe = usb_sndctrlpipe(udev, 0);
  872. expected = EPIPE;
  873. break;
  874. case 9: /* get endpoint status */
  875. req.bRequest = USB_REQ_GET_STATUS;
  876. req.bRequestType = USB_DIR_IN|USB_RECIP_ENDPOINT;
  877. /* endpoint 0 */
  878. len = 2;
  879. break;
  880. case 10: /* trigger short read (EREMOTEIO) */
  881. req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
  882. len = 1024;
  883. expected = -EREMOTEIO;
  884. break;
  885. /* NOTE: two consecutive _different_ faults in the queue. */
  886. case 11: /* get endpoint descriptor (ALWAYS STALLS) */
  887. req.wValue = cpu_to_le16(USB_DT_ENDPOINT << 8);
  888. /* endpoint == 0 */
  889. len = sizeof(struct usb_interface_descriptor);
  890. expected = EPIPE;
  891. break;
  892. /* NOTE: sometimes even a third fault in the queue! */
  893. case 12: /* get string 0 descriptor (MAY STALL) */
  894. req.wValue = cpu_to_le16(USB_DT_STRING << 8);
  895. /* string == 0, for language IDs */
  896. len = sizeof(struct usb_interface_descriptor);
  897. /* may succeed when > 4 languages */
  898. expected = EREMOTEIO; /* or EPIPE, if no strings */
  899. break;
  900. case 13: /* short read, resembling case 10 */
  901. req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
  902. /* last data packet "should" be DATA1, not DATA0 */
  903. if (udev->speed == USB_SPEED_SUPER)
  904. len = 1024 - 512;
  905. else
  906. len = 1024 - udev->descriptor.bMaxPacketSize0;
  907. expected = -EREMOTEIO;
  908. break;
  909. case 14: /* short read; try to fill the last packet */
  910. req.wValue = cpu_to_le16((USB_DT_DEVICE << 8) | 0);
  911. /* device descriptor size == 18 bytes */
  912. len = udev->descriptor.bMaxPacketSize0;
  913. if (udev->speed == USB_SPEED_SUPER)
  914. len = 512;
  915. switch (len) {
  916. case 8:
  917. len = 24;
  918. break;
  919. case 16:
  920. len = 32;
  921. break;
  922. }
  923. expected = -EREMOTEIO;
  924. break;
  925. default:
  926. ERROR(dev, "bogus number of ctrl queue testcases!\n");
  927. context.status = -EINVAL;
  928. goto cleanup;
  929. }
  930. req.wLength = cpu_to_le16(len);
  931. urb[i] = u = simple_alloc_urb(udev, pipe, len);
  932. if (!u)
  933. goto cleanup;
  934. reqp = kmalloc(sizeof *reqp, GFP_KERNEL);
  935. if (!reqp)
  936. goto cleanup;
  937. reqp->setup = req;
  938. reqp->number = i % NUM_SUBCASES;
  939. reqp->expected = expected;
  940. u->setup_packet = (char *) &reqp->setup;
  941. u->context = &context;
  942. u->complete = ctrl_complete;
  943. }
  944. /* queue the urbs */
  945. context.urb = urb;
  946. spin_lock_irq(&context.lock);
  947. for (i = 0; i < param->sglen; i++) {
  948. context.status = usb_submit_urb(urb[i], GFP_ATOMIC);
  949. if (context.status != 0) {
  950. ERROR(dev, "can't submit urb[%d], status %d\n",
  951. i, context.status);
  952. context.count = context.pending;
  953. break;
  954. }
  955. context.pending++;
  956. }
  957. spin_unlock_irq(&context.lock);
  958. /* FIXME set timer and time out; provide a disconnect hook */
  959. /* wait for the last one to complete */
  960. if (context.pending > 0)
  961. wait_for_completion(&context.complete);
  962. cleanup:
  963. for (i = 0; i < param->sglen; i++) {
  964. if (!urb[i])
  965. continue;
  966. urb[i]->dev = udev;
  967. kfree(urb[i]->setup_packet);
  968. simple_free_urb(urb[i]);
  969. }
  970. kfree(urb);
  971. return context.status;
  972. }
  973. #undef NUM_SUBCASES
  974. /*-------------------------------------------------------------------------*/
  975. static void unlink1_callback(struct urb *urb)
  976. {
  977. int status = urb->status;
  978. /* we "know" -EPIPE (stall) never happens */
  979. if (!status)
  980. status = usb_submit_urb(urb, GFP_ATOMIC);
  981. if (status) {
  982. urb->status = status;
  983. complete(urb->context);
  984. }
  985. }
  986. static int unlink1(struct usbtest_dev *dev, int pipe, int size, int async)
  987. {
  988. struct urb *urb;
  989. struct completion completion;
  990. int retval = 0;
  991. init_completion(&completion);
  992. urb = simple_alloc_urb(testdev_to_usbdev(dev), pipe, size);
  993. if (!urb)
  994. return -ENOMEM;
  995. urb->context = &completion;
  996. urb->complete = unlink1_callback;
  997. /* keep the endpoint busy. there are lots of hc/hcd-internal
  998. * states, and testing should get to all of them over time.
  999. *
  1000. * FIXME want additional tests for when endpoint is STALLing
  1001. * due to errors, or is just NAKing requests.
  1002. */
  1003. retval = usb_submit_urb(urb, GFP_KERNEL);
  1004. if (retval != 0) {
  1005. dev_err(&dev->intf->dev, "submit fail %d\n", retval);
  1006. return retval;
  1007. }
  1008. /* unlinking that should always work. variable delay tests more
  1009. * hcd states and code paths, even with little other system load.
  1010. */
  1011. msleep(jiffies % (2 * INTERRUPT_RATE));
  1012. if (async) {
  1013. while (!completion_done(&completion)) {
  1014. retval = usb_unlink_urb(urb);
  1015. switch (retval) {
  1016. case -EBUSY:
  1017. case -EIDRM:
  1018. /* we can't unlink urbs while they're completing
  1019. * or if they've completed, and we haven't
  1020. * resubmitted. "normal" drivers would prevent
  1021. * resubmission, but since we're testing unlink
  1022. * paths, we can't.
  1023. */
  1024. ERROR(dev, "unlink retry\n");
  1025. continue;
  1026. case 0:
  1027. case -EINPROGRESS:
  1028. break;
  1029. default:
  1030. dev_err(&dev->intf->dev,
  1031. "unlink fail %d\n", retval);
  1032. return retval;
  1033. }
  1034. break;
  1035. }
  1036. } else
  1037. usb_kill_urb(urb);
  1038. wait_for_completion(&completion);
  1039. retval = urb->status;
  1040. simple_free_urb(urb);
  1041. if (async)
  1042. return (retval == -ECONNRESET) ? 0 : retval - 1000;
  1043. else
  1044. return (retval == -ENOENT || retval == -EPERM) ?
  1045. 0 : retval - 2000;
  1046. }
  1047. static int unlink_simple(struct usbtest_dev *dev, int pipe, int len)
  1048. {
  1049. int retval = 0;
  1050. /* test sync and async paths */
  1051. retval = unlink1(dev, pipe, len, 1);
  1052. if (!retval)
  1053. retval = unlink1(dev, pipe, len, 0);
  1054. return retval;
  1055. }
  1056. /*-------------------------------------------------------------------------*/
  1057. struct queued_ctx {
  1058. struct completion complete;
  1059. atomic_t pending;
  1060. unsigned num;
  1061. int status;
  1062. struct urb **urbs;
  1063. };
  1064. static void unlink_queued_callback(struct urb *urb)
  1065. {
  1066. int status = urb->status;
  1067. struct queued_ctx *ctx = urb->context;
  1068. if (ctx->status)
  1069. goto done;
  1070. if (urb == ctx->urbs[ctx->num - 4] || urb == ctx->urbs[ctx->num - 2]) {
  1071. if (status == -ECONNRESET)
  1072. goto done;
  1073. /* What error should we report if the URB completed normally? */
  1074. }
  1075. if (status != 0)
  1076. ctx->status = status;
  1077. done:
  1078. if (atomic_dec_and_test(&ctx->pending))
  1079. complete(&ctx->complete);
  1080. }
  1081. static int unlink_queued(struct usbtest_dev *dev, int pipe, unsigned num,
  1082. unsigned size)
  1083. {
  1084. struct queued_ctx ctx;
  1085. struct usb_device *udev = testdev_to_usbdev(dev);
  1086. void *buf;
  1087. dma_addr_t buf_dma;
  1088. int i;
  1089. int retval = -ENOMEM;
  1090. init_completion(&ctx.complete);
  1091. atomic_set(&ctx.pending, 1); /* One more than the actual value */
  1092. ctx.num = num;
  1093. ctx.status = 0;
  1094. buf = usb_alloc_coherent(udev, size, GFP_KERNEL, &buf_dma);
  1095. if (!buf)
  1096. return retval;
  1097. memset(buf, 0, size);
  1098. /* Allocate and init the urbs we'll queue */
  1099. ctx.urbs = kcalloc(num, sizeof(struct urb *), GFP_KERNEL);
  1100. if (!ctx.urbs)
  1101. goto free_buf;
  1102. for (i = 0; i < num; i++) {
  1103. ctx.urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
  1104. if (!ctx.urbs[i])
  1105. goto free_urbs;
  1106. usb_fill_bulk_urb(ctx.urbs[i], udev, pipe, buf, size,
  1107. unlink_queued_callback, &ctx);
  1108. ctx.urbs[i]->transfer_dma = buf_dma;
  1109. ctx.urbs[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
  1110. }
  1111. /* Submit all the URBs and then unlink URBs num - 4 and num - 2. */
  1112. for (i = 0; i < num; i++) {
  1113. atomic_inc(&ctx.pending);
  1114. retval = usb_submit_urb(ctx.urbs[i], GFP_KERNEL);
  1115. if (retval != 0) {
  1116. dev_err(&dev->intf->dev, "submit urbs[%d] fail %d\n",
  1117. i, retval);
  1118. atomic_dec(&ctx.pending);
  1119. ctx.status = retval;
  1120. break;
  1121. }
  1122. }
  1123. if (i == num) {
  1124. usb_unlink_urb(ctx.urbs[num - 4]);
  1125. usb_unlink_urb(ctx.urbs[num - 2]);
  1126. } else {
  1127. while (--i >= 0)
  1128. usb_unlink_urb(ctx.urbs[i]);
  1129. }
  1130. if (atomic_dec_and_test(&ctx.pending)) /* The extra count */
  1131. complete(&ctx.complete);
  1132. wait_for_completion(&ctx.complete);
  1133. retval = ctx.status;
  1134. free_urbs:
  1135. for (i = 0; i < num; i++)
  1136. usb_free_urb(ctx.urbs[i]);
  1137. kfree(ctx.urbs);
  1138. free_buf:
  1139. usb_free_coherent(udev, size, buf, buf_dma);
  1140. return retval;
  1141. }
  1142. /*-------------------------------------------------------------------------*/
  1143. static int verify_not_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
  1144. {
  1145. int retval;
  1146. u16 status;
  1147. /* shouldn't look or act halted */
  1148. retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
  1149. if (retval < 0) {
  1150. ERROR(tdev, "ep %02x couldn't get no-halt status, %d\n",
  1151. ep, retval);
  1152. return retval;
  1153. }
  1154. if (status != 0) {
  1155. ERROR(tdev, "ep %02x bogus status: %04x != 0\n", ep, status);
  1156. return -EINVAL;
  1157. }
  1158. retval = simple_io(tdev, urb, 1, 0, 0, __func__);
  1159. if (retval != 0)
  1160. return -EINVAL;
  1161. return 0;
  1162. }
  1163. static int verify_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
  1164. {
  1165. int retval;
  1166. u16 status;
  1167. /* should look and act halted */
  1168. retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
  1169. if (retval < 0) {
  1170. ERROR(tdev, "ep %02x couldn't get halt status, %d\n",
  1171. ep, retval);
  1172. return retval;
  1173. }
  1174. le16_to_cpus(&status);
  1175. if (status != 1) {
  1176. ERROR(tdev, "ep %02x bogus status: %04x != 1\n", ep, status);
  1177. return -EINVAL;
  1178. }
  1179. retval = simple_io(tdev, urb, 1, 0, -EPIPE, __func__);
  1180. if (retval != -EPIPE)
  1181. return -EINVAL;
  1182. retval = simple_io(tdev, urb, 1, 0, -EPIPE, "verify_still_halted");
  1183. if (retval != -EPIPE)
  1184. return -EINVAL;
  1185. return 0;
  1186. }
  1187. static int test_halt(struct usbtest_dev *tdev, int ep, struct urb *urb)
  1188. {
  1189. int retval;
  1190. /* shouldn't look or act halted now */
  1191. retval = verify_not_halted(tdev, ep, urb);
  1192. if (retval < 0)
  1193. return retval;
  1194. /* set halt (protocol test only), verify it worked */
  1195. retval = usb_control_msg(urb->dev, usb_sndctrlpipe(urb->dev, 0),
  1196. USB_REQ_SET_FEATURE, USB_RECIP_ENDPOINT,
  1197. USB_ENDPOINT_HALT, ep,
  1198. NULL, 0, USB_CTRL_SET_TIMEOUT);
  1199. if (retval < 0) {
  1200. ERROR(tdev, "ep %02x couldn't set halt, %d\n", ep, retval);
  1201. return retval;
  1202. }
  1203. retval = verify_halted(tdev, ep, urb);
  1204. if (retval < 0)
  1205. return retval;
  1206. /* clear halt (tests API + protocol), verify it worked */
  1207. retval = usb_clear_halt(urb->dev, urb->pipe);
  1208. if (retval < 0) {
  1209. ERROR(tdev, "ep %02x couldn't clear halt, %d\n", ep, retval);
  1210. return retval;
  1211. }
  1212. retval = verify_not_halted(tdev, ep, urb);
  1213. if (retval < 0)
  1214. return retval;
  1215. /* NOTE: could also verify SET_INTERFACE clear halts ... */
  1216. return 0;
  1217. }
  1218. static int halt_simple(struct usbtest_dev *dev)
  1219. {
  1220. int ep;
  1221. int retval = 0;
  1222. struct urb *urb;
  1223. struct usb_device *udev = testdev_to_usbdev(dev);
  1224. if (udev->speed == USB_SPEED_SUPER)
  1225. urb = simple_alloc_urb(udev, 0, 1024);
  1226. else
  1227. urb = simple_alloc_urb(udev, 0, 512);
  1228. if (urb == NULL)
  1229. return -ENOMEM;
  1230. if (dev->in_pipe) {
  1231. ep = usb_pipeendpoint(dev->in_pipe) | USB_DIR_IN;
  1232. urb->pipe = dev->in_pipe;
  1233. retval = test_halt(dev, ep, urb);
  1234. if (retval < 0)
  1235. goto done;
  1236. }
  1237. if (dev->out_pipe) {
  1238. ep = usb_pipeendpoint(dev->out_pipe);
  1239. urb->pipe = dev->out_pipe;
  1240. retval = test_halt(dev, ep, urb);
  1241. }
  1242. done:
  1243. simple_free_urb(urb);
  1244. return retval;
  1245. }
  1246. /*-------------------------------------------------------------------------*/
  1247. /* Control OUT tests use the vendor control requests from Intel's
  1248. * USB 2.0 compliance test device: write a buffer, read it back.
  1249. *
  1250. * Intel's spec only _requires_ that it work for one packet, which
  1251. * is pretty weak. Some HCDs place limits here; most devices will
  1252. * need to be able to handle more than one OUT data packet. We'll
  1253. * try whatever we're told to try.
  1254. */
  1255. static int ctrl_out(struct usbtest_dev *dev,
  1256. unsigned count, unsigned length, unsigned vary, unsigned offset)
  1257. {
  1258. unsigned i, j, len;
  1259. int retval;
  1260. u8 *buf;
  1261. char *what = "?";
  1262. struct usb_device *udev;
  1263. if (length < 1 || length > 0xffff || vary >= length)
  1264. return -EINVAL;
  1265. buf = kmalloc(length + offset, GFP_KERNEL);
  1266. if (!buf)
  1267. return -ENOMEM;
  1268. buf += offset;
  1269. udev = testdev_to_usbdev(dev);
  1270. len = length;
  1271. retval = 0;
  1272. /* NOTE: hardware might well act differently if we pushed it
  1273. * with lots back-to-back queued requests.
  1274. */
  1275. for (i = 0; i < count; i++) {
  1276. /* write patterned data */
  1277. for (j = 0; j < len; j++)
  1278. buf[j] = i + j;
  1279. retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
  1280. 0x5b, USB_DIR_OUT|USB_TYPE_VENDOR,
  1281. 0, 0, buf, len, USB_CTRL_SET_TIMEOUT);
  1282. if (retval != len) {
  1283. what = "write";
  1284. if (retval >= 0) {
  1285. ERROR(dev, "ctrl_out, wlen %d (expected %d)\n",
  1286. retval, len);
  1287. retval = -EBADMSG;
  1288. }
  1289. break;
  1290. }
  1291. /* read it back -- assuming nothing intervened!! */
  1292. retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
  1293. 0x5c, USB_DIR_IN|USB_TYPE_VENDOR,
  1294. 0, 0, buf, len, USB_CTRL_GET_TIMEOUT);
  1295. if (retval != len) {
  1296. what = "read";
  1297. if (retval >= 0) {
  1298. ERROR(dev, "ctrl_out, rlen %d (expected %d)\n",
  1299. retval, len);
  1300. retval = -EBADMSG;
  1301. }
  1302. break;
  1303. }
  1304. /* fail if we can't verify */
  1305. for (j = 0; j < len; j++) {
  1306. if (buf[j] != (u8) (i + j)) {
  1307. ERROR(dev, "ctrl_out, byte %d is %d not %d\n",
  1308. j, buf[j], (u8) i + j);
  1309. retval = -EBADMSG;
  1310. break;
  1311. }
  1312. }
  1313. if (retval < 0) {
  1314. what = "verify";
  1315. break;
  1316. }
  1317. len += vary;
  1318. /* [real world] the "zero bytes IN" case isn't really used.
  1319. * hardware can easily trip up in this weird case, since its
  1320. * status stage is IN, not OUT like other ep0in transfers.
  1321. */
  1322. if (len > length)
  1323. len = realworld ? 1 : 0;
  1324. }
  1325. if (retval < 0)
  1326. ERROR(dev, "ctrl_out %s failed, code %d, count %d\n",
  1327. what, retval, i);
  1328. kfree(buf - offset);
  1329. return retval;
  1330. }
  1331. /*-------------------------------------------------------------------------*/
  1332. /* ISO tests ... mimics common usage
  1333. * - buffer length is split into N packets (mostly maxpacket sized)
  1334. * - multi-buffers according to sglen
  1335. */
  1336. struct iso_context {
  1337. unsigned count;
  1338. unsigned pending;
  1339. spinlock_t lock;
  1340. struct completion done;
  1341. int submit_error;
  1342. unsigned long errors;
  1343. unsigned long packet_count;
  1344. struct usbtest_dev *dev;
  1345. };
static void iso_callback(struct urb *urb)
{
	struct iso_context *ctx = urb->context;

	spin_lock(&ctx->lock);
	ctx->count--;

	ctx->packet_count += urb->number_of_packets;
	if (urb->error_count > 0)
		ctx->errors += urb->error_count;
	else if (urb->status != 0)
		ctx->errors += urb->number_of_packets;
	else if (urb->actual_length != urb->transfer_buffer_length)
		ctx->errors++;
	else if (check_guard_bytes(ctx->dev, urb) != 0)
		ctx->errors++;

	if (urb->status == 0 && ctx->count > (ctx->pending - 1)
			&& !ctx->submit_error) {
		int status = usb_submit_urb(urb, GFP_ATOMIC);
		switch (status) {
		case 0:
			goto done;
		default:
			dev_err(&ctx->dev->intf->dev,
					"iso resubmit err %d\n",
					status);
			/* FALLTHROUGH */
		case -ENODEV:			/* disconnected */
		case -ESHUTDOWN:		/* endpoint disabled */
			ctx->submit_error = 1;
			break;
		}
	}

	ctx->pending--;
	if (ctx->pending == 0) {
		if (ctx->errors)
			dev_err(&ctx->dev->intf->dev,
				"iso test, %lu errors out of %lu\n",
				ctx->errors, ctx->packet_count);
		complete(&ctx->done);
	}
done:
	spin_unlock(&ctx->lock);
}
static struct urb *iso_alloc_urb(
	struct usb_device	*udev,
	int			pipe,
	struct usb_endpoint_descriptor	*desc,
	long			bytes,
	unsigned		offset
)
{
	struct urb		*urb;
	unsigned		i, maxp, packets;

	if (bytes < 0 || !desc)
		return NULL;
	maxp = 0x7ff & usb_endpoint_maxp(desc);
	maxp *= 1 + (0x3 & (usb_endpoint_maxp(desc) >> 11));
	packets = DIV_ROUND_UP(bytes, maxp);

	urb = usb_alloc_urb(packets, GFP_KERNEL);
	if (!urb)
		return urb;
	urb->dev = udev;
	urb->pipe = pipe;

	urb->number_of_packets = packets;
	urb->transfer_buffer_length = bytes;
	urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
							GFP_KERNEL,
							&urb->transfer_dma);
	if (!urb->transfer_buffer) {
		usb_free_urb(urb);
		return NULL;
	}
	if (offset) {
		memset(urb->transfer_buffer, GUARD_BYTE, offset);
		urb->transfer_buffer += offset;
		urb->transfer_dma += offset;
	}
	/* For inbound transfers use a guard byte so that the test fails
	 * if data is not correctly copied.
	 */
	memset(urb->transfer_buffer,
			usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
			bytes);

	for (i = 0; i < packets; i++) {
		/* here, only the last packet will be short */
		urb->iso_frame_desc[i].length = min((unsigned) bytes, maxp);
		bytes -= urb->iso_frame_desc[i].length;

		urb->iso_frame_desc[i].offset = maxp * i;
	}

	urb->complete = iso_callback;
	/* urb->context = SET BY CALLER */
	urb->interval = 1 << (desc->bInterval - 1);
	urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
	return urb;
}
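
/* A worked example of the sizing math above (illustrative numbers only):
 * for a high-bandwidth endpoint with wMaxPacketSize 0x1400, the low 11 bits
 * give maxp = 1024 and bits 12..11 add two extra transactions per interval,
 * so maxp becomes 3072; bytes = 32768 then yields DIV_ROUND_UP(32768, 3072)
 * = 11 packets: ten of 3072 bytes and a short final packet of 2048 bytes.
 */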
static int
test_iso_queue(struct usbtest_dev *dev, struct usbtest_param *param,
		int pipe, struct usb_endpoint_descriptor *desc, unsigned offset)
{
	struct iso_context	context;
	struct usb_device	*udev;
	unsigned		i;
	unsigned long		packets = 0;
	int			status = 0;
	struct urb		*urbs[10];	/* FIXME no limit */

	if (param->sglen > 10)
		return -EDOM;

	memset(&context, 0, sizeof context);
	context.count = param->iterations * param->sglen;
	context.dev = dev;
	init_completion(&context.done);
	spin_lock_init(&context.lock);

	memset(urbs, 0, sizeof urbs);
	udev = testdev_to_usbdev(dev);
	dev_info(&dev->intf->dev,
		"... iso period %d %sframes, wMaxPacket %04x\n",
		1 << (desc->bInterval - 1),
		(udev->speed == USB_SPEED_HIGH) ? "micro" : "",
		usb_endpoint_maxp(desc));

	for (i = 0; i < param->sglen; i++) {
		urbs[i] = iso_alloc_urb(udev, pipe, desc,
					param->length, offset);
		if (!urbs[i]) {
			status = -ENOMEM;
			goto fail;
		}
		packets += urbs[i]->number_of_packets;
		urbs[i]->context = &context;
	}
	packets *= param->iterations;
	dev_info(&dev->intf->dev,
		"... total %lu msec (%lu packets)\n",
		(packets * (1 << (desc->bInterval - 1)))
			/ ((udev->speed == USB_SPEED_HIGH) ? 8 : 1),
		packets);

	spin_lock_irq(&context.lock);
	for (i = 0; i < param->sglen; i++) {
		++context.pending;
		status = usb_submit_urb(urbs[i], GFP_ATOMIC);
		if (status < 0) {
			ERROR(dev, "submit iso[%d], error %d\n", i, status);
			if (i == 0) {
				spin_unlock_irq(&context.lock);
				goto fail;
			}

			simple_free_urb(urbs[i]);
			urbs[i] = NULL;
			context.pending--;
			context.submit_error = 1;
			break;
		}
	}
	spin_unlock_irq(&context.lock);

	wait_for_completion(&context.done);

	for (i = 0; i < param->sglen; i++) {
		if (urbs[i])
			simple_free_urb(urbs[i]);
	}
	/*
	 * Isochronous transfers are expected to fail sometimes. As an
	 * arbitrary limit, we will report an error if any submissions
	 * fail or if the transfer failure rate is > 10%.
	 */
	if (status != 0)
		;
	else if (context.submit_error)
		status = -EACCES;
	else if (context.errors > context.packet_count / 10)
		status = -EIO;
	return status;

fail:
	for (i = 0; i < param->sglen; i++) {
		if (urbs[i])
			simple_free_urb(urbs[i]);
	}
	return status;
}
static int test_unaligned_bulk(
	struct usbtest_dev *tdev,
	int pipe,
	unsigned length,
	int iterations,
	unsigned transfer_flags,
	const char *label)
{
	int retval;
	struct urb *urb = usbtest_alloc_urb(
		testdev_to_usbdev(tdev), pipe, length, transfer_flags, 1);

	if (!urb)
		return -ENOMEM;

	retval = simple_io(tdev, urb, iterations, 0, 0, label);
	simple_free_urb(urb);
	return retval;
}
/*-------------------------------------------------------------------------*/

/* We only have this one interface to user space, through usbfs.
 * User mode code can scan usbfs to find N different devices (maybe on
 * different busses) to use when testing, and allocate one thread per
 * test. So discovery is simplified, and we have no device naming issues.
 *
 * Don't use these only as stress/load tests. Use them along with
 * other USB bus activity: plugging, unplugging, mousing, mp3 playback,
 * video capture, and so on. Run different tests at different times, in
 * different sequences. Nothing here should interact with other devices,
 * except indirectly by consuming USB bandwidth and CPU resources for test
 * threads and request completion. But the only way to know that for sure
 * is to test when HC queues are in use by many devices.
 *
 * WARNING: Because usbfs grabs udev->dev.sem before calling this ioctl(),
 * it locks out usbcore in certain code paths. Notably, if you disconnect
 * the device-under-test, khubd will block forever waiting for the
 * ioctl to complete ... so that usb_disconnect() can abort the pending
 * urbs and then call usbtest_disconnect(). To abort a test, you're best
 * off just killing the userspace task and waiting for it to exit.
 */
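
/* A minimal user-space sketch of driving this interface through usbfs.
 * Assumptions: the device path is hypothetical, user code needs its own
 * copies of USBTEST_REQUEST and struct usbtest_param matching this file,
 * plus <linux/usbdevice_fs.h>, <sys/ioctl.h> and <fcntl.h>; the usual
 * testusb tool does the same thing with more option parsing:
 *
 *	struct usbtest_param param = {
 *		.test_num   = 1,	// simple bulk OUT test
 *		.iterations = 1000,
 *		.length     = 512,
 *	};
 *	struct usbdevfs_ioctl wrapper = {
 *		.ifno       = 0,	// interface bound to usbtest
 *		.ioctl_code = USBTEST_REQUEST,
 *		.data       = &param,
 *	};
 *	int fd = open("/dev/bus/usb/001/002", O_RDWR);	// hypothetical path
 *
 *	if (fd < 0 || ioctl(fd, USBDEVFS_IOCTL, &wrapper) < 0)
 *		perror("usbtest");
 */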
static int
usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
{
	struct usbtest_dev	*dev = usb_get_intfdata(intf);
	struct usb_device	*udev = testdev_to_usbdev(dev);
	struct usbtest_param	*param = buf;
	int			retval = -EOPNOTSUPP;
	struct urb		*urb;
	struct scatterlist	*sg;
	struct usb_sg_request	req;
	struct timeval		start;
	unsigned		i;

	/* FIXME USBDEVFS_CONNECTINFO doesn't say how fast the device is. */

	pattern = mod_pattern;

	if (code != USBTEST_REQUEST)
		return -EOPNOTSUPP;

	if (param->iterations <= 0)
		return -EINVAL;

	if (mutex_lock_interruptible(&dev->lock))
		return -ERESTARTSYS;

	/* FIXME: What if a system sleep starts while a test is running? */

	/* some devices, like ez-usb default devices, need a non-default
	 * altsetting to have any active endpoints. some tests change
	 * altsettings; force a default so most tests don't need to check.
	 */
	if (dev->info->alt >= 0) {
		int	res;

		if (intf->altsetting->desc.bInterfaceNumber) {
			mutex_unlock(&dev->lock);
			return -ENODEV;
		}
		res = set_altsetting(dev, dev->info->alt);
		if (res) {
			dev_err(&intf->dev,
					"set altsetting to %d failed, %d\n",
					dev->info->alt, res);
			mutex_unlock(&dev->lock);
			return res;
		}
	}

	/*
	 * Just a bunch of test cases that every HCD is expected to handle.
	 *
	 * Some may need specific firmware, though it'd be good to have
	 * one firmware image to handle all the test cases.
	 *
	 * FIXME add more tests! cancel requests, verify the data, control
	 * queueing, concurrent read+write threads, and so on.
	 */
	do_gettimeofday(&start);
	switch (param->test_num) {

	case 0:
		dev_info(&intf->dev, "TEST 0: NOP\n");
		retval = 0;
		break;

	/* Simple non-queued bulk I/O tests */
	case 1:
		if (dev->out_pipe == 0)
			break;
		dev_info(&intf->dev,
				"TEST 1: write %d bytes %u times\n",
				param->length, param->iterations);
		urb = simple_alloc_urb(udev, dev->out_pipe, param->length);
		if (!urb) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE: bulk sink (maybe accepts short writes) */
		retval = simple_io(dev, urb, param->iterations, 0, 0, "test1");
		simple_free_urb(urb);
		break;
	case 2:
		if (dev->in_pipe == 0)
			break;
		dev_info(&intf->dev,
				"TEST 2: read %d bytes %u times\n",
				param->length, param->iterations);
		urb = simple_alloc_urb(udev, dev->in_pipe, param->length);
		if (!urb) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE: bulk source (maybe generates short writes) */
		retval = simple_io(dev, urb, param->iterations, 0, 0, "test2");
		simple_free_urb(urb);
		break;
	case 3:
		if (dev->out_pipe == 0 || param->vary == 0)
			break;
		dev_info(&intf->dev,
				"TEST 3: write/%d 0..%d bytes %u times\n",
				param->vary, param->length, param->iterations);
		urb = simple_alloc_urb(udev, dev->out_pipe, param->length);
		if (!urb) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE: bulk sink (maybe accepts short writes) */
		retval = simple_io(dev, urb, param->iterations, param->vary,
					0, "test3");
		simple_free_urb(urb);
		break;
	case 4:
		if (dev->in_pipe == 0 || param->vary == 0)
			break;
		dev_info(&intf->dev,
				"TEST 4: read/%d 0..%d bytes %u times\n",
				param->vary, param->length, param->iterations);
		urb = simple_alloc_urb(udev, dev->in_pipe, param->length);
		if (!urb) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE: bulk source (maybe generates short writes) */
		retval = simple_io(dev, urb, param->iterations, param->vary,
					0, "test4");
		simple_free_urb(urb);
		break;
	/* Queued bulk I/O tests */
	case 5:
		if (dev->out_pipe == 0 || param->sglen == 0)
			break;
		dev_info(&intf->dev,
			"TEST 5: write %d sglists %d entries of %d bytes\n",
				param->iterations,
				param->sglen, param->length);
		sg = alloc_sglist(param->sglen, param->length, 0);
		if (!sg) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE: bulk sink (maybe accepts short writes) */
		retval = perform_sglist(dev, param->iterations, dev->out_pipe,
				&req, sg, param->sglen);
		free_sglist(sg, param->sglen);
		break;

	case 6:
		if (dev->in_pipe == 0 || param->sglen == 0)
			break;
		dev_info(&intf->dev,
			"TEST 6: read %d sglists %d entries of %d bytes\n",
				param->iterations,
				param->sglen, param->length);
		sg = alloc_sglist(param->sglen, param->length, 0);
		if (!sg) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE: bulk source (maybe generates short writes) */
		retval = perform_sglist(dev, param->iterations, dev->in_pipe,
				&req, sg, param->sglen);
		free_sglist(sg, param->sglen);
		break;
	case 7:
		if (dev->out_pipe == 0 || param->sglen == 0 || param->vary == 0)
			break;
		dev_info(&intf->dev,
			"TEST 7: write/%d %d sglists %d entries 0..%d bytes\n",
				param->vary, param->iterations,
				param->sglen, param->length);
		sg = alloc_sglist(param->sglen, param->length, param->vary);
		if (!sg) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE: bulk sink (maybe accepts short writes) */
		retval = perform_sglist(dev, param->iterations, dev->out_pipe,
				&req, sg, param->sglen);
		free_sglist(sg, param->sglen);
		break;
	case 8:
		if (dev->in_pipe == 0 || param->sglen == 0 || param->vary == 0)
			break;
		dev_info(&intf->dev,
			"TEST 8: read/%d %d sglists %d entries 0..%d bytes\n",
				param->vary, param->iterations,
				param->sglen, param->length);
		sg = alloc_sglist(param->sglen, param->length, param->vary);
		if (!sg) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE: bulk source (maybe generates short writes) */
		retval = perform_sglist(dev, param->iterations, dev->in_pipe,
				&req, sg, param->sglen);
		free_sglist(sg, param->sglen);
		break;
	/* non-queued sanity tests for control (chapter 9 subset) */
	case 9:
		retval = 0;
		dev_info(&intf->dev,
			"TEST 9: ch9 (subset) control tests, %d times\n",
				param->iterations);
		for (i = param->iterations; retval == 0 && i--; /* NOP */)
			retval = ch9_postconfig(dev);
		if (retval)
			dev_err(&intf->dev, "ch9 subset failed, "
					"iterations left %d\n", i);
		break;

	/* queued control messaging */
	case 10:
		retval = 0;
		dev_info(&intf->dev,
				"TEST 10: queue %d control calls, %d times\n",
				param->sglen,
				param->iterations);
		retval = test_ctrl_queue(dev, param);
		break;

	/* simple non-queued unlinks (ring with one urb) */
	case 11:
		if (dev->in_pipe == 0 || !param->length)
			break;
		retval = 0;
		dev_info(&intf->dev, "TEST 11: unlink %d reads of %d\n",
				param->iterations, param->length);
		for (i = param->iterations; retval == 0 && i--; /* NOP */)
			retval = unlink_simple(dev, dev->in_pipe,
						param->length);
		if (retval)
			dev_err(&intf->dev, "unlink reads failed %d, "
				"iterations left %d\n", retval, i);
		break;
	case 12:
		if (dev->out_pipe == 0 || !param->length)
			break;
		retval = 0;
		dev_info(&intf->dev, "TEST 12: unlink %d writes of %d\n",
				param->iterations, param->length);
		for (i = param->iterations; retval == 0 && i--; /* NOP */)
			retval = unlink_simple(dev, dev->out_pipe,
						param->length);
		if (retval)
			dev_err(&intf->dev, "unlink writes failed %d, "
				"iterations left %d\n", retval, i);
		break;

	/* ep halt tests */
	case 13:
		if (dev->out_pipe == 0 && dev->in_pipe == 0)
			break;
		retval = 0;
		dev_info(&intf->dev, "TEST 13: set/clear %d halts\n",
				param->iterations);
		for (i = param->iterations; retval == 0 && i--; /* NOP */)
			retval = halt_simple(dev);

		if (retval)
			ERROR(dev, "halts failed, iterations left %d\n", i);
		break;
	/* control write tests */
	case 14:
		if (!dev->info->ctrl_out)
			break;
		dev_info(&intf->dev, "TEST 14: %d ep0out, %d..%d vary %d\n",
				param->iterations,
				realworld ? 1 : 0, param->length,
				param->vary);
		retval = ctrl_out(dev, param->iterations,
				param->length, param->vary, 0);
		break;

	/* iso write tests */
	case 15:
		if (dev->out_iso_pipe == 0 || param->sglen == 0)
			break;
		dev_info(&intf->dev,
			"TEST 15: write %d iso, %d entries of %d bytes\n",
				param->iterations,
				param->sglen, param->length);
		/* FIRMWARE: iso sink */
		retval = test_iso_queue(dev, param,
				dev->out_iso_pipe, dev->iso_out, 0);
		break;

	/* iso read tests */
	case 16:
		if (dev->in_iso_pipe == 0 || param->sglen == 0)
			break;
		dev_info(&intf->dev,
			"TEST 16: read %d iso, %d entries of %d bytes\n",
				param->iterations,
				param->sglen, param->length);
		/* FIRMWARE: iso source */
		retval = test_iso_queue(dev, param,
				dev->in_iso_pipe, dev->iso_in, 0);
		break;
	/* FIXME scatterlist cancel (needs helper thread) */

	/* Tests for bulk I/O using DMA mapping by core and odd address */
	case 17:
		if (dev->out_pipe == 0)
			break;
		dev_info(&intf->dev,
			"TEST 17: write odd addr %d bytes %u times core map\n",
			param->length, param->iterations);

		retval = test_unaligned_bulk(
				dev, dev->out_pipe,
				param->length, param->iterations,
				0, "test17");
		break;

	case 18:
		if (dev->in_pipe == 0)
			break;
		dev_info(&intf->dev,
			"TEST 18: read odd addr %d bytes %u times core map\n",
			param->length, param->iterations);

		retval = test_unaligned_bulk(
				dev, dev->in_pipe,
				param->length, param->iterations,
				0, "test18");
		break;

	/* Tests for bulk I/O using premapped coherent buffer and odd address */
	case 19:
		if (dev->out_pipe == 0)
			break;
		dev_info(&intf->dev,
			"TEST 19: write odd addr %d bytes %u times premapped\n",
			param->length, param->iterations);

		retval = test_unaligned_bulk(
				dev, dev->out_pipe,
				param->length, param->iterations,
				URB_NO_TRANSFER_DMA_MAP, "test19");
		break;

	case 20:
		if (dev->in_pipe == 0)
			break;
		dev_info(&intf->dev,
			"TEST 20: read odd addr %d bytes %u times premapped\n",
			param->length, param->iterations);

		retval = test_unaligned_bulk(
				dev, dev->in_pipe,
				param->length, param->iterations,
				URB_NO_TRANSFER_DMA_MAP, "test20");
		break;
	/* control write tests with unaligned buffer */
	case 21:
		if (!dev->info->ctrl_out)
			break;
		dev_info(&intf->dev,
				"TEST 21: %d ep0out odd addr, %d..%d vary %d\n",
				param->iterations,
				realworld ? 1 : 0, param->length,
				param->vary);
		retval = ctrl_out(dev, param->iterations,
				param->length, param->vary, 1);
		break;

	/* unaligned iso tests */
	case 22:
		if (dev->out_iso_pipe == 0 || param->sglen == 0)
			break;
		dev_info(&intf->dev,
			"TEST 22: write %d iso odd, %d entries of %d bytes\n",
				param->iterations,
				param->sglen, param->length);
		retval = test_iso_queue(dev, param,
				dev->out_iso_pipe, dev->iso_out, 1);
		break;

	case 23:
		if (dev->in_iso_pipe == 0 || param->sglen == 0)
			break;
		dev_info(&intf->dev,
			"TEST 23: read %d iso odd, %d entries of %d bytes\n",
				param->iterations,
				param->sglen, param->length);
		retval = test_iso_queue(dev, param,
				dev->in_iso_pipe, dev->iso_in, 1);
		break;
	/* unlink URBs from a bulk-OUT queue */
	case 24:
		if (dev->out_pipe == 0 || !param->length || param->sglen < 4)
			break;
		retval = 0;
		dev_info(&intf->dev, "TEST 24: unlink from %d queues of "
				"%d %d-byte writes\n",
				param->iterations, param->sglen, param->length);
		for (i = param->iterations; retval == 0 && i > 0; --i) {
			retval = unlink_queued(dev, dev->out_pipe,
						param->sglen, param->length);
			if (retval) {
				dev_err(&intf->dev,
					"unlink queued writes failed %d, "
					"iterations left %d\n", retval, i);
				break;
			}
		}
		break;

	}
	do_gettimeofday(&param->duration);
	param->duration.tv_sec -= start.tv_sec;
	param->duration.tv_usec -= start.tv_usec;
	if (param->duration.tv_usec < 0) {
		param->duration.tv_usec += 1000 * 1000;
		param->duration.tv_sec -= 1;
	}
	mutex_unlock(&dev->lock);
	return retval;
}
/*-------------------------------------------------------------------------*/

static unsigned force_interrupt;
module_param(force_interrupt, uint, 0);
MODULE_PARM_DESC(force_interrupt, "0 = test default; else interrupt");

#ifdef GENERIC
static unsigned short vendor;
module_param(vendor, ushort, 0);
MODULE_PARM_DESC(vendor, "vendor code (from usb-if)");

static unsigned short product;
module_param(product, ushort, 0);
MODULE_PARM_DESC(product, "product code (from vendor)");
#endif
static int
usbtest_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
	struct usb_device	*udev;
	struct usbtest_dev	*dev;
	struct usbtest_info	*info;
	char			*rtest, *wtest;
	char			*irtest, *iwtest;

	udev = interface_to_usbdev(intf);

#ifdef GENERIC
	/* specify devices by module parameters? */
	if (id->match_flags == 0) {
		/* vendor match required, product match optional */
		if (!vendor || le16_to_cpu(udev->descriptor.idVendor) != (u16)vendor)
			return -ENODEV;
		if (product && le16_to_cpu(udev->descriptor.idProduct) != (u16)product)
			return -ENODEV;
		dev_info(&intf->dev, "matched module params, "
					"vend=0x%04x prod=0x%04x\n",
				le16_to_cpu(udev->descriptor.idVendor),
				le16_to_cpu(udev->descriptor.idProduct));
	}
#endif

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	info = (struct usbtest_info *) id->driver_info;
	dev->info = info;
	mutex_init(&dev->lock);

	dev->intf = intf;

	/* cacheline-aligned scratch for i/o */
	dev->buf = kmalloc(TBUF_SIZE, GFP_KERNEL);
	if (dev->buf == NULL) {
		kfree(dev);
		return -ENOMEM;
	}

	/* NOTE this doesn't yet test the handful of differences that are
	 * visible with high speed interrupts: bigger maxpacket (1K) and
	 * "high bandwidth" modes (up to 3 packets/uframe).
	 */
	rtest = wtest = "";
	irtest = iwtest = "";
	if (force_interrupt || udev->speed == USB_SPEED_LOW) {
		if (info->ep_in) {
			dev->in_pipe = usb_rcvintpipe(udev, info->ep_in);
			rtest = " intr-in";
		}
		if (info->ep_out) {
			dev->out_pipe = usb_sndintpipe(udev, info->ep_out);
			wtest = " intr-out";
		}
	} else {
		if (info->autoconf) {
			int status;

			status = get_endpoints(dev, intf);
			if (status < 0) {
				WARNING(dev, "couldn't get endpoints, %d\n",
						status);
				kfree(dev->buf);
				kfree(dev);
				return status;
			}
			/* may find bulk or ISO pipes */
		} else {
			if (info->ep_in)
				dev->in_pipe = usb_rcvbulkpipe(udev,
							info->ep_in);
			if (info->ep_out)
				dev->out_pipe = usb_sndbulkpipe(udev,
							info->ep_out);
		}
		if (dev->in_pipe)
			rtest = " bulk-in";
		if (dev->out_pipe)
			wtest = " bulk-out";
		if (dev->in_iso_pipe)
			irtest = " iso-in";
		if (dev->out_iso_pipe)
			iwtest = " iso-out";
	}

	usb_set_intfdata(intf, dev);
	dev_info(&intf->dev, "%s\n", info->name);
	dev_info(&intf->dev, "%s {control%s%s%s%s%s} tests%s\n",
			usb_speed_string(udev->speed),
			info->ctrl_out ? " in/out" : "",
			rtest, wtest,
			irtest, iwtest,
			info->alt >= 0 ? " (+alt)" : "");
	return 0;
}
static int usbtest_suspend(struct usb_interface *intf, pm_message_t message)
{
	return 0;
}

static int usbtest_resume(struct usb_interface *intf)
{
	return 0;
}

static void usbtest_disconnect(struct usb_interface *intf)
{
	struct usbtest_dev	*dev = usb_get_intfdata(intf);

	usb_set_intfdata(intf, NULL);
	dev_dbg(&intf->dev, "disconnect\n");
	kfree(dev);
}
/* Basic testing only needs a device that can source or sink bulk traffic.
 * Any device can test control transfers (default with GENERIC binding).
 *
 * Several entries work with the default EP0 implementation that's built
 * into EZ-USB chips. There's a default vendor ID which can be overridden
 * by (very) small config EEPROMs, but otherwise all these devices act
 * identically until firmware is loaded: only EP0 works. It turns out
 * to be easy to make other endpoints work, without modifying that EP0
 * behavior. For now, we expect that kind of firmware.
 */
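
/* As an illustration only: firmware for the EZ-USB parts listed below is
 * typically downloaded from the host before testing, e.g. with the fxload
 * utility (the firmware file name and device path here are hypothetical,
 * and fxload option spellings vary between versions):
 *
 *	fxload -t fx2 -I testfw.ihx -D /dev/bus/usb/001/002
 *
 * After the download the device re-enumerates and can then be bound by
 * this driver using one of the IDs in the table further below.
 */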
/* an21xx or fx versions of ez-usb */
static struct usbtest_info ez1_info = {
	.name = "EZ-USB device",
	.ep_in = 2,
	.ep_out = 2,
	.alt = 1,
};

/* fx2 version of ez-usb */
static struct usbtest_info ez2_info = {
	.name = "FX2 device",
	.ep_in = 6,
	.ep_out = 2,
	.alt = 1,
};

/* ezusb family device with dedicated usb test firmware */
static struct usbtest_info fw_info = {
	.name = "usb test device",
	.ep_in = 2,
	.ep_out = 2,
	.alt = 1,
	.autoconf = 1,		/* iso and ctrl_out need autoconf */
	.ctrl_out = 1,
	.iso = 1,		/* iso_ep's are #8 in/out */
};

/* peripheral running Linux and 'zero.c' test firmware, or
 * its user-mode cousin. Different versions of this use
 * different hardware with the same vendor/product codes.
 * The host side MUST rely on the endpoint descriptors.
 */
static struct usbtest_info gz_info = {
	.name = "Linux gadget zero",
	.autoconf = 1,
	.ctrl_out = 1,
	.iso = 1,
	.alt = 0,
};

static struct usbtest_info um_info = {
	.name = "Linux user mode test driver",
	.autoconf = 1,
	.alt = -1,
};

static struct usbtest_info um2_info = {
	.name = "Linux user mode ISO test driver",
	.autoconf = 1,
	.iso = 1,
	.alt = -1,
};
#ifdef IBOT2
/* this is a nice source of high speed bulk data;
 * uses an FX2, with firmware provided in the device
 */
static struct usbtest_info ibot2_info = {
	.name = "iBOT2 webcam",
	.ep_in = 2,
	.alt = -1,
};
#endif

#ifdef GENERIC
/* we can use any device to test control traffic */
static struct usbtest_info generic_info = {
	.name = "Generic USB device",
	.alt = -1,
};
#endif
static const struct usb_device_id id_table[] = {

	/*-------------------------------------------------------------*/

	/* EZ-USB devices which download firmware to replace (or in our
	 * case augment) the default device implementation.
	 */

	/* generic EZ-USB FX controller */
	{ USB_DEVICE(0x0547, 0x2235),
		.driver_info = (unsigned long) &ez1_info,
	},

	/* CY3671 development board with EZ-USB FX */
	{ USB_DEVICE(0x0547, 0x0080),
		.driver_info = (unsigned long) &ez1_info,
	},

	/* generic EZ-USB FX2 controller (or development board) */
	{ USB_DEVICE(0x04b4, 0x8613),
		.driver_info = (unsigned long) &ez2_info,
	},

	/* re-enumerated usb test device firmware */
	{ USB_DEVICE(0xfff0, 0xfff0),
		.driver_info = (unsigned long) &fw_info,
	},

	/* "Gadget Zero" firmware runs under Linux */
	{ USB_DEVICE(0x0525, 0xa4a0),
		.driver_info = (unsigned long) &gz_info,
	},

	/* so does a user-mode variant */
	{ USB_DEVICE(0x0525, 0xa4a4),
		.driver_info = (unsigned long) &um_info,
	},

	/* ... and a user-mode variant that talks iso */
	{ USB_DEVICE(0x0525, 0xa4a3),
		.driver_info = (unsigned long) &um2_info,
	},

#ifdef KEYSPAN_19Qi
	/* Keyspan 19qi uses an21xx (original EZ-USB) */
	/* this does not coexist with the real Keyspan 19qi driver! */
	{ USB_DEVICE(0x06cd, 0x010b),
		.driver_info = (unsigned long) &ez1_info,
	},
#endif

	/*-------------------------------------------------------------*/

#ifdef IBOT2
	/* iBOT2 makes a nice source of high speed bulk-in data */
	/* this does not coexist with a real iBOT2 driver! */
	{ USB_DEVICE(0x0b62, 0x0059),
		.driver_info = (unsigned long) &ibot2_info,
	},
#endif

	/*-------------------------------------------------------------*/

#ifdef GENERIC
	/* module params can specify devices to use for control tests */
	{ .driver_info = (unsigned long) &generic_info, },
#endif

	/*-------------------------------------------------------------*/

	{ }
};
MODULE_DEVICE_TABLE(usb, id_table);
static struct usb_driver usbtest_driver = {
	.name = "usbtest",
	.id_table = id_table,
	.probe = usbtest_probe,
	.unlocked_ioctl = usbtest_ioctl,
	.disconnect = usbtest_disconnect,
	.suspend = usbtest_suspend,
	.resume = usbtest_resume,
};

/*-------------------------------------------------------------------------*/

static int __init usbtest_init(void)
{
#ifdef GENERIC
	if (vendor)
		pr_debug("params: vend=0x%04x prod=0x%04x\n", vendor, product);
#endif
	return usb_register(&usbtest_driver);
}
module_init(usbtest_init);

static void __exit usbtest_exit(void)
{
	usb_deregister(&usbtest_driver);
}
module_exit(usbtest_exit);

MODULE_DESCRIPTION("USB Core/HCD Testing Driver");
MODULE_LICENSE("GPL");