dmatest.c 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321
  1. /*
  2. * DMA Engine test module
  3. *
  4. * Copyright (C) 2007 Atmel Corporation
  5. * Copyright (C) 2013 Intel Corporation
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License version 2 as
  9. * published by the Free Software Foundation.
  10. */
  11. #include <linux/delay.h>
  12. #include <linux/dma-mapping.h>
  13. #include <linux/dmaengine.h>
  14. #include <linux/freezer.h>
  15. #include <linux/init.h>
  16. #include <linux/kthread.h>
  17. #include <linux/module.h>
  18. #include <linux/moduleparam.h>
  19. #include <linux/random.h>
  20. #include <linux/slab.h>
  21. #include <linux/wait.h>
  22. #include <linux/ctype.h>
  23. #include <linux/debugfs.h>
  24. #include <linux/uaccess.h>
  25. #include <linux/seq_file.h>
/* Module parameters; all are exposed read-only through sysfs (S_IRUGO). */
static unsigned int test_buf_size = 16384;
module_param(test_buf_size, uint, S_IRUGO);
MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");

static char test_channel[20];
module_param_string(channel, test_channel, sizeof(test_channel), S_IRUGO);
MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");

static char test_device[20];
module_param_string(device, test_device, sizeof(test_device), S_IRUGO);
MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");

static unsigned int threads_per_chan = 1;
module_param(threads_per_chan, uint, S_IRUGO);
MODULE_PARM_DESC(threads_per_chan,
		"Number of threads to start per channel (default: 1)");

static unsigned int max_channels;
module_param(max_channels, uint, S_IRUGO);
MODULE_PARM_DESC(max_channels,
		"Maximum number of channels to use (default: all)");

static unsigned int iterations;
module_param(iterations, uint, S_IRUGO);
MODULE_PARM_DESC(iterations,
		"Iterations before stopping test (default: infinite)");

static unsigned int xor_sources = 3;
module_param(xor_sources, uint, S_IRUGO);
MODULE_PARM_DESC(xor_sources,
		"Number of xor source buffers (default: 3)");

static unsigned int pq_sources = 3;
module_param(pq_sources, uint, S_IRUGO);
MODULE_PARM_DESC(pq_sources,
		"Number of p+q source buffers (default: 3)");
  55. static int timeout = 3000;
  56. module_param(timeout, uint, S_IRUGO);
  57. MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
  58. "Pass -1 for infinite timeout");
/* Maximum amount of mismatched bytes in buffer to print */
#define MAX_ERROR_COUNT		32

/*
 * Initialization patterns. All bytes in the source buffer has bit 7
 * set, all bytes in the destination buffer has bit 7 cleared.
 *
 * Bit 6 is set for all bytes which are to be copied by the DMA
 * engine. Bit 5 is set for all bytes which are to be overwritten by
 * the DMA engine.
 *
 * The remaining bits are the inverse of a counter which increments by
 * one for each byte address.
 */
#define PATTERN_SRC		0x80	/* bit 7: marks a source byte */
#define PATTERN_DST		0x00	/* destination base pattern */
#define PATTERN_COPY		0x40	/* bit 6: region the DMA must copy */
#define PATTERN_OVERWRITE	0x20	/* bit 5: region the DMA overwrites */
#define PATTERN_COUNT_MASK	0x1f	/* low bits: inverted byte counter */
/* Outcome classification for one test iteration. */
enum dmatest_error_type {
	DMATEST_ET_OK,			/* transfer completed and verified */
	DMATEST_ET_MAP_SRC,		/* dma_map_single() failed on a source */
	DMATEST_ET_MAP_DST,		/* dma_map_single() failed on a dest */
	DMATEST_ET_PREP,		/* device_prep_dma_*() returned NULL */
	DMATEST_ET_SUBMIT,		/* tx_submit() returned an error cookie */
	DMATEST_ET_TIMEOUT,		/* completion callback never fired */
	DMATEST_ET_DMA_ERROR,		/* completed with status DMA_ERROR */
	DMATEST_ET_DMA_IN_PROGRESS,	/* completed but still DMA_IN_PROGRESS */
	DMATEST_ET_VERIFY,		/* buffer mismatch (summary record) */
	DMATEST_ET_VERIFY_BUF,		/* buffer mismatch (per-byte details) */
};
/* One mismatched byte found while verifying a buffer. */
struct dmatest_verify_buffer {
	unsigned int index;	/* byte offset within the buffer */
	u8 expected;		/* pattern byte we expected to read */
	u8 actual;		/* byte actually read back */
};
/* Collected outcome of verifying one buffer region. */
struct dmatest_verify_result {
	unsigned int error_count;	/* total mismatches; may exceed data[] */
	struct dmatest_verify_buffer data[MAX_ERROR_COUNT]; /* first hits only */
	u8 pattern;		/* base pattern the region was filled with */
	bool is_srcbuf;		/* true when the region is a source buffer */
};
/* One recorded test outcome, linked into dmatest_result->results. */
struct dmatest_thread_result {
	struct list_head node;
	unsigned int n;		/* test iteration number */
	unsigned int src_off;	/* source offset used for the transfer */
	unsigned int dst_off;	/* destination offset used */
	unsigned int len;	/* transfer length in bytes */
	enum dmatest_error_type type;
	union {
		/* interpretation depends on @type */
		unsigned long data;		/* raw value for printing */
		dma_cookie_t cookie;		/* DMATEST_ET_SUBMIT */
		enum dma_status status;		/* DMATEST_ET_DMA_* */
		int error;			/* DMATEST_ET_MAP_* */
		struct dmatest_verify_result *vr; /* DMATEST_ET_VERIFY_BUF */
	};
};
/* Per-thread result list, keyed by the thread's name. */
struct dmatest_result {
	struct list_head node;	/* linked into dmatest_info->results */
	char *name;		/* kstrdup()ed thread name; owned here */
	struct list_head results; /* list of dmatest_thread_result */
};

struct dmatest_info;
/* State of one worker thread bound to a single DMA channel. */
struct dmatest_thread {
	struct list_head node;	/* linked into dmatest_chan->threads */
	struct dmatest_info *info;
	struct task_struct *task;
	struct dma_chan *chan;
	u8 **srcs;		/* NULL-terminated array of source buffers */
	u8 **dsts;		/* NULL-terminated array of dest buffers */
	enum dma_transaction_type type;	/* DMA_MEMCPY, DMA_XOR or DMA_PQ */
	bool done;		/* set by the thread just before exiting */
};
/* One DMA channel under test together with its worker threads. */
struct dmatest_chan {
	struct list_head node;	/* linked into dmatest_info->channels */
	struct dma_chan *chan;
	struct list_head threads; /* list of dmatest_thread */
};
/**
 * struct dmatest_params - test parameters.
 * @buf_size:		size of the memcpy test buffer
 * @channel:		bus ID of the channel to test
 * @device:		bus ID of the DMA Engine to test
 * @threads_per_chan:	number of threads to start per channel
 * @max_channels:	maximum number of channels to use
 * @iterations:		iterations before stopping test
 * @xor_sources:	number of xor source buffers
 * @pq_sources:		number of p+q source buffers
 * @timeout:		transfer timeout in msec, -1 for infinite timeout
 *
 * NOTE(review): fields mirror the module parameters above; presumably
 * copied in at init time (init code not visible in this chunk).
 */
struct dmatest_params {
	unsigned int	buf_size;
	char		channel[20];
	char		device[20];
	unsigned int	threads_per_chan;
	unsigned int	max_channels;
	unsigned int	iterations;
	unsigned int	xor_sources;
	unsigned int	pq_sources;
	int		timeout;
};
/**
 * struct dmatest_info - test information.
 * @params:	test parameters
 * @lock:	access protection to the fields of this structure
 */
struct dmatest_info {
	/* Test parameters */
	struct dmatest_params	params;

	/* Internal state */
	struct list_head	channels;	/* list of dmatest_chan */
	unsigned int		nr_channels;	/* channels on @channels */
	struct mutex		lock;

	/* debugfs related stuff */
	struct dentry		*root;
	struct dmatest_params	dbgfs_params;	/* staging copy for debugfs */

	/* Test results */
	struct list_head	results;	/* list of dmatest_result */
	struct mutex		results_lock;	/* protects @results */
};

static struct dmatest_info test_info;
  179. static bool dmatest_match_channel(struct dmatest_params *params,
  180. struct dma_chan *chan)
  181. {
  182. if (params->channel[0] == '\0')
  183. return true;
  184. return strcmp(dma_chan_name(chan), params->channel) == 0;
  185. }
  186. static bool dmatest_match_device(struct dmatest_params *params,
  187. struct dma_device *device)
  188. {
  189. if (params->device[0] == '\0')
  190. return true;
  191. return strcmp(dev_name(device->dev), params->device) == 0;
  192. }
/* Return a random unsigned long drawn from the kernel entropy pool. */
static unsigned long dmatest_random(void)
{
	unsigned long buf;

	get_random_bytes(&buf, sizeof(buf));
	return buf;
}
  199. static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len,
  200. unsigned int buf_size)
  201. {
  202. unsigned int i;
  203. u8 *buf;
  204. for (; (buf = *bufs); bufs++) {
  205. for (i = 0; i < start; i++)
  206. buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
  207. for ( ; i < start + len; i++)
  208. buf[i] = PATTERN_SRC | PATTERN_COPY
  209. | (~i & PATTERN_COUNT_MASK);
  210. for ( ; i < buf_size; i++)
  211. buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
  212. buf++;
  213. }
  214. }
  215. static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len,
  216. unsigned int buf_size)
  217. {
  218. unsigned int i;
  219. u8 *buf;
  220. for (; (buf = *bufs); bufs++) {
  221. for (i = 0; i < start; i++)
  222. buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
  223. for ( ; i < start + len; i++)
  224. buf[i] = PATTERN_DST | PATTERN_OVERWRITE
  225. | (~i & PATTERN_COUNT_MASK);
  226. for ( ; i < buf_size; i++)
  227. buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
  228. }
  229. }
/*
 * Compare bytes [start, end) of every buffer in the NULL-terminated
 * @bufs array against @pattern combined with an inverted counter that
 * restarts at @counter for each buffer.  When @vr is non-NULL, details
 * of up to MAX_ERROR_COUNT mismatches are recorded into it.
 * Returns the total number of mismatching bytes.
 * Note: @is_srcbuf is unused in this body; callers pass it for symmetry.
 */
static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs,
		unsigned int start, unsigned int end, unsigned int counter,
		u8 pattern, bool is_srcbuf)
{
	unsigned int i;
	unsigned int error_count = 0;
	u8 actual;
	u8 expected;
	u8 *buf;
	unsigned int counter_orig = counter;
	struct dmatest_verify_buffer *vb;

	for (; (buf = *bufs); bufs++) {
		counter = counter_orig;	/* every buffer restarts the counter */
		for (i = start; i < end; i++) {
			actual = buf[i];
			expected = pattern | (~counter & PATTERN_COUNT_MASK);
			if (actual != expected) {
				/* record only the first MAX_ERROR_COUNT hits */
				if (error_count < MAX_ERROR_COUNT && vr) {
					vb = &vr->data[error_count];
					vb->index = i;
					vb->expected = expected;
					vb->actual = actual;
				}
				error_count++;
			}
			counter++;
		}
	}

	if (error_count > MAX_ERROR_COUNT)
		pr_warning("%s: %u errors suppressed\n",
			   current->comm, error_count - MAX_ERROR_COUNT);

	return error_count;
}
/* poor man's completion - we want to use wait_event_freezable() on it */
struct dmatest_done {
	bool done;			/* set true by dmatest_callback() */
	wait_queue_head_t *wait;	/* woken when the transfer completes */
};
/* DMA completion callback: mark the transfer done and wake the waiter. */
static void dmatest_callback(void *arg)
{
	struct dmatest_done *done = arg;

	done->done = true;
	wake_up_all(done->wait);
}
  274. static inline void unmap_src(struct device *dev, dma_addr_t *addr, size_t len,
  275. unsigned int count)
  276. {
  277. while (count--)
  278. dma_unmap_single(dev, addr[count], len, DMA_TO_DEVICE);
  279. }
  280. static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len,
  281. unsigned int count)
  282. {
  283. while (count--)
  284. dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL);
  285. }
  286. static unsigned int min_odd(unsigned int x, unsigned int y)
  287. {
  288. unsigned int val = min(x, y);
  289. return val % 2 ? val : val - 1;
  290. }
/*
 * Render mismatch @i of @vr as a human-readable line, classifying the
 * failure from the pattern bits that differ.  Returns a static buffer:
 * the result is valid only until the next call and the function is not
 * reentrant (NOTE(review): assumes single-threaded consumers — confirm
 * against the seq_file/debugfs callers, not visible here).
 */
static char *verify_result_get_one(struct dmatest_verify_result *vr,
		unsigned int i)
{
	struct dmatest_verify_buffer *vb = &vr->data[i];
	u8 diff = vb->actual ^ vr->pattern;
	static char buf[512];
	char *msg;

	if (vr->is_srcbuf)
		msg = "srcbuf overwritten!";
	else if ((vr->pattern & PATTERN_COPY)
			&& (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
		msg = "dstbuf not copied!";
	else if (diff & PATTERN_SRC)
		msg = "dstbuf was copied!";
	else
		msg = "dstbuf mismatch!";

	snprintf(buf, sizeof(buf) - 1, "%s [0x%x] Expected %02x, got %02x", msg,
		 vb->index, vb->expected, vb->actual);

	return buf;
}
/*
 * Format one thread result as a single line.  The trailing "(%lu)"
 * prints the raw union value, whose meaning depends on the result type
 * (cookie, status, errno or pointer).  Returns a static buffer: valid
 * only until the next call, not reentrant.
 */
static char *thread_result_get(const char *name,
		struct dmatest_thread_result *tr)
{
	static const char * const messages[] = {
		[DMATEST_ET_OK]			= "No errors",
		[DMATEST_ET_MAP_SRC]		= "src mapping error",
		[DMATEST_ET_MAP_DST]		= "dst mapping error",
		[DMATEST_ET_PREP]		= "prep error",
		[DMATEST_ET_SUBMIT]		= "submit error",
		[DMATEST_ET_TIMEOUT]		= "test timed out",
		[DMATEST_ET_DMA_ERROR]		=
			"got completion callback (DMA_ERROR)",
		[DMATEST_ET_DMA_IN_PROGRESS]	=
			"got completion callback (DMA_IN_PROGRESS)",
		[DMATEST_ET_VERIFY]		= "errors",
		[DMATEST_ET_VERIFY_BUF]		= "verify errors",
	};
	static char buf[512];

	snprintf(buf, sizeof(buf) - 1,
		 "%s: #%u: %s with src_off=0x%x ""dst_off=0x%x len=0x%x (%lu)",
		 name, tr->n, messages[tr->type], tr->src_off, tr->dst_off,
		 tr->len, tr->data);

	return buf;
}
/*
 * Record one test outcome: allocate a dmatest_thread_result, fill in
 * the transfer geometry and the type-specific @data, append it to
 * @r->results under info->results_lock, and log the formatted line.
 * Returns 0 on success or -ENOMEM.
 */
static int thread_result_add(struct dmatest_info *info,
		struct dmatest_result *r, enum dmatest_error_type type,
		unsigned int n, unsigned int src_off, unsigned int dst_off,
		unsigned int len, unsigned long data)
{
	struct dmatest_thread_result *tr;

	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		return -ENOMEM;

	tr->type = type;
	tr->n = n;
	tr->src_off = src_off;
	tr->dst_off = dst_off;
	tr->len = len;
	tr->data = data;

	mutex_lock(&info->results_lock);
	list_add_tail(&tr->node, &r->results);
	mutex_unlock(&info->results_lock);

	pr_warn("%s\n", thread_result_get(r->name, tr));
	return 0;
}
/*
 * Verify one region of the test buffers and, on mismatch, attach a
 * detailed DMATEST_ET_VERIFY_BUF record to result list @r.
 *
 * @whence selects the region relative to the transfer window:
 *   < 0: [0, buf_off)                 - area before the transfer
 *   = 0: [buf_off, buf_off + len)     - the transferred area itself
 *   > 0: [buf_off + len, buf_size)    - area after the transfer
 * where buf_off is @src_off or @dst_off depending on @is_srcbuf.
 *
 * Returns the number of mismatched bytes found.  If the detail record
 * cannot be allocated, verification still runs (details are lost).
 * Ownership of the allocated record passes to the result list; it is
 * freed in result_free().
 */
static unsigned int verify_result_add(struct dmatest_info *info,
		struct dmatest_result *r, unsigned int n,
		unsigned int src_off, unsigned int dst_off, unsigned int len,
		u8 **bufs, int whence, unsigned int counter, u8 pattern,
		bool is_srcbuf)
{
	struct dmatest_verify_result *vr;
	unsigned int error_count;
	unsigned int buf_off = is_srcbuf ? src_off : dst_off;
	unsigned int start, end;

	if (whence < 0) {
		start = 0;
		end = buf_off;
	} else if (whence > 0) {
		start = buf_off + len;
		end = info->params.buf_size;
	} else {
		start = buf_off;
		end = buf_off + len;
	}

	vr = kmalloc(sizeof(*vr), GFP_KERNEL);
	if (!vr) {
		pr_warn("dmatest: No memory to store verify result\n");
		return dmatest_verify(NULL, bufs, start, end, counter, pattern,
				      is_srcbuf);
	}

	vr->pattern = pattern;
	vr->is_srcbuf = is_srcbuf;

	error_count = dmatest_verify(vr, bufs, start, end, counter, pattern,
				     is_srcbuf);
	if (error_count) {
		vr->error_count = error_count;
		thread_result_add(info, r, DMATEST_ET_VERIFY_BUF, n, src_off,
				  dst_off, len, (unsigned long)vr);
		return error_count;
	}

	kfree(vr);
	return 0;
}
/*
 * Release recorded results.  @name == NULL frees everything; otherwise
 * only the result lists whose thread name matches are freed.
 * DMATEST_ET_VERIFY_BUF entries additionally own a dmatest_verify_result.
 */
static void result_free(struct dmatest_info *info, const char *name)
{
	struct dmatest_result *r, *_r;

	mutex_lock(&info->results_lock);
	list_for_each_entry_safe(r, _r, &info->results, node) {
		struct dmatest_thread_result *tr, *_tr;

		if (name && strcmp(r->name, name))
			continue;

		list_for_each_entry_safe(tr, _tr, &r->results, node) {
			if (tr->type == DMATEST_ET_VERIFY_BUF)
				kfree(tr->vr);	/* detailed verify payload */
			list_del(&tr->node);
			kfree(tr);
		}

		kfree(r->name);
		list_del(&r->node);
		kfree(r);
	}
	mutex_unlock(&info->results_lock);
}
  415. static struct dmatest_result *result_init(struct dmatest_info *info,
  416. const char *name)
  417. {
  418. struct dmatest_result *r;
  419. r = kzalloc(sizeof(*r), GFP_KERNEL);
  420. if (r) {
  421. r->name = kstrdup(name, GFP_KERNEL);
  422. INIT_LIST_HEAD(&r->results);
  423. mutex_lock(&info->results_lock);
  424. list_add_tail(&r->node, &info->results);
  425. mutex_unlock(&info->results_lock);
  426. }
  427. return r;
  428. }
  429. /*
  430. * This function repeatedly tests DMA transfers of various lengths and
  431. * offsets for a given operation type until it is told to exit by
  432. * kthread_stop(). There may be multiple threads running this function
  433. * in parallel for a single channel, and there may be multiple channels
  434. * being tested in parallel.
  435. *
  436. * Before each test, the source and destination buffer is initialized
  437. * with a known pattern. This pattern is different depending on
  438. * whether it's in an area which is supposed to be copied or
  439. * overwritten, and different in the source and destination buffers.
  440. * So if the DMA engine doesn't copy exactly what we tell it to copy,
  441. * we'll notice.
  442. */
  443. static int dmatest_func(void *data)
  444. {
  445. DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
  446. struct dmatest_thread *thread = data;
  447. struct dmatest_done done = { .wait = &done_wait };
  448. struct dmatest_info *info;
  449. struct dmatest_params *params;
  450. struct dma_chan *chan;
  451. struct dma_device *dev;
  452. const char *thread_name;
  453. unsigned int src_off, dst_off, len;
  454. unsigned int error_count;
  455. unsigned int failed_tests = 0;
  456. unsigned int total_tests = 0;
  457. dma_cookie_t cookie;
  458. enum dma_status status;
  459. enum dma_ctrl_flags flags;
  460. u8 *pq_coefs = NULL;
  461. int ret;
  462. int src_cnt;
  463. int dst_cnt;
  464. int i;
  465. struct dmatest_result *result;
  466. thread_name = current->comm;
  467. set_freezable();
  468. ret = -ENOMEM;
  469. smp_rmb();
  470. info = thread->info;
  471. params = &info->params;
  472. chan = thread->chan;
  473. dev = chan->device;
  474. if (thread->type == DMA_MEMCPY)
  475. src_cnt = dst_cnt = 1;
  476. else if (thread->type == DMA_XOR) {
  477. /* force odd to ensure dst = src */
  478. src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
  479. dst_cnt = 1;
  480. } else if (thread->type == DMA_PQ) {
  481. /* force odd to ensure dst = src */
  482. src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
  483. dst_cnt = 2;
  484. pq_coefs = kmalloc(params->pq_sources+1, GFP_KERNEL);
  485. if (!pq_coefs)
  486. goto err_thread_type;
  487. for (i = 0; i < src_cnt; i++)
  488. pq_coefs[i] = 1;
  489. } else
  490. goto err_thread_type;
  491. result = result_init(info, thread_name);
  492. if (!result)
  493. goto err_srcs;
  494. thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL);
  495. if (!thread->srcs)
  496. goto err_srcs;
  497. for (i = 0; i < src_cnt; i++) {
  498. thread->srcs[i] = kmalloc(params->buf_size, GFP_KERNEL);
  499. if (!thread->srcs[i])
  500. goto err_srcbuf;
  501. }
  502. thread->srcs[i] = NULL;
  503. thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL);
  504. if (!thread->dsts)
  505. goto err_dsts;
  506. for (i = 0; i < dst_cnt; i++) {
  507. thread->dsts[i] = kmalloc(params->buf_size, GFP_KERNEL);
  508. if (!thread->dsts[i])
  509. goto err_dstbuf;
  510. }
  511. thread->dsts[i] = NULL;
  512. set_user_nice(current, 10);
  513. /*
  514. * src buffers are freed by the DMAEngine code with dma_unmap_single()
  515. * dst buffers are freed by ourselves below
  516. */
  517. flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT
  518. | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE;
  519. while (!kthread_should_stop()
  520. && !(params->iterations && total_tests >= params->iterations)) {
  521. struct dma_async_tx_descriptor *tx = NULL;
  522. dma_addr_t dma_srcs[src_cnt];
  523. dma_addr_t dma_dsts[dst_cnt];
  524. u8 align = 0;
  525. total_tests++;
  526. /* honor alignment restrictions */
  527. if (thread->type == DMA_MEMCPY)
  528. align = dev->copy_align;
  529. else if (thread->type == DMA_XOR)
  530. align = dev->xor_align;
  531. else if (thread->type == DMA_PQ)
  532. align = dev->pq_align;
  533. if (1 << align > params->buf_size) {
  534. pr_err("%u-byte buffer too small for %d-byte alignment\n",
  535. params->buf_size, 1 << align);
  536. break;
  537. }
  538. len = dmatest_random() % params->buf_size + 1;
  539. len = (len >> align) << align;
  540. if (!len)
  541. len = 1 << align;
  542. src_off = dmatest_random() % (params->buf_size - len + 1);
  543. dst_off = dmatest_random() % (params->buf_size - len + 1);
  544. src_off = (src_off >> align) << align;
  545. dst_off = (dst_off >> align) << align;
  546. dmatest_init_srcs(thread->srcs, src_off, len, params->buf_size);
  547. dmatest_init_dsts(thread->dsts, dst_off, len, params->buf_size);
  548. for (i = 0; i < src_cnt; i++) {
  549. u8 *buf = thread->srcs[i] + src_off;
  550. dma_srcs[i] = dma_map_single(dev->dev, buf, len,
  551. DMA_TO_DEVICE);
  552. ret = dma_mapping_error(dev->dev, dma_srcs[i]);
  553. if (ret) {
  554. unmap_src(dev->dev, dma_srcs, len, i);
  555. thread_result_add(info, result,
  556. DMATEST_ET_MAP_SRC,
  557. total_tests, src_off, dst_off,
  558. len, ret);
  559. failed_tests++;
  560. continue;
  561. }
  562. }
  563. /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
  564. for (i = 0; i < dst_cnt; i++) {
  565. dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i],
  566. params->buf_size,
  567. DMA_BIDIRECTIONAL);
  568. ret = dma_mapping_error(dev->dev, dma_dsts[i]);
  569. if (ret) {
  570. unmap_src(dev->dev, dma_srcs, len, src_cnt);
  571. unmap_dst(dev->dev, dma_dsts, params->buf_size,
  572. i);
  573. thread_result_add(info, result,
  574. DMATEST_ET_MAP_DST,
  575. total_tests, src_off, dst_off,
  576. len, ret);
  577. failed_tests++;
  578. continue;
  579. }
  580. }
  581. if (thread->type == DMA_MEMCPY)
  582. tx = dev->device_prep_dma_memcpy(chan,
  583. dma_dsts[0] + dst_off,
  584. dma_srcs[0], len,
  585. flags);
  586. else if (thread->type == DMA_XOR)
  587. tx = dev->device_prep_dma_xor(chan,
  588. dma_dsts[0] + dst_off,
  589. dma_srcs, src_cnt,
  590. len, flags);
  591. else if (thread->type == DMA_PQ) {
  592. dma_addr_t dma_pq[dst_cnt];
  593. for (i = 0; i < dst_cnt; i++)
  594. dma_pq[i] = dma_dsts[i] + dst_off;
  595. tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs,
  596. src_cnt, pq_coefs,
  597. len, flags);
  598. }
  599. if (!tx) {
  600. unmap_src(dev->dev, dma_srcs, len, src_cnt);
  601. unmap_dst(dev->dev, dma_dsts, params->buf_size,
  602. dst_cnt);
  603. thread_result_add(info, result, DMATEST_ET_PREP,
  604. total_tests, src_off, dst_off,
  605. len, 0);
  606. msleep(100);
  607. failed_tests++;
  608. continue;
  609. }
  610. done.done = false;
  611. tx->callback = dmatest_callback;
  612. tx->callback_param = &done;
  613. cookie = tx->tx_submit(tx);
  614. if (dma_submit_error(cookie)) {
  615. thread_result_add(info, result, DMATEST_ET_SUBMIT,
  616. total_tests, src_off, dst_off,
  617. len, cookie);
  618. msleep(100);
  619. failed_tests++;
  620. continue;
  621. }
  622. dma_async_issue_pending(chan);
  623. wait_event_freezable_timeout(done_wait, done.done,
  624. msecs_to_jiffies(params->timeout));
  625. status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
  626. if (!done.done) {
  627. /*
  628. * We're leaving the timed out dma operation with
  629. * dangling pointer to done_wait. To make this
  630. * correct, we'll need to allocate wait_done for
  631. * each test iteration and perform "who's gonna
  632. * free it this time?" dancing. For now, just
  633. * leave it dangling.
  634. */
  635. thread_result_add(info, result, DMATEST_ET_TIMEOUT,
  636. total_tests, src_off, dst_off,
  637. len, 0);
  638. failed_tests++;
  639. continue;
  640. } else if (status != DMA_SUCCESS) {
  641. enum dmatest_error_type type = (status == DMA_ERROR) ?
  642. DMATEST_ET_DMA_ERROR : DMATEST_ET_DMA_IN_PROGRESS;
  643. thread_result_add(info, result, type,
  644. total_tests, src_off, dst_off,
  645. len, status);
  646. failed_tests++;
  647. continue;
  648. }
  649. /* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */
  650. unmap_dst(dev->dev, dma_dsts, params->buf_size, dst_cnt);
  651. error_count = 0;
  652. pr_debug("%s: verifying source buffer...\n", thread_name);
  653. error_count += verify_result_add(info, result, total_tests,
  654. src_off, dst_off, len, thread->srcs, -1,
  655. 0, PATTERN_SRC, true);
  656. error_count += verify_result_add(info, result, total_tests,
  657. src_off, dst_off, len, thread->srcs, 0,
  658. src_off, PATTERN_SRC | PATTERN_COPY, true);
  659. error_count += verify_result_add(info, result, total_tests,
  660. src_off, dst_off, len, thread->srcs, 1,
  661. src_off + len, PATTERN_SRC, true);
  662. pr_debug("%s: verifying dest buffer...\n", thread_name);
  663. error_count += verify_result_add(info, result, total_tests,
  664. src_off, dst_off, len, thread->dsts, -1,
  665. 0, PATTERN_DST, false);
  666. error_count += verify_result_add(info, result, total_tests,
  667. src_off, dst_off, len, thread->dsts, 0,
  668. src_off, PATTERN_SRC | PATTERN_COPY, false);
  669. error_count += verify_result_add(info, result, total_tests,
  670. src_off, dst_off, len, thread->dsts, 1,
  671. dst_off + len, PATTERN_DST, false);
  672. if (error_count) {
  673. thread_result_add(info, result, DMATEST_ET_VERIFY,
  674. total_tests, src_off, dst_off,
  675. len, error_count);
  676. failed_tests++;
  677. } else {
  678. thread_result_add(info, result, DMATEST_ET_OK,
  679. total_tests, src_off, dst_off,
  680. len, 0);
  681. }
  682. }
  683. ret = 0;
  684. for (i = 0; thread->dsts[i]; i++)
  685. kfree(thread->dsts[i]);
  686. err_dstbuf:
  687. kfree(thread->dsts);
  688. err_dsts:
  689. for (i = 0; thread->srcs[i]; i++)
  690. kfree(thread->srcs[i]);
  691. err_srcbuf:
  692. kfree(thread->srcs);
  693. err_srcs:
  694. kfree(pq_coefs);
  695. err_thread_type:
  696. pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
  697. thread_name, total_tests, failed_tests, ret);
  698. /* terminate all transfers on specified channels */
  699. if (ret)
  700. dmaengine_terminate_all(chan);
  701. thread->done = true;
  702. if (params->iterations > 0)
  703. while (!kthread_should_stop()) {
  704. DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
  705. interruptible_sleep_on(&wait_dmatest_exit);
  706. }
  707. return ret;
  708. }
/*
 * Stop and reap every worker thread of @dtc, terminate any transfers
 * still pending on the channel, then free the container itself.
 */
static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
{
	struct dmatest_thread *thread;
	struct dmatest_thread *_thread;
	int ret;

	list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
		ret = kthread_stop(thread->task);
		pr_debug("dmatest: thread %s exited with status %d\n",
			 thread->task->comm, ret);
		list_del(&thread->node);
		kfree(thread);
	}

	/* terminate all transfers on specified channels */
	dmaengine_terminate_all(dtc->chan);

	kfree(dtc);
}
/*
 * Start params->threads_per_chan kthreads running dmatest_func() for
 * transaction type @type on @dtc's channel.  Returns the number of
 * threads actually started (possibly fewer than requested on allocation
 * failure), or -EINVAL for an unsupported type.
 */
static int dmatest_add_threads(struct dmatest_info *info,
		struct dmatest_chan *dtc, enum dma_transaction_type type)
{
	struct dmatest_params *params = &info->params;
	struct dmatest_thread *thread;
	struct dma_chan *chan = dtc->chan;
	char *op;
	unsigned int i;

	if (type == DMA_MEMCPY)
		op = "copy";
	else if (type == DMA_XOR)
		op = "xor";
	else if (type == DMA_PQ)
		op = "pq";
	else
		return -EINVAL;

	for (i = 0; i < params->threads_per_chan; i++) {
		thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
		if (!thread) {
			pr_warning("dmatest: No memory for %s-%s%u\n",
				   dma_chan_name(chan), op, i);
			break;
		}
		thread->info = info;
		thread->chan = dtc->chan;
		thread->type = type;
		/* publish the fields above before dmatest_func() reads them */
		smp_wmb();
		thread->task = kthread_run(dmatest_func, thread, "%s-%s%u",
				dma_chan_name(chan), op, i);
		if (IS_ERR(thread->task)) {
			pr_warning("dmatest: Failed to run thread %s-%s%u\n",
					dma_chan_name(chan), op, i);
			kfree(thread);
			break;
		}

		/* srcbuf and dstbuf are allocated by the thread itself */

		list_add_tail(&thread->node, &dtc->threads);
	}

	return i;
}
/*
 * Wrap @chan in a dmatest_chan, spawn test threads for every capability
 * its device advertises (memcpy, xor, pq), and link the channel into
 * info->channels.  Returns 0 on success or -ENOMEM.  A channel with
 * zero started threads is still added (and cleaned up later).
 */
static int dmatest_add_channel(struct dmatest_info *info,
		struct dma_chan *chan)
{
	struct dmatest_chan *dtc;
	struct dma_device *dma_dev = chan->device;
	unsigned int thread_count = 0;
	int cnt;

	dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
	if (!dtc) {
		pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan));
		return -ENOMEM;
	}

	dtc->chan = chan;
	INIT_LIST_HEAD(&dtc->threads);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY);
		thread_count += cnt > 0 ? cnt : 0; /* -EINVAL counts as 0 */
	}
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(info, dtc, DMA_XOR);
		thread_count += cnt > 0 ? cnt : 0;
	}
	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(info, dtc, DMA_PQ);
		thread_count += cnt > 0 ? cnt : 0;
	}

	pr_info("dmatest: Started %u threads using %s\n",
		thread_count, dma_chan_name(chan));

	list_add_tail(&dtc->node, &info->channels);
	info->nr_channels++;

	return 0;
}
  797. static bool filter(struct dma_chan *chan, void *param)
  798. {
  799. struct dmatest_params *params = param;
  800. if (!dmatest_match_channel(params, chan) ||
  801. !dmatest_match_device(params, chan->device))
  802. return false;
  803. else
  804. return true;
  805. }
  806. static int __run_threaded_test(struct dmatest_info *info)
  807. {
  808. dma_cap_mask_t mask;
  809. struct dma_chan *chan;
  810. struct dmatest_params *params = &info->params;
  811. int err = 0;
  812. dma_cap_zero(mask);
  813. dma_cap_set(DMA_MEMCPY, mask);
  814. for (;;) {
  815. chan = dma_request_channel(mask, filter, params);
  816. if (chan) {
  817. err = dmatest_add_channel(info, chan);
  818. if (err) {
  819. dma_release_channel(chan);
  820. break; /* add_channel failed, punt */
  821. }
  822. } else
  823. break; /* no more channels available */
  824. if (params->max_channels &&
  825. info->nr_channels >= params->max_channels)
  826. break; /* we have all we need */
  827. }
  828. return err;
  829. }
#ifndef MODULE
/*
 * Locked wrapper around __run_threaded_test().  Only needed when dmatest
 * is built in, where dmatest_init() starts the test immediately; as a
 * module the test is started through the debugfs "run" file instead.
 */
static int run_threaded_test(struct dmatest_info *info)
{
	int ret;
	mutex_lock(&info->lock);
	ret = __run_threaded_test(info);
	mutex_unlock(&info->lock);
	return ret;
}
#endif
  840. static void __stop_threaded_test(struct dmatest_info *info)
  841. {
  842. struct dmatest_chan *dtc, *_dtc;
  843. struct dma_chan *chan;
  844. list_for_each_entry_safe(dtc, _dtc, &info->channels, node) {
  845. list_del(&dtc->node);
  846. chan = dtc->chan;
  847. dmatest_cleanup_channel(dtc);
  848. pr_debug("dmatest: dropped channel %s\n", dma_chan_name(chan));
  849. dma_release_channel(chan);
  850. }
  851. info->nr_channels = 0;
  852. }
/* Locked front end for __stop_threaded_test(); used from module exit. */
static void stop_threaded_test(struct dmatest_info *info)
{
	mutex_lock(&info->lock);
	__stop_threaded_test(info);
	mutex_unlock(&info->lock);
}
  859. static int __restart_threaded_test(struct dmatest_info *info, bool run)
  860. {
  861. struct dmatest_params *params = &info->params;
  862. /* Stop any running test first */
  863. __stop_threaded_test(info);
  864. if (run == false)
  865. return 0;
  866. /* Clear results from previous run */
  867. result_free(info, NULL);
  868. /* Copy test parameters */
  869. memcpy(params, &info->dbgfs_params, sizeof(*params));
  870. /* Run test with new parameters */
  871. return __run_threaded_test(info);
  872. }
  873. static bool __is_threaded_test_run(struct dmatest_info *info)
  874. {
  875. struct dmatest_chan *dtc;
  876. list_for_each_entry(dtc, &info->channels, node) {
  877. struct dmatest_thread *thread;
  878. list_for_each_entry(thread, &dtc->threads, node) {
  879. if (!thread->done)
  880. return true;
  881. }
  882. }
  883. return false;
  884. }
  885. static ssize_t dtf_write_string(void *to, size_t available, loff_t *ppos,
  886. const void __user *from, size_t count)
  887. {
  888. char tmp[20];
  889. ssize_t len;
  890. len = simple_write_to_buffer(tmp, sizeof(tmp) - 1, ppos, from, count);
  891. if (len >= 0) {
  892. tmp[len] = '\0';
  893. strlcpy(to, strim(tmp), available);
  894. }
  895. return len;
  896. }
  897. static ssize_t dtf_read_channel(struct file *file, char __user *buf,
  898. size_t count, loff_t *ppos)
  899. {
  900. struct dmatest_info *info = file->private_data;
  901. return simple_read_from_buffer(buf, count, ppos,
  902. info->dbgfs_params.channel,
  903. strlen(info->dbgfs_params.channel));
  904. }
  905. static ssize_t dtf_write_channel(struct file *file, const char __user *buf,
  906. size_t size, loff_t *ppos)
  907. {
  908. struct dmatest_info *info = file->private_data;
  909. return dtf_write_string(info->dbgfs_params.channel,
  910. sizeof(info->dbgfs_params.channel),
  911. ppos, buf, size);
  912. }
/* File operations for the debugfs "channel" filter file. */
static const struct file_operations dtf_channel_fops = {
	.read	= dtf_read_channel,
	.write	= dtf_write_channel,
	.open	= simple_open,
	.llseek	= default_llseek,
};
  919. static ssize_t dtf_read_device(struct file *file, char __user *buf,
  920. size_t count, loff_t *ppos)
  921. {
  922. struct dmatest_info *info = file->private_data;
  923. return simple_read_from_buffer(buf, count, ppos,
  924. info->dbgfs_params.device,
  925. strlen(info->dbgfs_params.device));
  926. }
  927. static ssize_t dtf_write_device(struct file *file, const char __user *buf,
  928. size_t size, loff_t *ppos)
  929. {
  930. struct dmatest_info *info = file->private_data;
  931. return dtf_write_string(info->dbgfs_params.device,
  932. sizeof(info->dbgfs_params.device),
  933. ppos, buf, size);
  934. }
/* File operations for the debugfs "device" filter file. */
static const struct file_operations dtf_device_fops = {
	.read	= dtf_read_device,
	.write	= dtf_write_device,
	.open	= simple_open,
	.llseek	= default_llseek,
};
  941. static ssize_t dtf_read_run(struct file *file, char __user *user_buf,
  942. size_t count, loff_t *ppos)
  943. {
  944. struct dmatest_info *info = file->private_data;
  945. char buf[3];
  946. mutex_lock(&info->lock);
  947. if (__is_threaded_test_run(info)) {
  948. buf[0] = 'Y';
  949. } else {
  950. __stop_threaded_test(info);
  951. buf[0] = 'N';
  952. }
  953. mutex_unlock(&info->lock);
  954. buf[1] = '\n';
  955. buf[2] = 0x00;
  956. return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
  957. }
  958. static ssize_t dtf_write_run(struct file *file, const char __user *user_buf,
  959. size_t count, loff_t *ppos)
  960. {
  961. struct dmatest_info *info = file->private_data;
  962. char buf[16];
  963. bool bv;
  964. int ret = 0;
  965. if (copy_from_user(buf, user_buf, min(count, (sizeof(buf) - 1))))
  966. return -EFAULT;
  967. if (strtobool(buf, &bv) == 0) {
  968. mutex_lock(&info->lock);
  969. if (__is_threaded_test_run(info))
  970. ret = -EBUSY;
  971. else
  972. ret = __restart_threaded_test(info, bv);
  973. mutex_unlock(&info->lock);
  974. }
  975. return ret ? ret : count;
  976. }
/* File operations for the debugfs "run" start/stop control file. */
static const struct file_operations dtf_run_fops = {
	.read	= dtf_read_run,
	.write	= dtf_write_run,
	.open	= simple_open,
	.llseek	= default_llseek,
};
  983. static int dtf_results_show(struct seq_file *sf, void *data)
  984. {
  985. struct dmatest_info *info = sf->private;
  986. struct dmatest_result *result;
  987. struct dmatest_thread_result *tr;
  988. unsigned int i;
  989. mutex_lock(&info->results_lock);
  990. list_for_each_entry(result, &info->results, node) {
  991. list_for_each_entry(tr, &result->results, node) {
  992. seq_printf(sf, "%s\n",
  993. thread_result_get(result->name, tr));
  994. if (tr->type == DMATEST_ET_VERIFY_BUF) {
  995. for (i = 0; i < tr->vr->error_count; i++) {
  996. seq_printf(sf, "\t%s\n",
  997. verify_result_get_one(tr->vr, i));
  998. }
  999. }
  1000. }
  1001. }
  1002. mutex_unlock(&info->results_lock);
  1003. return 0;
  1004. }
/* Open hook for "results": hand dtf_results_show() to seq_file. */
static int dtf_results_open(struct inode *inode, struct file *file)
{
	return single_open(file, dtf_results_show, inode->i_private);
}
/* File operations for the read-only debugfs "results" file. */
static const struct file_operations dtf_results_fops = {
	.open		= dtf_results_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
  1015. static int dmatest_register_dbgfs(struct dmatest_info *info)
  1016. {
  1017. struct dentry *d;
  1018. struct dmatest_params *params = &info->dbgfs_params;
  1019. int ret = -ENOMEM;
  1020. d = debugfs_create_dir("dmatest", NULL);
  1021. if (IS_ERR(d))
  1022. return PTR_ERR(d);
  1023. if (!d)
  1024. goto err_root;
  1025. info->root = d;
  1026. /* Copy initial values */
  1027. memcpy(params, &info->params, sizeof(*params));
  1028. /* Test parameters */
  1029. d = debugfs_create_u32("test_buf_size", S_IWUSR | S_IRUGO, info->root,
  1030. (u32 *)&params->buf_size);
  1031. if (IS_ERR_OR_NULL(d))
  1032. goto err_node;
  1033. d = debugfs_create_file("channel", S_IRUGO | S_IWUSR, info->root,
  1034. info, &dtf_channel_fops);
  1035. if (IS_ERR_OR_NULL(d))
  1036. goto err_node;
  1037. d = debugfs_create_file("device", S_IRUGO | S_IWUSR, info->root,
  1038. info, &dtf_device_fops);
  1039. if (IS_ERR_OR_NULL(d))
  1040. goto err_node;
  1041. d = debugfs_create_u32("threads_per_chan", S_IWUSR | S_IRUGO, info->root,
  1042. (u32 *)&params->threads_per_chan);
  1043. if (IS_ERR_OR_NULL(d))
  1044. goto err_node;
  1045. d = debugfs_create_u32("max_channels", S_IWUSR | S_IRUGO, info->root,
  1046. (u32 *)&params->max_channels);
  1047. if (IS_ERR_OR_NULL(d))
  1048. goto err_node;
  1049. d = debugfs_create_u32("iterations", S_IWUSR | S_IRUGO, info->root,
  1050. (u32 *)&params->iterations);
  1051. if (IS_ERR_OR_NULL(d))
  1052. goto err_node;
  1053. d = debugfs_create_u32("xor_sources", S_IWUSR | S_IRUGO, info->root,
  1054. (u32 *)&params->xor_sources);
  1055. if (IS_ERR_OR_NULL(d))
  1056. goto err_node;
  1057. d = debugfs_create_u32("pq_sources", S_IWUSR | S_IRUGO, info->root,
  1058. (u32 *)&params->pq_sources);
  1059. if (IS_ERR_OR_NULL(d))
  1060. goto err_node;
  1061. d = debugfs_create_u32("timeout", S_IWUSR | S_IRUGO, info->root,
  1062. (u32 *)&params->timeout);
  1063. if (IS_ERR_OR_NULL(d))
  1064. goto err_node;
  1065. /* Run or stop threaded test */
  1066. d = debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root,
  1067. info, &dtf_run_fops);
  1068. if (IS_ERR_OR_NULL(d))
  1069. goto err_node;
  1070. /* Results of test in progress */
  1071. d = debugfs_create_file("results", S_IRUGO, info->root, info,
  1072. &dtf_results_fops);
  1073. if (IS_ERR_OR_NULL(d))
  1074. goto err_node;
  1075. return 0;
  1076. err_node:
  1077. debugfs_remove_recursive(info->root);
  1078. err_root:
  1079. pr_err("dmatest: Failed to initialize debugfs\n");
  1080. return ret;
  1081. }
  1082. static int __init dmatest_init(void)
  1083. {
  1084. struct dmatest_info *info = &test_info;
  1085. struct dmatest_params *params = &info->params;
  1086. int ret;
  1087. memset(info, 0, sizeof(*info));
  1088. mutex_init(&info->lock);
  1089. INIT_LIST_HEAD(&info->channels);
  1090. mutex_init(&info->results_lock);
  1091. INIT_LIST_HEAD(&info->results);
  1092. /* Set default parameters */
  1093. params->buf_size = test_buf_size;
  1094. strlcpy(params->channel, test_channel, sizeof(params->channel));
  1095. strlcpy(params->device, test_device, sizeof(params->device));
  1096. params->threads_per_chan = threads_per_chan;
  1097. params->max_channels = max_channels;
  1098. params->iterations = iterations;
  1099. params->xor_sources = xor_sources;
  1100. params->pq_sources = pq_sources;
  1101. params->timeout = timeout;
  1102. ret = dmatest_register_dbgfs(info);
  1103. if (ret)
  1104. return ret;
  1105. #ifdef MODULE
  1106. return 0;
  1107. #else
  1108. return run_threaded_test(info);
  1109. #endif
  1110. }
/* When compiled in, wait for DMA drivers to load before starting. */
late_initcall(dmatest_init);
  1113. static void __exit dmatest_exit(void)
  1114. {
  1115. struct dmatest_info *info = &test_info;
  1116. debugfs_remove_recursive(info->root);
  1117. stop_threaded_test(info);
  1118. result_free(info, NULL);
  1119. }
/* Module boilerplate: exit hook, authorship, and licensing. */
module_exit(dmatest_exit);
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_LICENSE("GPL v2");