dmatest.c 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320
  1. /*
  2. * DMA Engine test module
  3. *
  4. * Copyright (C) 2007 Atmel Corporation
  5. * Copyright (C) 2013 Intel Corporation
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License version 2 as
  9. * published by the Free Software Foundation.
  10. */
  11. #include <linux/delay.h>
  12. #include <linux/dma-mapping.h>
  13. #include <linux/dmaengine.h>
  14. #include <linux/freezer.h>
  15. #include <linux/init.h>
  16. #include <linux/kthread.h>
  17. #include <linux/module.h>
  18. #include <linux/moduleparam.h>
  19. #include <linux/random.h>
  20. #include <linux/slab.h>
  21. #include <linux/wait.h>
  22. #include <linux/ctype.h>
  23. #include <linux/debugfs.h>
  24. #include <linux/uaccess.h>
  25. #include <linux/seq_file.h>
  26. static unsigned int test_buf_size = 16384;
  27. module_param(test_buf_size, uint, S_IRUGO);
  28. MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
  29. static char test_channel[20];
  30. module_param_string(channel, test_channel, sizeof(test_channel), S_IRUGO);
  31. MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");
  32. static char test_device[20];
  33. module_param_string(device, test_device, sizeof(test_device), S_IRUGO);
  34. MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");
  35. static unsigned int threads_per_chan = 1;
  36. module_param(threads_per_chan, uint, S_IRUGO);
  37. MODULE_PARM_DESC(threads_per_chan,
  38. "Number of threads to start per channel (default: 1)");
  39. static unsigned int max_channels;
  40. module_param(max_channels, uint, S_IRUGO);
  41. MODULE_PARM_DESC(max_channels,
  42. "Maximum number of channels to use (default: all)");
  43. static unsigned int iterations;
  44. module_param(iterations, uint, S_IRUGO);
  45. MODULE_PARM_DESC(iterations,
  46. "Iterations before stopping test (default: infinite)");
  47. static unsigned int xor_sources = 3;
  48. module_param(xor_sources, uint, S_IRUGO);
  49. MODULE_PARM_DESC(xor_sources,
  50. "Number of xor source buffers (default: 3)");
  51. static unsigned int pq_sources = 3;
  52. module_param(pq_sources, uint, S_IRUGO);
  53. MODULE_PARM_DESC(pq_sources,
  54. "Number of p+q source buffers (default: 3)");
  55. static int timeout = 3000;
  56. module_param(timeout, uint, S_IRUGO);
  57. MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
  58. "Pass -1 for infinite timeout");
/* Maximum amount of mismatched bytes in buffer to print */
#define MAX_ERROR_COUNT		32

/*
 * Initialization patterns. All bytes in the source buffer has bit 7
 * set, all bytes in the destination buffer has bit 7 cleared.
 *
 * Bit 6 is set for all bytes which are to be copied by the DMA
 * engine. Bit 5 is set for all bytes which are to be overwritten by
 * the DMA engine.
 *
 * The remaining bits are the inverse of a counter which increments by
 * one for each byte address.
 */
#define PATTERN_SRC		0x80	/* bit 7 set: source buffer byte */
#define PATTERN_DST		0x00	/* bit 7 clear: destination buffer byte */
#define PATTERN_COPY		0x40	/* bit 6: byte is inside the copied region */
#define PATTERN_OVERWRITE	0x20	/* bit 5: byte is to be overwritten by DMA */
#define PATTERN_COUNT_MASK	0x1f	/* low 5 bits: inverted per-byte counter */
/* Classification of a single test iteration's outcome. */
enum dmatest_error_type {
	DMATEST_ET_OK,			/* iteration completed, buffers verified */
	DMATEST_ET_MAP_SRC,		/* dma_map_single() failed on a source */
	DMATEST_ET_MAP_DST,		/* dma_map_single() failed on a destination */
	DMATEST_ET_PREP,		/* device_prep_dma_*() returned NULL */
	DMATEST_ET_SUBMIT,		/* tx_submit() returned an error cookie */
	DMATEST_ET_TIMEOUT,		/* completion callback never fired */
	DMATEST_ET_DMA_ERROR,		/* completed with status DMA_ERROR */
	DMATEST_ET_DMA_IN_PROGRESS,	/* completed with status DMA_IN_PROGRESS */
	DMATEST_ET_VERIFY,		/* summary entry: buffer verification failed */
	DMATEST_ET_VERIFY_BUF,		/* detail entry carrying a dmatest_verify_result */
};
/* One mismatched byte captured during buffer verification. */
struct dmatest_verify_buffer {
	unsigned int	index;		/* byte offset within the buffer */
	u8		expected;	/* pattern byte we expected to read */
	u8		actual;		/* byte actually read back */
};

/* Verification failure report; details capped at MAX_ERROR_COUNT entries. */
struct dmatest_verify_result {
	unsigned int	error_count;	/* total mismatches, may exceed ARRAY size */
	struct dmatest_verify_buffer	data[MAX_ERROR_COUNT];
	u8		pattern;	/* base pattern the region was checked against */
	bool		is_srcbuf;	/* true if a source buffer was verified */
};
/*
 * Outcome of a single test iteration, linked into dmatest_result.results.
 * Which union member is valid depends on @type: a cookie for
 * DMATEST_ET_SUBMIT, a status for the DMA_* types, a verify-result
 * pointer for DMATEST_ET_VERIFY_BUF, and a raw value otherwise.
 */
struct dmatest_thread_result {
	struct list_head	node;
	unsigned int		n;		/* iteration number (total_tests) */
	unsigned int		src_off;
	unsigned int		dst_off;
	unsigned int		len;
	enum dmatest_error_type	type;
	union {
		unsigned long			data;
		dma_cookie_t			cookie;
		enum dma_status			status;
		int				error;
		struct dmatest_verify_result	*vr;
	};
};

/* Per-thread collection of iteration results, keyed by thread name. */
struct dmatest_result {
	struct list_head	node;		/* link in dmatest_info.results */
	char			*name;		/* thread name (kstrdup'd) */
	struct list_head	results;	/* list of dmatest_thread_result */
};
struct dmatest_info;

/* One test kthread bound to a channel and a transaction type. */
struct dmatest_thread {
	struct list_head	node;	/* link in dmatest_chan.threads */
	struct dmatest_info	*info;
	struct task_struct	*task;
	struct dma_chan		*chan;
	u8			**srcs;	/* NULL-terminated array of source buffers */
	u8			**dsts;	/* NULL-terminated array of destination buffers */
	enum dma_transaction_type type;
	bool			done;	/* set by the thread when it finishes */
};

/* A DMA channel under test, together with its worker threads. */
struct dmatest_chan {
	struct list_head	node;	/* link in dmatest_info.channels */
	struct dma_chan		*chan;
	struct list_head	threads; /* list of dmatest_thread */
};
/**
 * struct dmatest_params - test parameters.
 * @buf_size:		size of the memcpy test buffer
 * @channel:		bus ID of the channel to test
 * @device:		bus ID of the DMA Engine to test
 * @threads_per_chan:	number of threads to start per channel
 * @max_channels:	maximum number of channels to use
 * @iterations:		iterations before stopping test
 * @xor_sources:	number of xor source buffers
 * @pq_sources:		number of p+q source buffers
 * @timeout:		transfer timeout in msec, -1 for infinite timeout
 */
struct dmatest_params {
	unsigned int	buf_size;
	char		channel[20];
	char		device[20];
	unsigned int	threads_per_chan;
	unsigned int	max_channels;
	unsigned int	iterations;
	unsigned int	xor_sources;
	unsigned int	pq_sources;
	int		timeout;	/* signed so -1 can mean "wait forever" */
};
/**
 * struct dmatest_info - test information.
 * @params:	test parameters
 * @channels:	list of struct dmatest_chan currently under test
 * @nr_channels: number of entries on @channels
 * @lock:	access protection to the fields of this structure
 * @root:	debugfs root directory for the module
 * @dbgfs_params: parameter copy exposed via debugfs (presumably a staging
 *		area edited from userspace; the debugfs handlers are not
 *		visible in this chunk - confirm)
 * @results:	list of struct dmatest_result, one per thread name
 * @results_lock: protects @results and the per-result lists
 */
struct dmatest_info {
	/* Test parameters */
	struct dmatest_params	params;

	/* Internal state */
	struct list_head	channels;
	unsigned int		nr_channels;
	struct mutex		lock;

	/* debugfs related stuff */
	struct dentry		*root;
	struct dmatest_params	dbgfs_params;

	/* Test results */
	struct list_head	results;
	struct mutex		results_lock;
};

static struct dmatest_info	test_info;
  179. static bool dmatest_match_channel(struct dmatest_params *params,
  180. struct dma_chan *chan)
  181. {
  182. if (params->channel[0] == '\0')
  183. return true;
  184. return strcmp(dma_chan_name(chan), params->channel) == 0;
  185. }
  186. static bool dmatest_match_device(struct dmatest_params *params,
  187. struct dma_device *device)
  188. {
  189. if (params->device[0] == '\0')
  190. return true;
  191. return strcmp(dev_name(device->dev), params->device) == 0;
  192. }
  193. static unsigned long dmatest_random(void)
  194. {
  195. unsigned long buf;
  196. get_random_bytes(&buf, sizeof(buf));
  197. return buf;
  198. }
  199. static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len,
  200. unsigned int buf_size)
  201. {
  202. unsigned int i;
  203. u8 *buf;
  204. for (; (buf = *bufs); bufs++) {
  205. for (i = 0; i < start; i++)
  206. buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
  207. for ( ; i < start + len; i++)
  208. buf[i] = PATTERN_SRC | PATTERN_COPY
  209. | (~i & PATTERN_COUNT_MASK);
  210. for ( ; i < buf_size; i++)
  211. buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
  212. buf++;
  213. }
  214. }
  215. static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len,
  216. unsigned int buf_size)
  217. {
  218. unsigned int i;
  219. u8 *buf;
  220. for (; (buf = *bufs); bufs++) {
  221. for (i = 0; i < start; i++)
  222. buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
  223. for ( ; i < start + len; i++)
  224. buf[i] = PATTERN_DST | PATTERN_OVERWRITE
  225. | (~i & PATTERN_COUNT_MASK);
  226. for ( ; i < buf_size; i++)
  227. buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
  228. }
  229. }
  230. static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs,
  231. unsigned int start, unsigned int end, unsigned int counter,
  232. u8 pattern, bool is_srcbuf)
  233. {
  234. unsigned int i;
  235. unsigned int error_count = 0;
  236. u8 actual;
  237. u8 expected;
  238. u8 *buf;
  239. unsigned int counter_orig = counter;
  240. struct dmatest_verify_buffer *vb;
  241. for (; (buf = *bufs); bufs++) {
  242. counter = counter_orig;
  243. for (i = start; i < end; i++) {
  244. actual = buf[i];
  245. expected = pattern | (~counter & PATTERN_COUNT_MASK);
  246. if (actual != expected) {
  247. if (error_count < MAX_ERROR_COUNT && vr) {
  248. vb = &vr->data[error_count];
  249. vb->index = i;
  250. vb->expected = expected;
  251. vb->actual = actual;
  252. }
  253. error_count++;
  254. }
  255. counter++;
  256. }
  257. }
  258. if (error_count > MAX_ERROR_COUNT)
  259. pr_warning("%s: %u errors suppressed\n",
  260. current->comm, error_count - MAX_ERROR_COUNT);
  261. return error_count;
  262. }
/* poor man's completion - we want to use wait_event_freezable() on it */
struct dmatest_done {
	bool			done;	/* set once the DMA completion fired */
	wait_queue_head_t	*wait;	/* test thread sleeps here */
};

/* DMA completion callback: mark the iteration done and wake the test thread. */
static void dmatest_callback(void *arg)
{
	struct dmatest_done *done = arg;

	done->done = true;
	wake_up_all(done->wait);
}
  274. static inline void unmap_src(struct device *dev, dma_addr_t *addr, size_t len,
  275. unsigned int count)
  276. {
  277. while (count--)
  278. dma_unmap_single(dev, addr[count], len, DMA_TO_DEVICE);
  279. }
  280. static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len,
  281. unsigned int count)
  282. {
  283. while (count--)
  284. dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL);
  285. }
  286. static unsigned int min_odd(unsigned int x, unsigned int y)
  287. {
  288. unsigned int val = min(x, y);
  289. return val % 2 ? val : val - 1;
  290. }
/*
 * Format the @i-th recorded mismatch from @vr as human-readable text.
 * The diagnosis is derived from which pattern bits differ from the
 * expected base pattern (see the PATTERN_* definitions above).
 *
 * Returns a pointer to a static buffer: valid only until the next
 * call, and not safe for concurrent callers.
 */
static char *verify_result_get_one(struct dmatest_verify_result *vr,
		unsigned int i)
{
	struct dmatest_verify_buffer *vb = &vr->data[i];
	u8 diff = vb->actual ^ vr->pattern;
	static char buf[512];
	char *msg;

	if (vr->is_srcbuf)
		msg = "srcbuf overwritten!";
	else if ((vr->pattern & PATTERN_COPY)
			&& (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
		msg = "dstbuf not copied!";
	else if (diff & PATTERN_SRC)
		msg = "dstbuf was copied!";
	else
		msg = "dstbuf mismatch!";

	snprintf(buf, sizeof(buf) - 1, "%s [0x%x] Expected %02x, got %02x", msg,
			vb->index, vb->expected, vb->actual);

	return buf;
}
/*
 * Render one thread result as "<name>: #<n>: <message> with src_off=...".
 * The union member is printed as a raw unsigned long regardless of @tr->type.
 *
 * Returns a pointer to a static buffer: valid only until the next
 * call, and not safe for concurrent callers.
 */
static char *thread_result_get(const char *name,
		struct dmatest_thread_result *tr)
{
	static const char * const messages[] = {
		[DMATEST_ET_OK]			= "No errors",
		[DMATEST_ET_MAP_SRC]		= "src mapping error",
		[DMATEST_ET_MAP_DST]		= "dst mapping error",
		[DMATEST_ET_PREP]		= "prep error",
		[DMATEST_ET_SUBMIT]		= "submit error",
		[DMATEST_ET_TIMEOUT]		= "test timed out",
		[DMATEST_ET_DMA_ERROR]		=
			"got completion callback (DMA_ERROR)",
		[DMATEST_ET_DMA_IN_PROGRESS]	=
			"got completion callback (DMA_IN_PROGRESS)",
		[DMATEST_ET_VERIFY]		= "errors",
		[DMATEST_ET_VERIFY_BUF]		= "verify errors",
	};
	static char buf[512];

	snprintf(buf, sizeof(buf) - 1,
			"%s: #%u: %s with src_off=0x%x ""dst_off=0x%x len=0x%x (%lu)",
			name, tr->n, messages[tr->type], tr->src_off, tr->dst_off,
			tr->len, tr->data);

	return buf;
}
/*
 * Allocate a dmatest_thread_result, populate it and append it to @r's
 * result list under results_lock; the formatted result is also logged.
 * @data is stored into the union and interpreted according to @type
 * (cookie, status, error code, or dmatest_verify_result pointer).
 *
 * Returns 0 on success, -ENOMEM if the record cannot be allocated.
 */
static int thread_result_add(struct dmatest_info *info,
		struct dmatest_result *r, enum dmatest_error_type type,
		unsigned int n, unsigned int src_off, unsigned int dst_off,
		unsigned int len, unsigned long data)
{
	struct dmatest_thread_result *tr;

	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		return -ENOMEM;

	tr->type = type;
	tr->n = n;
	tr->src_off = src_off;
	tr->dst_off = dst_off;
	tr->len = len;
	tr->data = data;

	mutex_lock(&info->results_lock);
	list_add_tail(&tr->node, &r->results);
	mutex_unlock(&info->results_lock);

	pr_warn("%s\n", thread_result_get(r->name, tr));
	return 0;
}
/*
 * Verify one region of the source or destination buffers and record the
 * outcome. @whence selects the region relative to the transfer window
 * (off = src_off or dst_off depending on @is_srcbuf):
 *   < 0: [0, off)          - bytes before the window
 *     0: [off, off+len)    - the window itself
 *   > 0: [off+len, buf_size) - bytes after the window
 *
 * On mismatch a detailed dmatest_verify_result is attached to @r as a
 * DMATEST_ET_VERIFY_BUF entry, which then owns the allocation (freed in
 * result_free()). If the detail record cannot be allocated the errors
 * are still counted, just without per-byte detail.
 *
 * Returns the number of mismatching bytes.
 */
static unsigned int verify_result_add(struct dmatest_info *info,
		struct dmatest_result *r, unsigned int n,
		unsigned int src_off, unsigned int dst_off, unsigned int len,
		u8 **bufs, int whence, unsigned int counter, u8 pattern,
		bool is_srcbuf)
{
	struct dmatest_verify_result *vr;
	unsigned int error_count;
	unsigned int buf_off = is_srcbuf ? src_off : dst_off;
	unsigned int start, end;

	if (whence < 0) {
		start = 0;
		end = buf_off;
	} else if (whence > 0) {
		start = buf_off + len;
		end = info->params.buf_size;
	} else {
		start = buf_off;
		end = buf_off + len;
	}

	vr = kmalloc(sizeof(*vr), GFP_KERNEL);
	if (!vr) {
		pr_warn("dmatest: No memory to store verify result\n");
		return dmatest_verify(NULL, bufs, start, end, counter, pattern,
				is_srcbuf);
	}

	vr->pattern = pattern;
	vr->is_srcbuf = is_srcbuf;

	error_count = dmatest_verify(vr, bufs, start, end, counter, pattern,
			is_srcbuf);
	if (error_count) {
		vr->error_count = error_count;
		thread_result_add(info, r, DMATEST_ET_VERIFY_BUF, n, src_off,
				dst_off, len, (unsigned long)vr);
		return error_count;
	}

	/* clean region: discard the unused detail record */
	kfree(vr);
	return 0;
}
/*
 * Free recorded test results. A non-NULL @name releases only the
 * results recorded under that thread name; NULL releases everything
 * on info->results.
 */
static void result_free(struct dmatest_info *info, const char *name)
{
	struct dmatest_result *r, *_r;

	mutex_lock(&info->results_lock);
	list_for_each_entry_safe(r, _r, &info->results, node) {
		struct dmatest_thread_result *tr, *_tr;

		if (name && strcmp(r->name, name))
			continue;

		list_for_each_entry_safe(tr, _tr, &r->results, node) {
			/* only VERIFY_BUF entries own a vr allocation */
			if (tr->type == DMATEST_ET_VERIFY_BUF)
				kfree(tr->vr);
			list_del(&tr->node);
			kfree(tr);
		}

		kfree(r->name);
		list_del(&r->node);
		kfree(r);
	}
	mutex_unlock(&info->results_lock);
}
  415. static struct dmatest_result *result_init(struct dmatest_info *info,
  416. const char *name)
  417. {
  418. struct dmatest_result *r;
  419. r = kzalloc(sizeof(*r), GFP_KERNEL);
  420. if (r) {
  421. r->name = kstrdup(name, GFP_KERNEL);
  422. INIT_LIST_HEAD(&r->results);
  423. mutex_lock(&info->results_lock);
  424. list_add_tail(&r->node, &info->results);
  425. mutex_unlock(&info->results_lock);
  426. }
  427. return r;
  428. }
  429. /*
  430. * This function repeatedly tests DMA transfers of various lengths and
  431. * offsets for a given operation type until it is told to exit by
  432. * kthread_stop(). There may be multiple threads running this function
  433. * in parallel for a single channel, and there may be multiple channels
  434. * being tested in parallel.
  435. *
  436. * Before each test, the source and destination buffer is initialized
  437. * with a known pattern. This pattern is different depending on
  438. * whether it's in an area which is supposed to be copied or
  439. * overwritten, and different in the source and destination buffers.
  440. * So if the DMA engine doesn't copy exactly what we tell it to copy,
  441. * we'll notice.
  442. */
static int dmatest_func(void *data)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
	struct dmatest_thread *thread = data;
	struct dmatest_done done = { .wait = &done_wait };
	struct dmatest_info *info;
	struct dmatest_params *params;
	struct dma_chan *chan;
	struct dma_device *dev;
	const char *thread_name;
	unsigned int src_off, dst_off, len;
	unsigned int error_count;
	unsigned int failed_tests = 0;
	unsigned int total_tests = 0;
	dma_cookie_t cookie;
	enum dma_status status;
	enum dma_ctrl_flags flags;
	u8 *pq_coefs = NULL;
	int ret;
	int src_cnt;
	int dst_cnt;
	int i;
	struct dmatest_result *result;

	thread_name = current->comm;
	set_freezable();

	ret = -ENOMEM;

	/* pairs with the smp_wmb() in dmatest_add_threads() before kthread_run() */
	smp_rmb();
	info = thread->info;
	params = &info->params;
	chan = thread->chan;
	dev = chan->device;
	if (thread->type == DMA_MEMCPY)
		src_cnt = dst_cnt = 1;
	else if (thread->type == DMA_XOR) {
		/* force odd to ensure dst = src */
		src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
		dst_cnt = 1;
	} else if (thread->type == DMA_PQ) {
		/* force odd to ensure dst = src */
		src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
		dst_cnt = 2;
		pq_coefs = kmalloc(params->pq_sources+1, GFP_KERNEL);
		if (!pq_coefs)
			goto err_thread_type;
		/* all-ones coefficients keep the P/Q results predictable */
		for (i = 0; i < src_cnt; i++)
			pq_coefs[i] = 1;
	} else
		goto err_thread_type;

	result = result_init(info, thread_name);
	if (!result)
		goto err_srcs;

	/* NULL-terminated arrays of test buffers, one per DMA source/dest */
	thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL);
	if (!thread->srcs)
		goto err_srcs;
	for (i = 0; i < src_cnt; i++) {
		thread->srcs[i] = kmalloc(params->buf_size, GFP_KERNEL);
		if (!thread->srcs[i])
			goto err_srcbuf;
	}
	thread->srcs[i] = NULL;

	thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL);
	if (!thread->dsts)
		goto err_dsts;
	for (i = 0; i < dst_cnt; i++) {
		thread->dsts[i] = kmalloc(params->buf_size, GFP_KERNEL);
		if (!thread->dsts[i])
			goto err_dstbuf;
	}
	thread->dsts[i] = NULL;

	set_user_nice(current, 10);

	/*
	 * src buffers are freed by the DMAEngine code with dma_unmap_single()
	 * dst buffers are freed by ourselves below
	 */
	flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT
		| DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE;

	while (!kthread_should_stop()
	       && !(params->iterations && total_tests >= params->iterations)) {
		struct dma_async_tx_descriptor *tx = NULL;
		dma_addr_t dma_srcs[src_cnt];
		dma_addr_t dma_dsts[dst_cnt];
		u8 align = 0;

		total_tests++;

		/* honor alignment restrictions */
		if (thread->type == DMA_MEMCPY)
			align = dev->copy_align;
		else if (thread->type == DMA_XOR)
			align = dev->xor_align;
		else if (thread->type == DMA_PQ)
			align = dev->pq_align;

		if (1 << align > params->buf_size) {
			pr_err("%u-byte buffer too small for %d-byte alignment\n",
			       params->buf_size, 1 << align);
			break;
		}

		/* random length and offsets, each rounded down to alignment */
		len = dmatest_random() % params->buf_size + 1;
		len = (len >> align) << align;
		if (!len)
			len = 1 << align;
		src_off = dmatest_random() % (params->buf_size - len + 1);
		dst_off = dmatest_random() % (params->buf_size - len + 1);

		src_off = (src_off >> align) << align;
		dst_off = (dst_off >> align) << align;

		dmatest_init_srcs(thread->srcs, src_off, len, params->buf_size);
		dmatest_init_dsts(thread->dsts, dst_off, len, params->buf_size);

		for (i = 0; i < src_cnt; i++) {
			u8 *buf = thread->srcs[i] + src_off;

			dma_srcs[i] = dma_map_single(dev->dev, buf, len,
						     DMA_TO_DEVICE);
			ret = dma_mapping_error(dev->dev, dma_srcs[i]);
			if (ret) {
				unmap_src(dev->dev, dma_srcs, len, i);
				thread_result_add(info, result,
						  DMATEST_ET_MAP_SRC,
						  total_tests, src_off, dst_off,
						  len, ret);
				failed_tests++;
				/*
				 * NOTE(review): this "continue" binds to the
				 * inner mapping loop, not the outer test
				 * loop, so the iteration proceeds with an
				 * unmapped source - looks unintended; confirm.
				 */
				continue;
			}
		}
		/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
		for (i = 0; i < dst_cnt; i++) {
			dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i],
						     params->buf_size,
						     DMA_BIDIRECTIONAL);
			ret = dma_mapping_error(dev->dev, dma_dsts[i]);
			if (ret) {
				unmap_src(dev->dev, dma_srcs, len, src_cnt);
				unmap_dst(dev->dev, dma_dsts, params->buf_size,
					  i);
				thread_result_add(info, result,
						  DMATEST_ET_MAP_DST,
						  total_tests, src_off, dst_off,
						  len, ret);
				failed_tests++;
				/* NOTE(review): same inner-loop "continue" concern as above */
				continue;
			}
		}

		if (thread->type == DMA_MEMCPY)
			tx = dev->device_prep_dma_memcpy(chan,
							 dma_dsts[0] + dst_off,
							 dma_srcs[0], len,
							 flags);
		else if (thread->type == DMA_XOR)
			tx = dev->device_prep_dma_xor(chan,
						      dma_dsts[0] + dst_off,
						      dma_srcs, src_cnt,
						      len, flags);
		else if (thread->type == DMA_PQ) {
			dma_addr_t dma_pq[dst_cnt];

			for (i = 0; i < dst_cnt; i++)
				dma_pq[i] = dma_dsts[i] + dst_off;
			tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs,
						     src_cnt, pq_coefs,
						     len, flags);
		}

		if (!tx) {
			unmap_src(dev->dev, dma_srcs, len, src_cnt);
			unmap_dst(dev->dev, dma_dsts, params->buf_size,
				  dst_cnt);
			thread_result_add(info, result, DMATEST_ET_PREP,
					  total_tests, src_off, dst_off,
					  len, 0);
			/* back off briefly before retrying */
			msleep(100);
			failed_tests++;
			continue;
		}

		done.done = false;
		tx->callback = dmatest_callback;
		tx->callback_param = &done;
		cookie = tx->tx_submit(tx);

		if (dma_submit_error(cookie)) {
			thread_result_add(info, result, DMATEST_ET_SUBMIT,
					  total_tests, src_off, dst_off,
					  len, cookie);
			msleep(100);
			failed_tests++;
			continue;
		}
		dma_async_issue_pending(chan);

		wait_event_freezable_timeout(done_wait,
					     done.done || kthread_should_stop(),
					     msecs_to_jiffies(params->timeout));

		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);

		if (!done.done) {
			/*
			 * We're leaving the timed out dma operation with
			 * dangling pointer to done_wait. To make this
			 * correct, we'll need to allocate wait_done for
			 * each test iteration and perform "who's gonna
			 * free it this time?" dancing. For now, just
			 * leave it dangling.
			 */
			thread_result_add(info, result, DMATEST_ET_TIMEOUT,
					  total_tests, src_off, dst_off,
					  len, 0);
			failed_tests++;
			continue;
		} else if (status != DMA_SUCCESS) {
			enum dmatest_error_type type = (status == DMA_ERROR) ?
				DMATEST_ET_DMA_ERROR : DMATEST_ET_DMA_IN_PROGRESS;
			thread_result_add(info, result, type,
					  total_tests, src_off, dst_off,
					  len, status);
			failed_tests++;
			continue;
		}

		/* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */
		unmap_dst(dev->dev, dma_dsts, params->buf_size, dst_cnt);

		error_count = 0;

		/* check the three regions of each buffer: before/inside/after */
		pr_debug("%s: verifying source buffer...\n", thread_name);
		error_count += verify_result_add(info, result, total_tests,
				src_off, dst_off, len, thread->srcs, -1,
				0, PATTERN_SRC, true);
		error_count += verify_result_add(info, result, total_tests,
				src_off, dst_off, len, thread->srcs, 0,
				src_off, PATTERN_SRC | PATTERN_COPY, true);
		error_count += verify_result_add(info, result, total_tests,
				src_off, dst_off, len, thread->srcs, 1,
				src_off + len, PATTERN_SRC, true);

		pr_debug("%s: verifying dest buffer...\n", thread_name);
		error_count += verify_result_add(info, result, total_tests,
				src_off, dst_off, len, thread->dsts, -1,
				0, PATTERN_DST, false);
		error_count += verify_result_add(info, result, total_tests,
				src_off, dst_off, len, thread->dsts, 0,
				src_off, PATTERN_SRC | PATTERN_COPY, false);
		error_count += verify_result_add(info, result, total_tests,
				src_off, dst_off, len, thread->dsts, 1,
				dst_off + len, PATTERN_DST, false);

		if (error_count) {
			thread_result_add(info, result, DMATEST_ET_VERIFY,
					  total_tests, src_off, dst_off,
					  len, error_count);
			failed_tests++;
		} else {
			thread_result_add(info, result, DMATEST_ET_OK,
					  total_tests, src_off, dst_off,
					  len, 0);
		}
	}

	/* normal exit: unwind through the same labels used for setup errors */
	ret = 0;
	for (i = 0; thread->dsts[i]; i++)
		kfree(thread->dsts[i]);
err_dstbuf:
	kfree(thread->dsts);
err_dsts:
	for (i = 0; thread->srcs[i]; i++)
		kfree(thread->srcs[i]);
err_srcbuf:
	kfree(thread->srcs);
err_srcs:
	kfree(pq_coefs);
err_thread_type:
	pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
			thread_name, total_tests, failed_tests, ret);

	/* terminate all transfers on specified channels */
	if (ret)
		dmaengine_terminate_all(chan);

	thread->done = true;

	/* keep the kthread alive until dmatest_cleanup_channel() stops it */
	if (params->iterations > 0)
		while (!kthread_should_stop()) {
			DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
			interruptible_sleep_on(&wait_dmatest_exit);
		}

	return ret;
}
  710. static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
  711. {
  712. struct dmatest_thread *thread;
  713. struct dmatest_thread *_thread;
  714. int ret;
  715. list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
  716. ret = kthread_stop(thread->task);
  717. pr_debug("dmatest: thread %s exited with status %d\n",
  718. thread->task->comm, ret);
  719. list_del(&thread->node);
  720. kfree(thread);
  721. }
  722. /* terminate all transfers on specified channels */
  723. dmaengine_terminate_all(dtc->chan);
  724. kfree(dtc);
  725. }
  726. static int dmatest_add_threads(struct dmatest_info *info,
  727. struct dmatest_chan *dtc, enum dma_transaction_type type)
  728. {
  729. struct dmatest_params *params = &info->params;
  730. struct dmatest_thread *thread;
  731. struct dma_chan *chan = dtc->chan;
  732. char *op;
  733. unsigned int i;
  734. if (type == DMA_MEMCPY)
  735. op = "copy";
  736. else if (type == DMA_XOR)
  737. op = "xor";
  738. else if (type == DMA_PQ)
  739. op = "pq";
  740. else
  741. return -EINVAL;
  742. for (i = 0; i < params->threads_per_chan; i++) {
  743. thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
  744. if (!thread) {
  745. pr_warning("dmatest: No memory for %s-%s%u\n",
  746. dma_chan_name(chan), op, i);
  747. break;
  748. }
  749. thread->info = info;
  750. thread->chan = dtc->chan;
  751. thread->type = type;
  752. smp_wmb();
  753. thread->task = kthread_run(dmatest_func, thread, "%s-%s%u",
  754. dma_chan_name(chan), op, i);
  755. if (IS_ERR(thread->task)) {
  756. pr_warning("dmatest: Failed to run thread %s-%s%u\n",
  757. dma_chan_name(chan), op, i);
  758. kfree(thread);
  759. break;
  760. }
  761. /* srcbuf and dstbuf are allocated by the thread itself */
  762. list_add_tail(&thread->node, &dtc->threads);
  763. }
  764. return i;
  765. }
  766. static int dmatest_add_channel(struct dmatest_info *info,
  767. struct dma_chan *chan)
  768. {
  769. struct dmatest_chan *dtc;
  770. struct dma_device *dma_dev = chan->device;
  771. unsigned int thread_count = 0;
  772. int cnt;
  773. dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
  774. if (!dtc) {
  775. pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan));
  776. return -ENOMEM;
  777. }
  778. dtc->chan = chan;
  779. INIT_LIST_HEAD(&dtc->threads);
  780. if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
  781. cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY);
  782. thread_count += cnt > 0 ? cnt : 0;
  783. }
  784. if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
  785. cnt = dmatest_add_threads(info, dtc, DMA_XOR);
  786. thread_count += cnt > 0 ? cnt : 0;
  787. }
  788. if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
  789. cnt = dmatest_add_threads(info, dtc, DMA_PQ);
  790. thread_count += cnt > 0 ? cnt : 0;
  791. }
  792. pr_info("dmatest: Started %u threads using %s\n",
  793. thread_count, dma_chan_name(chan));
  794. list_add_tail(&dtc->node, &info->channels);
  795. info->nr_channels++;
  796. return 0;
  797. }
  798. static bool filter(struct dma_chan *chan, void *param)
  799. {
  800. struct dmatest_params *params = param;
  801. if (!dmatest_match_channel(params, chan) ||
  802. !dmatest_match_device(params, chan->device))
  803. return false;
  804. else
  805. return true;
  806. }
  807. static int __run_threaded_test(struct dmatest_info *info)
  808. {
  809. dma_cap_mask_t mask;
  810. struct dma_chan *chan;
  811. struct dmatest_params *params = &info->params;
  812. int err = 0;
  813. dma_cap_zero(mask);
  814. dma_cap_set(DMA_MEMCPY, mask);
  815. for (;;) {
  816. chan = dma_request_channel(mask, filter, params);
  817. if (chan) {
  818. err = dmatest_add_channel(info, chan);
  819. if (err) {
  820. dma_release_channel(chan);
  821. break; /* add_channel failed, punt */
  822. }
  823. } else
  824. break; /* no more channels available */
  825. if (params->max_channels &&
  826. info->nr_channels >= params->max_channels)
  827. break; /* we have all we need */
  828. }
  829. return err;
  830. }
  831. #ifndef MODULE
  832. static int run_threaded_test(struct dmatest_info *info)
  833. {
  834. int ret;
  835. mutex_lock(&info->lock);
  836. ret = __run_threaded_test(info);
  837. mutex_unlock(&info->lock);
  838. return ret;
  839. }
  840. #endif
  841. static void __stop_threaded_test(struct dmatest_info *info)
  842. {
  843. struct dmatest_chan *dtc, *_dtc;
  844. struct dma_chan *chan;
  845. list_for_each_entry_safe(dtc, _dtc, &info->channels, node) {
  846. list_del(&dtc->node);
  847. chan = dtc->chan;
  848. dmatest_cleanup_channel(dtc);
  849. pr_debug("dmatest: dropped channel %s\n", dma_chan_name(chan));
  850. dma_release_channel(chan);
  851. }
  852. info->nr_channels = 0;
  853. }
  854. static void stop_threaded_test(struct dmatest_info *info)
  855. {
  856. mutex_lock(&info->lock);
  857. __stop_threaded_test(info);
  858. mutex_unlock(&info->lock);
  859. }
  860. static int __restart_threaded_test(struct dmatest_info *info, bool run)
  861. {
  862. struct dmatest_params *params = &info->params;
  863. int ret;
  864. /* Stop any running test first */
  865. __stop_threaded_test(info);
  866. if (run == false)
  867. return 0;
  868. /* Clear results from previous run */
  869. result_free(info, NULL);
  870. /* Copy test parameters */
  871. memcpy(params, &info->dbgfs_params, sizeof(*params));
  872. /* Run test with new parameters */
  873. ret = __run_threaded_test(info);
  874. if (ret) {
  875. __stop_threaded_test(info);
  876. pr_err("dmatest: Can't run test\n");
  877. }
  878. return ret;
  879. }
  880. static ssize_t dtf_write_string(void *to, size_t available, loff_t *ppos,
  881. const void __user *from, size_t count)
  882. {
  883. char tmp[20];
  884. ssize_t len;
  885. len = simple_write_to_buffer(tmp, sizeof(tmp) - 1, ppos, from, count);
  886. if (len >= 0) {
  887. tmp[len] = '\0';
  888. strlcpy(to, strim(tmp), available);
  889. }
  890. return len;
  891. }
  892. static ssize_t dtf_read_channel(struct file *file, char __user *buf,
  893. size_t count, loff_t *ppos)
  894. {
  895. struct dmatest_info *info = file->private_data;
  896. return simple_read_from_buffer(buf, count, ppos,
  897. info->dbgfs_params.channel,
  898. strlen(info->dbgfs_params.channel));
  899. }
  900. static ssize_t dtf_write_channel(struct file *file, const char __user *buf,
  901. size_t size, loff_t *ppos)
  902. {
  903. struct dmatest_info *info = file->private_data;
  904. return dtf_write_string(info->dbgfs_params.channel,
  905. sizeof(info->dbgfs_params.channel),
  906. ppos, buf, size);
  907. }
  908. static const struct file_operations dtf_channel_fops = {
  909. .read = dtf_read_channel,
  910. .write = dtf_write_channel,
  911. .open = simple_open,
  912. .llseek = default_llseek,
  913. };
  914. static ssize_t dtf_read_device(struct file *file, char __user *buf,
  915. size_t count, loff_t *ppos)
  916. {
  917. struct dmatest_info *info = file->private_data;
  918. return simple_read_from_buffer(buf, count, ppos,
  919. info->dbgfs_params.device,
  920. strlen(info->dbgfs_params.device));
  921. }
  922. static ssize_t dtf_write_device(struct file *file, const char __user *buf,
  923. size_t size, loff_t *ppos)
  924. {
  925. struct dmatest_info *info = file->private_data;
  926. return dtf_write_string(info->dbgfs_params.device,
  927. sizeof(info->dbgfs_params.device),
  928. ppos, buf, size);
  929. }
  930. static const struct file_operations dtf_device_fops = {
  931. .read = dtf_read_device,
  932. .write = dtf_write_device,
  933. .open = simple_open,
  934. .llseek = default_llseek,
  935. };
  936. static ssize_t dtf_read_run(struct file *file, char __user *user_buf,
  937. size_t count, loff_t *ppos)
  938. {
  939. struct dmatest_info *info = file->private_data;
  940. char buf[3];
  941. struct dmatest_chan *dtc;
  942. bool alive = false;
  943. mutex_lock(&info->lock);
  944. list_for_each_entry(dtc, &info->channels, node) {
  945. struct dmatest_thread *thread;
  946. list_for_each_entry(thread, &dtc->threads, node) {
  947. if (!thread->done) {
  948. alive = true;
  949. break;
  950. }
  951. }
  952. }
  953. if (alive) {
  954. buf[0] = 'Y';
  955. } else {
  956. __stop_threaded_test(info);
  957. buf[0] = 'N';
  958. }
  959. mutex_unlock(&info->lock);
  960. buf[1] = '\n';
  961. buf[2] = 0x00;
  962. return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
  963. }
  964. static ssize_t dtf_write_run(struct file *file, const char __user *user_buf,
  965. size_t count, loff_t *ppos)
  966. {
  967. struct dmatest_info *info = file->private_data;
  968. char buf[16];
  969. bool bv;
  970. int ret = 0;
  971. if (copy_from_user(buf, user_buf, min(count, (sizeof(buf) - 1))))
  972. return -EFAULT;
  973. if (strtobool(buf, &bv) == 0) {
  974. mutex_lock(&info->lock);
  975. ret = __restart_threaded_test(info, bv);
  976. mutex_unlock(&info->lock);
  977. }
  978. return ret ? ret : count;
  979. }
  980. static const struct file_operations dtf_run_fops = {
  981. .read = dtf_read_run,
  982. .write = dtf_write_run,
  983. .open = simple_open,
  984. .llseek = default_llseek,
  985. };
  986. static int dtf_results_show(struct seq_file *sf, void *data)
  987. {
  988. struct dmatest_info *info = sf->private;
  989. struct dmatest_result *result;
  990. struct dmatest_thread_result *tr;
  991. unsigned int i;
  992. mutex_lock(&info->results_lock);
  993. list_for_each_entry(result, &info->results, node) {
  994. list_for_each_entry(tr, &result->results, node) {
  995. seq_printf(sf, "%s\n",
  996. thread_result_get(result->name, tr));
  997. if (tr->type == DMATEST_ET_VERIFY_BUF) {
  998. for (i = 0; i < tr->vr->error_count; i++) {
  999. seq_printf(sf, "\t%s\n",
  1000. verify_result_get_one(tr->vr, i));
  1001. }
  1002. }
  1003. }
  1004. }
  1005. mutex_unlock(&info->results_lock);
  1006. return 0;
  1007. }
  1008. static int dtf_results_open(struct inode *inode, struct file *file)
  1009. {
  1010. return single_open(file, dtf_results_show, inode->i_private);
  1011. }
  1012. static const struct file_operations dtf_results_fops = {
  1013. .open = dtf_results_open,
  1014. .read = seq_read,
  1015. .llseek = seq_lseek,
  1016. .release = single_release,
  1017. };
  1018. static int dmatest_register_dbgfs(struct dmatest_info *info)
  1019. {
  1020. struct dentry *d;
  1021. struct dmatest_params *params = &info->dbgfs_params;
  1022. int ret = -ENOMEM;
  1023. d = debugfs_create_dir("dmatest", NULL);
  1024. if (IS_ERR(d))
  1025. return PTR_ERR(d);
  1026. if (!d)
  1027. goto err_root;
  1028. info->root = d;
  1029. /* Copy initial values */
  1030. memcpy(params, &info->params, sizeof(*params));
  1031. /* Test parameters */
  1032. d = debugfs_create_u32("test_buf_size", S_IWUSR | S_IRUGO, info->root,
  1033. (u32 *)&params->buf_size);
  1034. if (IS_ERR_OR_NULL(d))
  1035. goto err_node;
  1036. d = debugfs_create_file("channel", S_IRUGO | S_IWUSR, info->root,
  1037. info, &dtf_channel_fops);
  1038. if (IS_ERR_OR_NULL(d))
  1039. goto err_node;
  1040. d = debugfs_create_file("device", S_IRUGO | S_IWUSR, info->root,
  1041. info, &dtf_device_fops);
  1042. if (IS_ERR_OR_NULL(d))
  1043. goto err_node;
  1044. d = debugfs_create_u32("threads_per_chan", S_IWUSR | S_IRUGO, info->root,
  1045. (u32 *)&params->threads_per_chan);
  1046. if (IS_ERR_OR_NULL(d))
  1047. goto err_node;
  1048. d = debugfs_create_u32("max_channels", S_IWUSR | S_IRUGO, info->root,
  1049. (u32 *)&params->max_channels);
  1050. if (IS_ERR_OR_NULL(d))
  1051. goto err_node;
  1052. d = debugfs_create_u32("iterations", S_IWUSR | S_IRUGO, info->root,
  1053. (u32 *)&params->iterations);
  1054. if (IS_ERR_OR_NULL(d))
  1055. goto err_node;
  1056. d = debugfs_create_u32("xor_sources", S_IWUSR | S_IRUGO, info->root,
  1057. (u32 *)&params->xor_sources);
  1058. if (IS_ERR_OR_NULL(d))
  1059. goto err_node;
  1060. d = debugfs_create_u32("pq_sources", S_IWUSR | S_IRUGO, info->root,
  1061. (u32 *)&params->pq_sources);
  1062. if (IS_ERR_OR_NULL(d))
  1063. goto err_node;
  1064. d = debugfs_create_u32("timeout", S_IWUSR | S_IRUGO, info->root,
  1065. (u32 *)&params->timeout);
  1066. if (IS_ERR_OR_NULL(d))
  1067. goto err_node;
  1068. /* Run or stop threaded test */
  1069. d = debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root,
  1070. info, &dtf_run_fops);
  1071. if (IS_ERR_OR_NULL(d))
  1072. goto err_node;
  1073. /* Results of test in progress */
  1074. d = debugfs_create_file("results", S_IRUGO, info->root, info,
  1075. &dtf_results_fops);
  1076. if (IS_ERR_OR_NULL(d))
  1077. goto err_node;
  1078. return 0;
  1079. err_node:
  1080. debugfs_remove_recursive(info->root);
  1081. err_root:
  1082. pr_err("dmatest: Failed to initialize debugfs\n");
  1083. return ret;
  1084. }
  1085. static int __init dmatest_init(void)
  1086. {
  1087. struct dmatest_info *info = &test_info;
  1088. struct dmatest_params *params = &info->params;
  1089. int ret;
  1090. memset(info, 0, sizeof(*info));
  1091. mutex_init(&info->lock);
  1092. INIT_LIST_HEAD(&info->channels);
  1093. mutex_init(&info->results_lock);
  1094. INIT_LIST_HEAD(&info->results);
  1095. /* Set default parameters */
  1096. params->buf_size = test_buf_size;
  1097. strlcpy(params->channel, test_channel, sizeof(params->channel));
  1098. strlcpy(params->device, test_device, sizeof(params->device));
  1099. params->threads_per_chan = threads_per_chan;
  1100. params->max_channels = max_channels;
  1101. params->iterations = iterations;
  1102. params->xor_sources = xor_sources;
  1103. params->pq_sources = pq_sources;
  1104. params->timeout = timeout;
  1105. ret = dmatest_register_dbgfs(info);
  1106. if (ret)
  1107. return ret;
  1108. #ifdef MODULE
  1109. return 0;
  1110. #else
  1111. return run_threaded_test(info);
  1112. #endif
  1113. }
  1114. /* when compiled-in wait for drivers to load first */
  1115. late_initcall(dmatest_init);
  1116. static void __exit dmatest_exit(void)
  1117. {
  1118. struct dmatest_info *info = &test_info;
  1119. debugfs_remove_recursive(info->root);
  1120. stop_threaded_test(info);
  1121. result_free(info, NULL);
  1122. }
  1123. module_exit(dmatest_exit);
  1124. MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
  1125. MODULE_LICENSE("GPL v2");