dmatest.c

/*
 * DMA Engine test module
 *
 * Copyright (C) 2007 Atmel Corporation
 * Copyright (C) 2013 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/freezer.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>

static unsigned int test_buf_size = 16384;
module_param(test_buf_size, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");

static char test_channel[20];
module_param_string(channel, test_channel, sizeof(test_channel),
                S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");

static char test_device[20];
module_param_string(device, test_device, sizeof(test_device),
                S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");

static unsigned int threads_per_chan = 1;
module_param(threads_per_chan, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(threads_per_chan,
                "Number of threads to start per channel (default: 1)");

static unsigned int max_channels;
module_param(max_channels, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_channels,
                "Maximum number of channels to use (default: all)");

static unsigned int iterations;
module_param(iterations, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(iterations,
                "Iterations before stopping test (default: infinite)");

static unsigned int xor_sources = 3;
module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(xor_sources,
                "Number of xor source buffers (default: 3)");

static unsigned int pq_sources = 3;
module_param(pq_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pq_sources,
                "Number of p+q source buffers (default: 3)");

static int timeout = 3000;
/* timeout is a signed int (-1 means infinite), so use the "int" param type */
module_param(timeout, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
                "Pass -1 for infinite timeout");

/* Maximum amount of mismatched bytes in buffer to print */
#define MAX_ERROR_COUNT         32

/*
 * Initialization patterns. All bytes in the source buffer have bit 7
 * set, all bytes in the destination buffer have bit 7 cleared.
 *
 * Bit 6 is set for all bytes which are to be copied by the DMA
 * engine. Bit 5 is set for all bytes which are to be overwritten by
 * the DMA engine.
 *
 * The remaining bits are the inverse of a counter which increments by
 * one for each byte address.
 */
#define PATTERN_SRC             0x80
#define PATTERN_DST             0x00
#define PATTERN_COPY            0x40
#define PATTERN_OVERWRITE       0x20
#define PATTERN_COUNT_MASK      0x1f
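
/*
 * For example, the source byte at offset 2 inside the region to be copied
 * is PATTERN_SRC | PATTERN_COPY | (~2 & PATTERN_COUNT_MASK)
 * = 0x80 | 0x40 | 0x1d = 0xdd, while the matching destination byte starts
 * out as PATTERN_DST | PATTERN_OVERWRITE | 0x1d = 0x3d.
 */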

enum dmatest_error_type {
        DMATEST_ET_OK,
        DMATEST_ET_MAP_SRC,
        DMATEST_ET_MAP_DST,
        DMATEST_ET_PREP,
        DMATEST_ET_SUBMIT,
        DMATEST_ET_TIMEOUT,
        DMATEST_ET_DMA_ERROR,
        DMATEST_ET_DMA_IN_PROGRESS,
        DMATEST_ET_VERIFY,
};

struct dmatest_thread_result {
        struct list_head        node;
        unsigned int            n;
        unsigned int            src_off;
        unsigned int            dst_off;
        unsigned int            len;
        enum dmatest_error_type type;
        union {
                unsigned long   data;
                dma_cookie_t    cookie;
                enum dma_status status;
                int             error;
        };
};

struct dmatest_result {
        struct list_head        node;
        char                    *name;
        struct list_head        results;
};

struct dmatest_info;

struct dmatest_thread {
        struct list_head        node;
        struct dmatest_info     *info;
        struct task_struct      *task;
        struct dma_chan         *chan;
        u8                      **srcs;
        u8                      **dsts;
        enum dma_transaction_type type;
        bool                    done;
};

struct dmatest_chan {
        struct list_head        node;
        struct dma_chan         *chan;
        struct list_head        threads;
};

/**
 * struct dmatest_params - test parameters.
 * @buf_size:           size of the memcpy test buffer
 * @channel:            bus ID of the channel to test
 * @device:             bus ID of the DMA Engine to test
 * @threads_per_chan:   number of threads to start per channel
 * @max_channels:       maximum number of channels to use
 * @iterations:         iterations before stopping test
 * @xor_sources:        number of xor source buffers
 * @pq_sources:         number of p+q source buffers
 * @timeout:            transfer timeout in msec, -1 for infinite timeout
 */
struct dmatest_params {
        unsigned int    buf_size;
        char            channel[20];
        char            device[20];
        unsigned int    threads_per_chan;
        unsigned int    max_channels;
        unsigned int    iterations;
        unsigned int    xor_sources;
        unsigned int    pq_sources;
        int             timeout;
};

/**
 * struct dmatest_info - test information.
 * @params:             test parameters
 * @channels:           list of channels under test
 * @nr_channels:        number of channels under test
 * @lock:               access protection to the fields of this structure
 * @root:               debugfs root directory
 * @results:            list of per-thread test results
 * @results_lock:       access protection to @results
 */
struct dmatest_info {
        /* Test parameters */
        struct dmatest_params   params;

        /* Internal state */
        struct list_head        channels;
        unsigned int            nr_channels;
        struct mutex            lock;

        /* debugfs related stuff */
        struct dentry           *root;

        /* Test results */
        struct list_head        results;
        struct mutex            results_lock;
};

static struct dmatest_info test_info;

static bool dmatest_match_channel(struct dmatest_params *params,
                struct dma_chan *chan)
{
        if (params->channel[0] == '\0')
                return true;
        return strcmp(dma_chan_name(chan), params->channel) == 0;
}

static bool dmatest_match_device(struct dmatest_params *params,
                struct dma_device *device)
{
        if (params->device[0] == '\0')
                return true;
        return strcmp(dev_name(device->dev), params->device) == 0;
}

static unsigned long dmatest_random(void)
{
        unsigned long buf;

        get_random_bytes(&buf, sizeof(buf));
        return buf;
}

static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len,
                unsigned int buf_size)
{
        unsigned int i;
        u8 *buf;

        for (; (buf = *bufs); bufs++) {
                for (i = 0; i < start; i++)
                        buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
                for ( ; i < start + len; i++)
                        buf[i] = PATTERN_SRC | PATTERN_COPY
                                | (~i & PATTERN_COUNT_MASK);
                for ( ; i < buf_size; i++)
                        buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
        }
}

static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len,
                unsigned int buf_size)
{
        unsigned int i;
        u8 *buf;

        for (; (buf = *bufs); bufs++) {
                for (i = 0; i < start; i++)
                        buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
                for ( ; i < start + len; i++)
                        buf[i] = PATTERN_DST | PATTERN_OVERWRITE
                                | (~i & PATTERN_COUNT_MASK);
                for ( ; i < buf_size; i++)
                        buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
        }
}

static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
                unsigned int counter, bool is_srcbuf)
{
        u8 diff = actual ^ pattern;
        u8 expected = pattern | (~counter & PATTERN_COUNT_MASK);
        const char *thread_name = current->comm;

        if (is_srcbuf)
                pr_warn("%s: srcbuf[0x%x] overwritten! Expected %02x, got %02x\n",
                        thread_name, index, expected, actual);
        else if ((pattern & PATTERN_COPY)
                        && (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
                pr_warn("%s: dstbuf[0x%x] not copied! Expected %02x, got %02x\n",
                        thread_name, index, expected, actual);
        else if (diff & PATTERN_SRC)
                pr_warn("%s: dstbuf[0x%x] was copied! Expected %02x, got %02x\n",
                        thread_name, index, expected, actual);
        else
                pr_warn("%s: dstbuf[0x%x] mismatch! Expected %02x, got %02x\n",
                        thread_name, index, expected, actual);
}

static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
                unsigned int end, unsigned int counter, u8 pattern,
                bool is_srcbuf)
{
        unsigned int i;
        unsigned int error_count = 0;
        u8 actual;
        u8 expected;
        u8 *buf;
        unsigned int counter_orig = counter;

        for (; (buf = *bufs); bufs++) {
                counter = counter_orig;
                for (i = start; i < end; i++) {
                        actual = buf[i];
                        expected = pattern | (~counter & PATTERN_COUNT_MASK);
                        if (actual != expected) {
                                if (error_count < MAX_ERROR_COUNT)
                                        dmatest_mismatch(actual, pattern, i,
                                                         counter, is_srcbuf);
                                error_count++;
                        }
                        counter++;
                }
        }

        if (error_count > MAX_ERROR_COUNT)
                pr_warn("%s: %u errors suppressed\n",
                        current->comm, error_count - MAX_ERROR_COUNT);

        return error_count;
}

/* poor man's completion - we want to use wait_event_freezable() on it */
struct dmatest_done {
        bool                    done;
        wait_queue_head_t       *wait;
};

static void dmatest_callback(void *arg)
{
        struct dmatest_done *done = arg;

        done->done = true;
        wake_up_all(done->wait);
}
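
/*
 * Typical use, as in dmatest_func() below: clear done.done, submit a
 * descriptor with dmatest_callback() as its completion callback, then
 * block in wait_event_freezable_timeout(done_wait, done.done, ...) until
 * the callback fires or the transfer times out.
 */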

static inline void unmap_src(struct device *dev, dma_addr_t *addr, size_t len,
                unsigned int count)
{
        while (count--)
                dma_unmap_single(dev, addr[count], len, DMA_TO_DEVICE);
}

static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len,
                unsigned int count)
{
        while (count--)
                dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL);
}

/* Return the largest odd number less than or equal to min(x, y) */
static unsigned int min_odd(unsigned int x, unsigned int y)
{
        unsigned int val = min(x, y);

        return val % 2 ? val : val - 1;
}

static char *thread_result_get(const char *name,
                struct dmatest_thread_result *tr)
{
        static const char * const messages[] = {
                [DMATEST_ET_OK]                 = "No errors",
                [DMATEST_ET_MAP_SRC]            = "src mapping error",
                [DMATEST_ET_MAP_DST]            = "dst mapping error",
                [DMATEST_ET_PREP]               = "prep error",
                [DMATEST_ET_SUBMIT]             = "submit error",
                [DMATEST_ET_TIMEOUT]            = "test timed out",
                [DMATEST_ET_DMA_ERROR]          =
                        "got completion callback (DMA_ERROR)",
                [DMATEST_ET_DMA_IN_PROGRESS]    =
                        "got completion callback (DMA_IN_PROGRESS)",
                [DMATEST_ET_VERIFY]             = "errors",
        };
        static char buf[512];

        snprintf(buf, sizeof(buf) - 1,
                 "%s: #%u: %s with src_off=0x%x dst_off=0x%x len=0x%x (%lu)",
                 name, tr->n, messages[tr->type], tr->src_off, tr->dst_off,
                 tr->len, tr->data);

        return buf;
}

static int thread_result_add(struct dmatest_info *info,
                struct dmatest_result *r, enum dmatest_error_type type,
                unsigned int n, unsigned int src_off, unsigned int dst_off,
                unsigned int len, unsigned long data)
{
        struct dmatest_thread_result *tr;

        tr = kzalloc(sizeof(*tr), GFP_KERNEL);
        if (!tr)
                return -ENOMEM;

        tr->type = type;
        tr->n = n;
        tr->src_off = src_off;
        tr->dst_off = dst_off;
        tr->len = len;
        tr->data = data;

        mutex_lock(&info->results_lock);
        list_add_tail(&tr->node, &r->results);
        mutex_unlock(&info->results_lock);

        if (tr->type == DMATEST_ET_OK)
                pr_debug("%s\n", thread_result_get(r->name, tr));
        else
                pr_warn("%s\n", thread_result_get(r->name, tr));

        return 0;
}

static void result_free(struct dmatest_info *info, const char *name)
{
        struct dmatest_result *r, *_r;

        mutex_lock(&info->results_lock);
        list_for_each_entry_safe(r, _r, &info->results, node) {
                struct dmatest_thread_result *tr, *_tr;

                if (name && strcmp(r->name, name))
                        continue;

                list_for_each_entry_safe(tr, _tr, &r->results, node) {
                        list_del(&tr->node);
                        kfree(tr);
                }

                kfree(r->name);
                list_del(&r->node);
                kfree(r);
        }

        mutex_unlock(&info->results_lock);
}

static struct dmatest_result *result_init(struct dmatest_info *info,
                const char *name)
{
        struct dmatest_result *r;

        r = kzalloc(sizeof(*r), GFP_KERNEL);
        if (r) {
                r->name = kstrdup(name, GFP_KERNEL);
                INIT_LIST_HEAD(&r->results);

                mutex_lock(&info->results_lock);
                list_add_tail(&r->node, &info->results);
                mutex_unlock(&info->results_lock);
        }

        return r;
}

/*
 * This function repeatedly tests DMA transfers of various lengths and
 * offsets for a given operation type until it is told to exit by
 * kthread_stop(). There may be multiple threads running this function
 * in parallel for a single channel, and there may be multiple channels
 * being tested in parallel.
 *
 * Before each test, the source and destination buffers are initialized
 * with a known pattern. This pattern is different depending on
 * whether it's in an area which is supposed to be copied or
 * overwritten, and different in the source and destination buffers.
 * So if the DMA engine doesn't copy exactly what we tell it to copy,
 * we'll notice.
 */
static int dmatest_func(void *data)
{
        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
        struct dmatest_thread *thread = data;
        struct dmatest_done done = { .wait = &done_wait };
        struct dmatest_info *info;
        struct dmatest_params *params;
        struct dma_chan *chan;
        struct dma_device *dev;
        const char *thread_name;
        unsigned int src_off, dst_off, len;
        unsigned int error_count;
        unsigned int failed_tests = 0;
        unsigned int total_tests = 0;
        dma_cookie_t cookie;
        enum dma_status status;
        enum dma_ctrl_flags flags;
        u8 *pq_coefs = NULL;
        int ret;
        int src_cnt;
        int dst_cnt;
        int i;
        struct dmatest_result *result;

        thread_name = current->comm;
        set_freezable();

        ret = -ENOMEM;

        smp_rmb();
        info = thread->info;
        params = &info->params;
        chan = thread->chan;
        dev = chan->device;
        if (thread->type == DMA_MEMCPY)
                src_cnt = dst_cnt = 1;
        else if (thread->type == DMA_XOR) {
                /* force odd to ensure dst = src */
                src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
                dst_cnt = 1;
        } else if (thread->type == DMA_PQ) {
                /* force odd to ensure dst = src */
                src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
                dst_cnt = 2;
                pq_coefs = kmalloc(params->pq_sources+1, GFP_KERNEL);
                if (!pq_coefs)
                        goto err_thread_type;

                for (i = 0; i < src_cnt; i++)
                        pq_coefs[i] = 1;
        } else
                goto err_thread_type;
        result = result_init(info, thread_name);
        if (!result)
                goto err_srcs;

        thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL);
        if (!thread->srcs)
                goto err_srcs;
        for (i = 0; i < src_cnt; i++) {
                thread->srcs[i] = kmalloc(params->buf_size, GFP_KERNEL);
                if (!thread->srcs[i])
                        goto err_srcbuf;
        }
        thread->srcs[i] = NULL;

        thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL);
        if (!thread->dsts)
                goto err_dsts;
        for (i = 0; i < dst_cnt; i++) {
                thread->dsts[i] = kmalloc(params->buf_size, GFP_KERNEL);
                if (!thread->dsts[i])
                        goto err_dstbuf;
        }
        thread->dsts[i] = NULL;

        set_user_nice(current, 10);

        /*
         * src and dst buffers are freed by ourselves below
         */
        flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;

        while (!kthread_should_stop()
               && !(params->iterations && total_tests >= params->iterations)) {
                struct dma_async_tx_descriptor *tx = NULL;
                dma_addr_t dma_srcs[src_cnt];
                dma_addr_t dma_dsts[dst_cnt];
                bool map_failed = false;
                u8 align = 0;

                total_tests++;

                /* honor alignment restrictions */
                if (thread->type == DMA_MEMCPY)
                        align = dev->copy_align;
                else if (thread->type == DMA_XOR)
                        align = dev->xor_align;
                else if (thread->type == DMA_PQ)
                        align = dev->pq_align;

                if (1 << align > params->buf_size) {
                        pr_err("%u-byte buffer too small for %d-byte alignment\n",
                               params->buf_size, 1 << align);
                        break;
                }

                len = dmatest_random() % params->buf_size + 1;
                len = (len >> align) << align;
                if (!len)
                        len = 1 << align;
                src_off = dmatest_random() % (params->buf_size - len + 1);
                dst_off = dmatest_random() % (params->buf_size - len + 1);

                src_off = (src_off >> align) << align;
                dst_off = (dst_off >> align) << align;

                dmatest_init_srcs(thread->srcs, src_off, len, params->buf_size);
                dmatest_init_dsts(thread->dsts, dst_off, len, params->buf_size);

                for (i = 0; i < src_cnt; i++) {
                        u8 *buf = thread->srcs[i] + src_off;

                        dma_srcs[i] = dma_map_single(dev->dev, buf, len,
                                                     DMA_TO_DEVICE);
                        ret = dma_mapping_error(dev->dev, dma_srcs[i]);
                        if (ret) {
                                unmap_src(dev->dev, dma_srcs, len, i);
                                thread_result_add(info, result,
                                                  DMATEST_ET_MAP_SRC,
                                                  total_tests, src_off, dst_off,
                                                  len, ret);
                                failed_tests++;
                                map_failed = true;
                                break;
                        }
                }
                /*
                 * Skip to the next test iteration on a mapping error; a
                 * bare "continue" here would only restart the inner loop.
                 */
                if (map_failed)
                        continue;

                /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
                for (i = 0; i < dst_cnt; i++) {
                        dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i],
                                                     params->buf_size,
                                                     DMA_BIDIRECTIONAL);
                        ret = dma_mapping_error(dev->dev, dma_dsts[i]);
                        if (ret) {
                                unmap_src(dev->dev, dma_srcs, len, src_cnt);
                                unmap_dst(dev->dev, dma_dsts, params->buf_size,
                                          i);
                                thread_result_add(info, result,
                                                  DMATEST_ET_MAP_DST,
                                                  total_tests, src_off, dst_off,
                                                  len, ret);
                                failed_tests++;
                                map_failed = true;
                                break;
                        }
                }
                if (map_failed)
                        continue;
                if (thread->type == DMA_MEMCPY)
                        tx = dev->device_prep_dma_memcpy(chan,
                                                         dma_dsts[0] + dst_off,
                                                         dma_srcs[0], len,
                                                         flags);
                else if (thread->type == DMA_XOR)
                        tx = dev->device_prep_dma_xor(chan,
                                                      dma_dsts[0] + dst_off,
                                                      dma_srcs, src_cnt,
                                                      len, flags);
                else if (thread->type == DMA_PQ) {
                        dma_addr_t dma_pq[dst_cnt];

                        for (i = 0; i < dst_cnt; i++)
                                dma_pq[i] = dma_dsts[i] + dst_off;
                        tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs,
                                                     src_cnt, pq_coefs,
                                                     len, flags);
                }

                if (!tx) {
                        unmap_src(dev->dev, dma_srcs, len, src_cnt);
                        unmap_dst(dev->dev, dma_dsts, params->buf_size,
                                  dst_cnt);
                        thread_result_add(info, result, DMATEST_ET_PREP,
                                          total_tests, src_off, dst_off,
                                          len, 0);
                        msleep(100);
                        failed_tests++;
                        continue;
                }

                done.done = false;
                tx->callback = dmatest_callback;
                tx->callback_param = &done;
                cookie = tx->tx_submit(tx);

                if (dma_submit_error(cookie)) {
                        thread_result_add(info, result, DMATEST_ET_SUBMIT,
                                          total_tests, src_off, dst_off,
                                          len, cookie);
                        msleep(100);
                        failed_tests++;
                        continue;
                }
                dma_async_issue_pending(chan);

                wait_event_freezable_timeout(done_wait, done.done,
                                             msecs_to_jiffies(params->timeout));

                status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);

                if (!done.done) {
                        /*
                         * We're leaving the timed out dma operation with
                         * dangling pointer to done_wait. To make this
                         * correct, we'll need to allocate wait_done for
                         * each test iteration and perform "who's gonna
                         * free it this time?" dancing. For now, just
                         * leave it dangling.
                         */
                        thread_result_add(info, result, DMATEST_ET_TIMEOUT,
                                          total_tests, src_off, dst_off,
                                          len, 0);
                        failed_tests++;
                        continue;
                } else if (status != DMA_SUCCESS) {
                        enum dmatest_error_type type = (status == DMA_ERROR) ?
                                DMATEST_ET_DMA_ERROR : DMATEST_ET_DMA_IN_PROGRESS;
                        thread_result_add(info, result, type,
                                          total_tests, src_off, dst_off,
                                          len, status);
                        failed_tests++;
                        continue;
                }
                /* Unmap by myself */
                unmap_src(dev->dev, dma_srcs, len, src_cnt);
                unmap_dst(dev->dev, dma_dsts, params->buf_size, dst_cnt);

                error_count = 0;

                pr_debug("%s: verifying source buffer...\n", thread_name);
                error_count += dmatest_verify(thread->srcs, 0, src_off,
                                0, PATTERN_SRC, true);
                error_count += dmatest_verify(thread->srcs, src_off,
                                src_off + len, src_off,
                                PATTERN_SRC | PATTERN_COPY, true);
                error_count += dmatest_verify(thread->srcs, src_off + len,
                                params->buf_size, src_off + len,
                                PATTERN_SRC, true);

                pr_debug("%s: verifying dest buffer...\n", thread_name);
                error_count += dmatest_verify(thread->dsts, 0, dst_off,
                                0, PATTERN_DST, false);
                error_count += dmatest_verify(thread->dsts, dst_off,
                                dst_off + len, src_off,
                                PATTERN_SRC | PATTERN_COPY, false);
                error_count += dmatest_verify(thread->dsts, dst_off + len,
                                params->buf_size, dst_off + len,
                                PATTERN_DST, false);

                if (error_count) {
                        thread_result_add(info, result, DMATEST_ET_VERIFY,
                                          total_tests, src_off, dst_off,
                                          len, error_count);
                        failed_tests++;
                } else {
                        thread_result_add(info, result, DMATEST_ET_OK,
                                          total_tests, src_off, dst_off,
                                          len, 0);
                }
        }

        ret = 0;
        for (i = 0; thread->dsts[i]; i++)
                kfree(thread->dsts[i]);
err_dstbuf:
        kfree(thread->dsts);
err_dsts:
        for (i = 0; thread->srcs[i]; i++)
                kfree(thread->srcs[i]);
err_srcbuf:
        kfree(thread->srcs);
err_srcs:
        kfree(pq_coefs);
err_thread_type:
        pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
                  thread_name, total_tests, failed_tests, ret);

        /* terminate all transfers on specified channels */
        if (ret)
                dmaengine_terminate_all(chan);

        thread->done = true;

        /* with a finite iteration count, wait here for kthread_stop() */
        if (params->iterations > 0)
                while (!kthread_should_stop()) {
                        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
                        interruptible_sleep_on(&wait_dmatest_exit);
                }

        return ret;
}

static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
{
        struct dmatest_thread *thread;
        struct dmatest_thread *_thread;
        int ret;

        list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
                ret = kthread_stop(thread->task);
                pr_debug("dmatest: thread %s exited with status %d\n",
                         thread->task->comm, ret);
                list_del(&thread->node);
                kfree(thread);
        }

        /* terminate all transfers on specified channels */
        dmaengine_terminate_all(dtc->chan);

        kfree(dtc);
}

static int dmatest_add_threads(struct dmatest_info *info,
                struct dmatest_chan *dtc, enum dma_transaction_type type)
{
        struct dmatest_params *params = &info->params;
        struct dmatest_thread *thread;
        struct dma_chan *chan = dtc->chan;
        char *op;
        unsigned int i;

        if (type == DMA_MEMCPY)
                op = "copy";
        else if (type == DMA_XOR)
                op = "xor";
        else if (type == DMA_PQ)
                op = "pq";
        else
                return -EINVAL;

        for (i = 0; i < params->threads_per_chan; i++) {
                thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
                if (!thread) {
                        pr_warning("dmatest: No memory for %s-%s%u\n",
                                   dma_chan_name(chan), op, i);
                        break;
                }
                thread->info = info;
                thread->chan = dtc->chan;
                thread->type = type;
                smp_wmb();
                thread->task = kthread_run(dmatest_func, thread, "%s-%s%u",
                                dma_chan_name(chan), op, i);
                if (IS_ERR(thread->task)) {
                        pr_warning("dmatest: Failed to run thread %s-%s%u\n",
                                   dma_chan_name(chan), op, i);
                        kfree(thread);
                        break;
                }

                /* srcbuf and dstbuf are allocated by the thread itself */

                list_add_tail(&thread->node, &dtc->threads);
        }

        return i;
}

static int dmatest_add_channel(struct dmatest_info *info,
                struct dma_chan *chan)
{
        struct dmatest_chan *dtc;
        struct dma_device *dma_dev = chan->device;
        unsigned int thread_count = 0;
        int cnt;

        dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
        if (!dtc) {
                pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan));
                return -ENOMEM;
        }

        dtc->chan = chan;
        INIT_LIST_HEAD(&dtc->threads);

        if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
                cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY);
                thread_count += cnt > 0 ? cnt : 0;
        }
        if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
                cnt = dmatest_add_threads(info, dtc, DMA_XOR);
                thread_count += cnt > 0 ? cnt : 0;
        }
        if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
                cnt = dmatest_add_threads(info, dtc, DMA_PQ);
                thread_count += cnt > 0 ? cnt : 0;
        }

        pr_info("dmatest: Started %u threads using %s\n",
                thread_count, dma_chan_name(chan));

        list_add_tail(&dtc->node, &info->channels);
        info->nr_channels++;

        return 0;
}

static bool filter(struct dma_chan *chan, void *param)
{
        struct dmatest_params *params = param;

        return dmatest_match_channel(params, chan) &&
               dmatest_match_device(params, chan->device);
}

static int __run_threaded_test(struct dmatest_info *info)
{
        dma_cap_mask_t mask;
        struct dma_chan *chan;
        struct dmatest_params *params = &info->params;
        int err = 0;

        dma_cap_zero(mask);
        dma_cap_set(DMA_MEMCPY, mask);
        for (;;) {
                chan = dma_request_channel(mask, filter, params);
                if (chan) {
                        err = dmatest_add_channel(info, chan);
                        if (err) {
                                dma_release_channel(chan);
                                break; /* add_channel failed, punt */
                        }
                } else
                        break; /* no more channels available */
                if (params->max_channels &&
                    info->nr_channels >= params->max_channels)
                        break; /* we have all we need */
        }
        return err;
}

#ifndef MODULE
static int run_threaded_test(struct dmatest_info *info)
{
        int ret;

        mutex_lock(&info->lock);
        ret = __run_threaded_test(info);
        mutex_unlock(&info->lock);
        return ret;
}
#endif

static void __stop_threaded_test(struct dmatest_info *info)
{
        struct dmatest_chan *dtc, *_dtc;
        struct dma_chan *chan;

        list_for_each_entry_safe(dtc, _dtc, &info->channels, node) {
                list_del(&dtc->node);
                chan = dtc->chan;
                dmatest_cleanup_channel(dtc);
                pr_debug("dmatest: dropped channel %s\n", dma_chan_name(chan));
                dma_release_channel(chan);
        }

        info->nr_channels = 0;
}

static void stop_threaded_test(struct dmatest_info *info)
{
        mutex_lock(&info->lock);
        __stop_threaded_test(info);
        mutex_unlock(&info->lock);
}

static int __restart_threaded_test(struct dmatest_info *info, bool run)
{
        struct dmatest_params *params = &info->params;

        /* Stop any running test first */
        __stop_threaded_test(info);

        if (!run)
                return 0;

        /* Clear results from previous run */
        result_free(info, NULL);

        /* Copy test parameters */
        params->buf_size = test_buf_size;
        strlcpy(params->channel, strim(test_channel), sizeof(params->channel));
        strlcpy(params->device, strim(test_device), sizeof(params->device));
        params->threads_per_chan = threads_per_chan;
        params->max_channels = max_channels;
        params->iterations = iterations;
        params->xor_sources = xor_sources;
        params->pq_sources = pq_sources;
        params->timeout = timeout;

        /* Run test with new parameters */
        return __run_threaded_test(info);
}

static bool __is_threaded_test_run(struct dmatest_info *info)
{
        struct dmatest_chan *dtc;

        list_for_each_entry(dtc, &info->channels, node) {
                struct dmatest_thread *thread;

                list_for_each_entry(thread, &dtc->threads, node) {
                        if (!thread->done)
                                return true;
                }
        }

        return false;
}

static ssize_t dtf_read_run(struct file *file, char __user *user_buf,
                size_t count, loff_t *ppos)
{
        struct dmatest_info *info = file->private_data;
        char buf[3];

        mutex_lock(&info->lock);

        if (__is_threaded_test_run(info)) {
                buf[0] = 'Y';
        } else {
                __stop_threaded_test(info);
                buf[0] = 'N';
        }

        mutex_unlock(&info->lock);
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dtf_write_run(struct file *file, const char __user *user_buf,
                size_t count, loff_t *ppos)
{
        struct dmatest_info *info = file->private_data;
        char buf[16];
        bool bv;
        int ret = 0;

        if (copy_from_user(buf, user_buf, min(count, (sizeof(buf) - 1))))
                return -EFAULT;
        /* NUL-terminate the copied data before parsing it */
        buf[min(count, sizeof(buf) - 1)] = '\0';

        if (strtobool(buf, &bv) == 0) {
                mutex_lock(&info->lock);

                if (__is_threaded_test_run(info))
                        ret = -EBUSY;
                else
                        ret = __restart_threaded_test(info, bv);

                mutex_unlock(&info->lock);
        }

        return ret ? ret : count;
}

static const struct file_operations dtf_run_fops = {
        .read   = dtf_read_run,
        .write  = dtf_write_run,
        .open   = simple_open,
        .llseek = default_llseek,
};

static int dtf_results_show(struct seq_file *sf, void *data)
{
        struct dmatest_info *info = sf->private;
        struct dmatest_result *result;
        struct dmatest_thread_result *tr;

        mutex_lock(&info->results_lock);
        list_for_each_entry(result, &info->results, node) {
                list_for_each_entry(tr, &result->results, node)
                        seq_printf(sf, "%s\n",
                                   thread_result_get(result->name, tr));
        }
        mutex_unlock(&info->results_lock);

        return 0;
}

static int dtf_results_open(struct inode *inode, struct file *file)
{
        return single_open(file, dtf_results_show, inode->i_private);
}

static const struct file_operations dtf_results_fops = {
        .open           = dtf_results_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int dmatest_register_dbgfs(struct dmatest_info *info)
{
        struct dentry *d;

        d = debugfs_create_dir("dmatest", NULL);
        if (IS_ERR(d))
                return PTR_ERR(d);
        if (!d)
                goto err_root;

        info->root = d;

        /* Run or stop threaded test */
        debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root, info,
                            &dtf_run_fops);

        /* Results of test in progress */
        debugfs_create_file("results", S_IRUGO, info->root, info,
                            &dtf_results_fops);

        return 0;

err_root:
        pr_err("dmatest: Failed to initialize debugfs\n");
        return -ENOMEM;
}

static int __init dmatest_init(void)
{
        struct dmatest_info *info = &test_info;
        int ret;

        memset(info, 0, sizeof(*info));

        mutex_init(&info->lock);
        INIT_LIST_HEAD(&info->channels);

        mutex_init(&info->results_lock);
        INIT_LIST_HEAD(&info->results);

        ret = dmatest_register_dbgfs(info);
        if (ret)
                return ret;

#ifdef MODULE
        return 0;
#else
        return run_threaded_test(info);
#endif
}
/* when compiled-in wait for drivers to load first */
late_initcall(dmatest_init);

static void __exit dmatest_exit(void)
{
        struct dmatest_info *info = &test_info;

        debugfs_remove_recursive(info->root);
        stop_threaded_test(info);
        result_free(info, NULL);
}
module_exit(dmatest_exit);

MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_LICENSE("GPL v2");