dmatest.c 31 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246
  1. /*
  2. * DMA Engine test module
  3. *
  4. * Copyright (C) 2007 Atmel Corporation
  5. * Copyright (C) 2013 Intel Corporation
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License version 2 as
  9. * published by the Free Software Foundation.
  10. */
  11. #include <linux/delay.h>
  12. #include <linux/dma-mapping.h>
  13. #include <linux/dmaengine.h>
  14. #include <linux/freezer.h>
  15. #include <linux/init.h>
  16. #include <linux/kthread.h>
  17. #include <linux/module.h>
  18. #include <linux/moduleparam.h>
  19. #include <linux/random.h>
  20. #include <linux/slab.h>
  21. #include <linux/wait.h>
  22. #include <linux/ctype.h>
  23. #include <linux/debugfs.h>
  24. #include <linux/uaccess.h>
  25. #include <linux/seq_file.h>
  26. static unsigned int test_buf_size = 16384;
  27. module_param(test_buf_size, uint, S_IRUGO);
  28. MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
  29. static char test_channel[20];
  30. module_param_string(channel, test_channel, sizeof(test_channel), S_IRUGO);
  31. MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");
  32. static char test_device[20];
  33. module_param_string(device, test_device, sizeof(test_device), S_IRUGO);
  34. MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");
  35. static unsigned int threads_per_chan = 1;
  36. module_param(threads_per_chan, uint, S_IRUGO);
  37. MODULE_PARM_DESC(threads_per_chan,
  38. "Number of threads to start per channel (default: 1)");
  39. static unsigned int max_channels;
  40. module_param(max_channels, uint, S_IRUGO);
  41. MODULE_PARM_DESC(max_channels,
  42. "Maximum number of channels to use (default: all)");
  43. static unsigned int iterations;
  44. module_param(iterations, uint, S_IRUGO);
  45. MODULE_PARM_DESC(iterations,
  46. "Iterations before stopping test (default: infinite)");
  47. static unsigned int xor_sources = 3;
  48. module_param(xor_sources, uint, S_IRUGO);
  49. MODULE_PARM_DESC(xor_sources,
  50. "Number of xor source buffers (default: 3)");
  51. static unsigned int pq_sources = 3;
  52. module_param(pq_sources, uint, S_IRUGO);
  53. MODULE_PARM_DESC(pq_sources,
  54. "Number of p+q source buffers (default: 3)");
  55. static int timeout = 3000;
  56. module_param(timeout, uint, S_IRUGO);
  57. MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
  58. "Pass -1 for infinite timeout");
  59. /* Maximum amount of mismatched bytes in buffer to print */
  60. #define MAX_ERROR_COUNT 32
  61. /*
  62. * Initialization patterns. All bytes in the source buffer has bit 7
  63. * set, all bytes in the destination buffer has bit 7 cleared.
  64. *
  65. * Bit 6 is set for all bytes which are to be copied by the DMA
  66. * engine. Bit 5 is set for all bytes which are to be overwritten by
  67. * the DMA engine.
  68. *
  69. * The remaining bits are the inverse of a counter which increments by
  70. * one for each byte address.
  71. */
  72. #define PATTERN_SRC 0x80
  73. #define PATTERN_DST 0x00
  74. #define PATTERN_COPY 0x40
  75. #define PATTERN_OVERWRITE 0x20
  76. #define PATTERN_COUNT_MASK 0x1f
/* Classification of a single test iteration's outcome. */
enum dmatest_error_type {
	DMATEST_ET_OK,			/* iteration completed and verified */
	DMATEST_ET_MAP_SRC,		/* dma_map_single() failed on a source */
	DMATEST_ET_MAP_DST,		/* dma_map_single() failed on a destination */
	DMATEST_ET_PREP,		/* device_prep_dma_*() returned NULL */
	DMATEST_ET_SUBMIT,		/* tx_submit() returned an error cookie */
	DMATEST_ET_TIMEOUT,		/* completion callback never fired in time */
	DMATEST_ET_DMA_ERROR,		/* engine reported DMA_ERROR */
	DMATEST_ET_DMA_IN_PROGRESS,	/* engine reported DMA_IN_PROGRESS */
	DMATEST_ET_VERIFY,		/* buffer contents did not match pattern */
};

/* Outcome of one test iteration, linked into dmatest_result::results. */
struct dmatest_thread_result {
	struct list_head node;
	unsigned int n;		/* iteration number */
	unsigned int src_off;	/* source offset used for this transfer */
	unsigned int dst_off;	/* destination offset used for this transfer */
	unsigned int len;	/* transfer length in bytes */
	enum dmatest_error_type type;
	union {			/* extra data; interpretation depends on type */
		unsigned long data;
		dma_cookie_t cookie;
		enum dma_status status;
		int error;
	};
};

/* Per-thread collection of iteration results, linked into dmatest_info. */
struct dmatest_result {
	struct list_head node;
	char *name;		/* thread name, kstrdup()'d copy */
	struct list_head results;
};

struct dmatest_info;

/* One worker thread bound to a channel and a transaction type. */
struct dmatest_thread {
	struct list_head node;
	struct dmatest_info *info;
	struct task_struct *task;
	struct dma_chan *chan;
	u8 **srcs;		/* NULL-terminated array of source buffers */
	u8 **dsts;		/* NULL-terminated array of destination buffers */
	enum dma_transaction_type type;
	bool done;		/* set by the thread right before it parks/exits */
};

/* A DMA channel under test together with its worker threads. */
struct dmatest_chan {
	struct list_head node;
	struct dma_chan *chan;
	struct list_head threads;
};
/**
 * struct dmatest_params - test parameters.
 * @buf_size:	size of the memcpy test buffer
 * @channel:	bus ID of the channel to test
 * @device:	bus ID of the DMA Engine to test
 * @threads_per_chan:	number of threads to start per channel
 * @max_channels:	maximum number of channels to use
 * @iterations:	iterations before stopping test
 * @xor_sources:	number of xor source buffers
 * @pq_sources:	number of p+q source buffers
 * @timeout:	transfer timeout in msec, -1 for infinite timeout
 */
struct dmatest_params {
	unsigned int buf_size;
	char channel[20];
	char device[20];
	unsigned int threads_per_chan;
	unsigned int max_channels;
	unsigned int iterations;
	unsigned int xor_sources;
	unsigned int pq_sources;
	int timeout;
};

/**
 * struct dmatest_info - test information.
 * @params: test parameters
 * @lock: access protection to the fields of this structure
 */
struct dmatest_info {
	/* Test parameters */
	struct dmatest_params params;
	/* Internal state */
	struct list_head channels;	/* dmatest_chan entries under test */
	unsigned int nr_channels;	/* number of entries on @channels */
	struct mutex lock;
	/* debugfs related stuff */
	struct dentry *root;		/* debugfs root directory */
	struct dmatest_params dbgfs_params; /* staged copy, applied on (re)run */
	/* Test results */
	struct list_head results;	/* list of dmatest_result */
	struct mutex results_lock;	/* protects @results and its sublists */
};

/* Single module-wide test instance. */
static struct dmatest_info test_info;
  166. static bool dmatest_match_channel(struct dmatest_params *params,
  167. struct dma_chan *chan)
  168. {
  169. if (params->channel[0] == '\0')
  170. return true;
  171. return strcmp(dma_chan_name(chan), params->channel) == 0;
  172. }
  173. static bool dmatest_match_device(struct dmatest_params *params,
  174. struct dma_device *device)
  175. {
  176. if (params->device[0] == '\0')
  177. return true;
  178. return strcmp(dev_name(device->dev), params->device) == 0;
  179. }
/* Return a random unsigned long drawn from the kernel entropy pool. */
static unsigned long dmatest_random(void)
{
	unsigned long buf;
	get_random_bytes(&buf, sizeof(buf));
	return buf;
}
  186. static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len,
  187. unsigned int buf_size)
  188. {
  189. unsigned int i;
  190. u8 *buf;
  191. for (; (buf = *bufs); bufs++) {
  192. for (i = 0; i < start; i++)
  193. buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
  194. for ( ; i < start + len; i++)
  195. buf[i] = PATTERN_SRC | PATTERN_COPY
  196. | (~i & PATTERN_COUNT_MASK);
  197. for ( ; i < buf_size; i++)
  198. buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
  199. buf++;
  200. }
  201. }
  202. static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len,
  203. unsigned int buf_size)
  204. {
  205. unsigned int i;
  206. u8 *buf;
  207. for (; (buf = *bufs); bufs++) {
  208. for (i = 0; i < start; i++)
  209. buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
  210. for ( ; i < start + len; i++)
  211. buf[i] = PATTERN_DST | PATTERN_OVERWRITE
  212. | (~i & PATTERN_COUNT_MASK);
  213. for ( ; i < buf_size; i++)
  214. buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
  215. }
  216. }
/*
 * Print a one-line diagnostic for a single mismatched byte.  @counter
 * is the expected pattern counter at @index; @is_srcbuf selects the
 * source- vs destination-buffer wording.  The PATTERN_* bits of the
 * xor-difference tell whether the byte was wrongly copied, wrongly
 * skipped, or corrupted some other way.
 */
static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
		unsigned int counter, bool is_srcbuf)
{
	u8 diff = actual ^ pattern;
	u8 expected = pattern | (~counter & PATTERN_COUNT_MASK);
	const char *thread_name = current->comm;
	if (is_srcbuf)
		pr_warning("%s: srcbuf[0x%x] overwritten!"
				" Expected %02x, got %02x\n",
				thread_name, index, expected, actual);
	else if ((pattern & PATTERN_COPY)
			&& (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
		pr_warning("%s: dstbuf[0x%x] not copied!"
				" Expected %02x, got %02x\n",
				thread_name, index, expected, actual);
	else if (diff & PATTERN_SRC)
		pr_warning("%s: dstbuf[0x%x] was copied!"
				" Expected %02x, got %02x\n",
				thread_name, index, expected, actual);
	else
		pr_warning("%s: dstbuf[0x%x] mismatch!"
				" Expected %02x, got %02x\n",
				thread_name, index, expected, actual);
}
/*
 * Verify that every byte in [start, end) of each buffer equals
 * @pattern combined with the inverted counter (counter starts at
 * @counter for each buffer).  Prints at most MAX_ERROR_COUNT
 * individual mismatches, then a summary of how many were suppressed.
 * Returns the total number of mismatched bytes across all buffers.
 * @bufs is a NULL-terminated array.
 */
static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
		unsigned int end, unsigned int counter, u8 pattern,
		bool is_srcbuf)
{
	unsigned int i;
	unsigned int error_count = 0;
	u8 actual;
	u8 expected;
	u8 *buf;
	unsigned int counter_orig = counter;
	for (; (buf = *bufs); bufs++) {
		counter = counter_orig;	/* counter restarts for each buffer */
		for (i = start; i < end; i++) {
			actual = buf[i];
			expected = pattern | (~counter & PATTERN_COUNT_MASK);
			if (actual != expected) {
				if (error_count < MAX_ERROR_COUNT)
					dmatest_mismatch(actual, pattern, i,
							counter, is_srcbuf);
				error_count++;
			}
			counter++;
		}
	}
	if (error_count > MAX_ERROR_COUNT)
		pr_warning("%s: %u errors suppressed\n",
				current->comm, error_count - MAX_ERROR_COUNT);
	return error_count;
}
/* poor man's completion - we want to use wait_event_freezable() on it */
struct dmatest_done {
	bool done;			/* transfer completed */
	wait_queue_head_t *wait;	/* test thread sleeps here */
};

/* DMA completion callback: flag completion and wake the test thread. */
static void dmatest_callback(void *arg)
{
	struct dmatest_done *done = arg;
	done->done = true;
	wake_up_all(done->wait);
}
  281. static inline void unmap_src(struct device *dev, dma_addr_t *addr, size_t len,
  282. unsigned int count)
  283. {
  284. while (count--)
  285. dma_unmap_single(dev, addr[count], len, DMA_TO_DEVICE);
  286. }
  287. static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len,
  288. unsigned int count)
  289. {
  290. while (count--)
  291. dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL);
  292. }
  293. static unsigned int min_odd(unsigned int x, unsigned int y)
  294. {
  295. unsigned int val = min(x, y);
  296. return val % 2 ? val : val - 1;
  297. }
/*
 * Format one thread result as a human-readable line.
 *
 * NOTE(review): returns a pointer into a static buffer, so concurrent
 * callers from different test threads can garble each other's output —
 * confirm every caller serializes on results_lock if that matters.
 */
static char *thread_result_get(const char *name,
		struct dmatest_thread_result *tr)
{
	static const char * const messages[] = {
		[DMATEST_ET_OK] = "No errors",
		[DMATEST_ET_MAP_SRC] = "src mapping error",
		[DMATEST_ET_MAP_DST] = "dst mapping error",
		[DMATEST_ET_PREP] = "prep error",
		[DMATEST_ET_SUBMIT] = "submit error",
		[DMATEST_ET_TIMEOUT] = "test timed out",
		[DMATEST_ET_DMA_ERROR] =
			"got completion callback (DMA_ERROR)",
		[DMATEST_ET_DMA_IN_PROGRESS] =
			"got completion callback (DMA_IN_PROGRESS)",
		[DMATEST_ET_VERIFY] = "errors",
	};
	static char buf[512];
	/* sizeof(buf) - 1 is belt-and-braces; snprintf() NUL-terminates */
	snprintf(buf, sizeof(buf) - 1,
		"%s: #%u: %s with src_off=0x%x ""dst_off=0x%x len=0x%x (%lu)",
		name, tr->n, messages[tr->type], tr->src_off, tr->dst_off,
		tr->len, tr->data);
	return buf;
}
/*
 * Record one iteration outcome under result container @r and log it.
 * @data is stored in the type-dependent union (cookie, status, error
 * code or verify error count).  Returns 0 on success, -ENOMEM if the
 * record could not be allocated.  List insertion is protected by
 * info->results_lock.
 */
static int thread_result_add(struct dmatest_info *info,
		struct dmatest_result *r, enum dmatest_error_type type,
		unsigned int n, unsigned int src_off, unsigned int dst_off,
		unsigned int len, unsigned long data)
{
	struct dmatest_thread_result *tr;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		return -ENOMEM;
	tr->type = type;
	tr->n = n;
	tr->src_off = src_off;
	tr->dst_off = dst_off;
	tr->len = len;
	tr->data = data;
	mutex_lock(&info->results_lock);
	list_add_tail(&tr->node, &r->results);
	mutex_unlock(&info->results_lock);
	/* logged outside the lock; tr is never freed while tests run */
	pr_warn("%s\n", thread_result_get(r->name, tr));
	return 0;
}
/*
 * Free all stored results whose thread name matches @name, including
 * their per-iteration records.  A NULL @name frees every result.
 * Holds results_lock for the whole walk.
 */
static void result_free(struct dmatest_info *info, const char *name)
{
	struct dmatest_result *r, *_r;
	mutex_lock(&info->results_lock);
	list_for_each_entry_safe(r, _r, &info->results, node) {
		struct dmatest_thread_result *tr, *_tr;
		if (name && strcmp(r->name, name))
			continue;
		list_for_each_entry_safe(tr, _tr, &r->results, node) {
			list_del(&tr->node);
			kfree(tr);
		}
		kfree(r->name);
		list_del(&r->node);
		kfree(r);
	}
	mutex_unlock(&info->results_lock);
}
  360. static struct dmatest_result *result_init(struct dmatest_info *info,
  361. const char *name)
  362. {
  363. struct dmatest_result *r;
  364. r = kzalloc(sizeof(*r), GFP_KERNEL);
  365. if (r) {
  366. r->name = kstrdup(name, GFP_KERNEL);
  367. INIT_LIST_HEAD(&r->results);
  368. mutex_lock(&info->results_lock);
  369. list_add_tail(&r->node, &info->results);
  370. mutex_unlock(&info->results_lock);
  371. }
  372. return r;
  373. }
  374. /*
  375. * This function repeatedly tests DMA transfers of various lengths and
  376. * offsets for a given operation type until it is told to exit by
  377. * kthread_stop(). There may be multiple threads running this function
  378. * in parallel for a single channel, and there may be multiple channels
  379. * being tested in parallel.
  380. *
  381. * Before each test, the source and destination buffer is initialized
  382. * with a known pattern. This pattern is different depending on
  383. * whether it's in an area which is supposed to be copied or
  384. * overwritten, and different in the source and destination buffers.
  385. * So if the DMA engine doesn't copy exactly what we tell it to copy,
  386. * we'll notice.
  387. */
  388. static int dmatest_func(void *data)
  389. {
  390. DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
  391. struct dmatest_thread *thread = data;
  392. struct dmatest_done done = { .wait = &done_wait };
  393. struct dmatest_info *info;
  394. struct dmatest_params *params;
  395. struct dma_chan *chan;
  396. struct dma_device *dev;
  397. const char *thread_name;
  398. unsigned int src_off, dst_off, len;
  399. unsigned int error_count;
  400. unsigned int failed_tests = 0;
  401. unsigned int total_tests = 0;
  402. dma_cookie_t cookie;
  403. enum dma_status status;
  404. enum dma_ctrl_flags flags;
  405. u8 *pq_coefs = NULL;
  406. int ret;
  407. int src_cnt;
  408. int dst_cnt;
  409. int i;
  410. struct dmatest_result *result;
  411. thread_name = current->comm;
  412. set_freezable();
  413. ret = -ENOMEM;
  414. smp_rmb();
  415. info = thread->info;
  416. params = &info->params;
  417. chan = thread->chan;
  418. dev = chan->device;
  419. if (thread->type == DMA_MEMCPY)
  420. src_cnt = dst_cnt = 1;
  421. else if (thread->type == DMA_XOR) {
  422. /* force odd to ensure dst = src */
  423. src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
  424. dst_cnt = 1;
  425. } else if (thread->type == DMA_PQ) {
  426. /* force odd to ensure dst = src */
  427. src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
  428. dst_cnt = 2;
  429. pq_coefs = kmalloc(params->pq_sources+1, GFP_KERNEL);
  430. if (!pq_coefs)
  431. goto err_thread_type;
  432. for (i = 0; i < src_cnt; i++)
  433. pq_coefs[i] = 1;
  434. } else
  435. goto err_thread_type;
  436. result = result_init(info, thread_name);
  437. if (!result)
  438. goto err_srcs;
  439. thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL);
  440. if (!thread->srcs)
  441. goto err_srcs;
  442. for (i = 0; i < src_cnt; i++) {
  443. thread->srcs[i] = kmalloc(params->buf_size, GFP_KERNEL);
  444. if (!thread->srcs[i])
  445. goto err_srcbuf;
  446. }
  447. thread->srcs[i] = NULL;
  448. thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL);
  449. if (!thread->dsts)
  450. goto err_dsts;
  451. for (i = 0; i < dst_cnt; i++) {
  452. thread->dsts[i] = kmalloc(params->buf_size, GFP_KERNEL);
  453. if (!thread->dsts[i])
  454. goto err_dstbuf;
  455. }
  456. thread->dsts[i] = NULL;
  457. set_user_nice(current, 10);
  458. /*
  459. * src buffers are freed by the DMAEngine code with dma_unmap_single()
  460. * dst buffers are freed by ourselves below
  461. */
  462. flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT
  463. | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE;
  464. while (!kthread_should_stop()
  465. && !(params->iterations && total_tests >= params->iterations)) {
  466. struct dma_async_tx_descriptor *tx = NULL;
  467. dma_addr_t dma_srcs[src_cnt];
  468. dma_addr_t dma_dsts[dst_cnt];
  469. u8 align = 0;
  470. total_tests++;
  471. /* honor alignment restrictions */
  472. if (thread->type == DMA_MEMCPY)
  473. align = dev->copy_align;
  474. else if (thread->type == DMA_XOR)
  475. align = dev->xor_align;
  476. else if (thread->type == DMA_PQ)
  477. align = dev->pq_align;
  478. if (1 << align > params->buf_size) {
  479. pr_err("%u-byte buffer too small for %d-byte alignment\n",
  480. params->buf_size, 1 << align);
  481. break;
  482. }
  483. len = dmatest_random() % params->buf_size + 1;
  484. len = (len >> align) << align;
  485. if (!len)
  486. len = 1 << align;
  487. src_off = dmatest_random() % (params->buf_size - len + 1);
  488. dst_off = dmatest_random() % (params->buf_size - len + 1);
  489. src_off = (src_off >> align) << align;
  490. dst_off = (dst_off >> align) << align;
  491. dmatest_init_srcs(thread->srcs, src_off, len, params->buf_size);
  492. dmatest_init_dsts(thread->dsts, dst_off, len, params->buf_size);
  493. for (i = 0; i < src_cnt; i++) {
  494. u8 *buf = thread->srcs[i] + src_off;
  495. dma_srcs[i] = dma_map_single(dev->dev, buf, len,
  496. DMA_TO_DEVICE);
  497. ret = dma_mapping_error(dev->dev, dma_srcs[i]);
  498. if (ret) {
  499. unmap_src(dev->dev, dma_srcs, len, i);
  500. thread_result_add(info, result,
  501. DMATEST_ET_MAP_SRC,
  502. total_tests, src_off, dst_off,
  503. len, ret);
  504. failed_tests++;
  505. continue;
  506. }
  507. }
  508. /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
  509. for (i = 0; i < dst_cnt; i++) {
  510. dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i],
  511. params->buf_size,
  512. DMA_BIDIRECTIONAL);
  513. ret = dma_mapping_error(dev->dev, dma_dsts[i]);
  514. if (ret) {
  515. unmap_src(dev->dev, dma_srcs, len, src_cnt);
  516. unmap_dst(dev->dev, dma_dsts, params->buf_size,
  517. i);
  518. thread_result_add(info, result,
  519. DMATEST_ET_MAP_DST,
  520. total_tests, src_off, dst_off,
  521. len, ret);
  522. failed_tests++;
  523. continue;
  524. }
  525. }
  526. if (thread->type == DMA_MEMCPY)
  527. tx = dev->device_prep_dma_memcpy(chan,
  528. dma_dsts[0] + dst_off,
  529. dma_srcs[0], len,
  530. flags);
  531. else if (thread->type == DMA_XOR)
  532. tx = dev->device_prep_dma_xor(chan,
  533. dma_dsts[0] + dst_off,
  534. dma_srcs, src_cnt,
  535. len, flags);
  536. else if (thread->type == DMA_PQ) {
  537. dma_addr_t dma_pq[dst_cnt];
  538. for (i = 0; i < dst_cnt; i++)
  539. dma_pq[i] = dma_dsts[i] + dst_off;
  540. tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs,
  541. src_cnt, pq_coefs,
  542. len, flags);
  543. }
  544. if (!tx) {
  545. unmap_src(dev->dev, dma_srcs, len, src_cnt);
  546. unmap_dst(dev->dev, dma_dsts, params->buf_size,
  547. dst_cnt);
  548. thread_result_add(info, result, DMATEST_ET_PREP,
  549. total_tests, src_off, dst_off,
  550. len, 0);
  551. msleep(100);
  552. failed_tests++;
  553. continue;
  554. }
  555. done.done = false;
  556. tx->callback = dmatest_callback;
  557. tx->callback_param = &done;
  558. cookie = tx->tx_submit(tx);
  559. if (dma_submit_error(cookie)) {
  560. thread_result_add(info, result, DMATEST_ET_SUBMIT,
  561. total_tests, src_off, dst_off,
  562. len, cookie);
  563. msleep(100);
  564. failed_tests++;
  565. continue;
  566. }
  567. dma_async_issue_pending(chan);
  568. wait_event_freezable_timeout(done_wait,
  569. done.done || kthread_should_stop(),
  570. msecs_to_jiffies(params->timeout));
  571. status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
  572. if (!done.done) {
  573. /*
  574. * We're leaving the timed out dma operation with
  575. * dangling pointer to done_wait. To make this
  576. * correct, we'll need to allocate wait_done for
  577. * each test iteration and perform "who's gonna
  578. * free it this time?" dancing. For now, just
  579. * leave it dangling.
  580. */
  581. thread_result_add(info, result, DMATEST_ET_TIMEOUT,
  582. total_tests, src_off, dst_off,
  583. len, 0);
  584. failed_tests++;
  585. continue;
  586. } else if (status != DMA_SUCCESS) {
  587. enum dmatest_error_type type = (status == DMA_ERROR) ?
  588. DMATEST_ET_DMA_ERROR : DMATEST_ET_DMA_IN_PROGRESS;
  589. thread_result_add(info, result, type,
  590. total_tests, src_off, dst_off,
  591. len, status);
  592. failed_tests++;
  593. continue;
  594. }
  595. /* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */
  596. unmap_dst(dev->dev, dma_dsts, params->buf_size, dst_cnt);
  597. error_count = 0;
  598. pr_debug("%s: verifying source buffer...\n", thread_name);
  599. error_count += dmatest_verify(thread->srcs, 0, src_off,
  600. 0, PATTERN_SRC, true);
  601. error_count += dmatest_verify(thread->srcs, src_off,
  602. src_off + len, src_off,
  603. PATTERN_SRC | PATTERN_COPY, true);
  604. error_count += dmatest_verify(thread->srcs, src_off + len,
  605. params->buf_size, src_off + len,
  606. PATTERN_SRC, true);
  607. pr_debug("%s: verifying dest buffer...\n",
  608. thread->task->comm);
  609. error_count += dmatest_verify(thread->dsts, 0, dst_off,
  610. 0, PATTERN_DST, false);
  611. error_count += dmatest_verify(thread->dsts, dst_off,
  612. dst_off + len, src_off,
  613. PATTERN_SRC | PATTERN_COPY, false);
  614. error_count += dmatest_verify(thread->dsts, dst_off + len,
  615. params->buf_size, dst_off + len,
  616. PATTERN_DST, false);
  617. if (error_count) {
  618. thread_result_add(info, result, DMATEST_ET_VERIFY,
  619. total_tests, src_off, dst_off,
  620. len, error_count);
  621. failed_tests++;
  622. } else {
  623. thread_result_add(info, result, DMATEST_ET_OK,
  624. total_tests, src_off, dst_off,
  625. len, 0);
  626. }
  627. }
  628. ret = 0;
  629. for (i = 0; thread->dsts[i]; i++)
  630. kfree(thread->dsts[i]);
  631. err_dstbuf:
  632. kfree(thread->dsts);
  633. err_dsts:
  634. for (i = 0; thread->srcs[i]; i++)
  635. kfree(thread->srcs[i]);
  636. err_srcbuf:
  637. kfree(thread->srcs);
  638. err_srcs:
  639. kfree(pq_coefs);
  640. err_thread_type:
  641. pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
  642. thread_name, total_tests, failed_tests, ret);
  643. /* terminate all transfers on specified channels */
  644. if (ret)
  645. dmaengine_terminate_all(chan);
  646. thread->done = true;
  647. if (params->iterations > 0)
  648. while (!kthread_should_stop()) {
  649. DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
  650. interruptible_sleep_on(&wait_dmatest_exit);
  651. }
  652. return ret;
  653. }
/*
 * Stop and reap every worker thread on @dtc, abort any transfers still
 * queued on the channel, and free the container.  The channel itself
 * is released by the caller.
 */
static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
{
	struct dmatest_thread *thread;
	struct dmatest_thread *_thread;
	int ret;
	list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
		ret = kthread_stop(thread->task);
		pr_debug("dmatest: thread %s exited with status %d\n",
				thread->task->comm, ret);
		list_del(&thread->node);
		kfree(thread);
	}
	/* terminate all transfers on specified channels */
	dmaengine_terminate_all(dtc->chan);
	kfree(dtc);
}
/*
 * Spawn params->threads_per_chan worker threads of transaction @type
 * on channel @dtc.  Returns the number of threads actually started
 * (fewer than requested if allocation or kthread creation fails),
 * or -EINVAL for an unsupported transaction type.
 */
static int dmatest_add_threads(struct dmatest_info *info,
		struct dmatest_chan *dtc, enum dma_transaction_type type)
{
	struct dmatest_params *params = &info->params;
	struct dmatest_thread *thread;
	struct dma_chan *chan = dtc->chan;
	char *op;
	unsigned int i;
	if (type == DMA_MEMCPY)
		op = "copy";
	else if (type == DMA_XOR)
		op = "xor";
	else if (type == DMA_PQ)
		op = "pq";
	else
		return -EINVAL;
	for (i = 0; i < params->threads_per_chan; i++) {
		thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
		if (!thread) {
			pr_warning("dmatest: No memory for %s-%s%u\n",
					dma_chan_name(chan), op, i);
			break;
		}
		thread->info = info;
		thread->chan = dtc->chan;
		thread->type = type;
		/* publish thread fields before the thread starts running;
		 * pairs with smp_rmb() in dmatest_func() */
		smp_wmb();
		thread->task = kthread_run(dmatest_func, thread, "%s-%s%u",
				dma_chan_name(chan), op, i);
		if (IS_ERR(thread->task)) {
			pr_warning("dmatest: Failed to run thread %s-%s%u\n",
					dma_chan_name(chan), op, i);
			kfree(thread);
			break;
		}
		/* srcbuf and dstbuf are allocated by the thread itself */
		list_add_tail(&thread->node, &dtc->threads);
	}
	return i;
}
/*
 * Wrap @chan in a dmatest_chan, start threads for every capability the
 * device advertises (MEMCPY, XOR, PQ), and link it into info->channels.
 * Returns 0 on success or -ENOMEM if the container cannot be allocated.
 */
static int dmatest_add_channel(struct dmatest_info *info,
		struct dma_chan *chan)
{
	struct dmatest_chan *dtc;
	struct dma_device *dma_dev = chan->device;
	unsigned int thread_count = 0;
	int cnt;
	dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
	if (!dtc) {
		pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan));
		return -ENOMEM;
	}
	dtc->chan = chan;
	INIT_LIST_HEAD(&dtc->threads);
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY);
		thread_count += cnt > 0 ? cnt : 0;
	}
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(info, dtc, DMA_XOR);
		thread_count += cnt > 0 ? cnt : 0;
	}
	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(info, dtc, DMA_PQ);
		thread_count += cnt > 0 ? cnt : 0;
	}
	pr_info("dmatest: Started %u threads using %s\n",
		thread_count, dma_chan_name(chan));
	list_add_tail(&dtc->node, &info->channels);
	info->nr_channels++;
	return 0;
}
  742. static bool filter(struct dma_chan *chan, void *param)
  743. {
  744. struct dmatest_params *params = param;
  745. if (!dmatest_match_channel(params, chan) ||
  746. !dmatest_match_device(params, chan->device))
  747. return false;
  748. else
  749. return true;
  750. }
/*
 * Request channels matching the configured filters and start test
 * threads on each, stopping at params->max_channels if set.  Caller
 * must hold info->lock.  Note the request mask asks for DMA_MEMCPY
 * capable channels; XOR/PQ threads are added per device capability
 * inside dmatest_add_channel().
 */
static int __run_threaded_test(struct dmatest_info *info)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dmatest_params *params = &info->params;
	int err = 0;
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	for (;;) {
		chan = dma_request_channel(mask, filter, params);
		if (chan) {
			err = dmatest_add_channel(info, chan);
			if (err) {
				dma_release_channel(chan);
				break; /* add_channel failed, punt */
			}
		} else
			break; /* no more channels available */
		if (params->max_channels &&
		    info->nr_channels >= params->max_channels)
			break; /* we have all we need */
	}
	return err;
}
#ifndef MODULE
/*
 * Locked wrapper around __run_threaded_test(); only used for the
 * built-in (non-module) init path.
 */
static int run_threaded_test(struct dmatest_info *info)
{
	int ret;
	mutex_lock(&info->lock);
	ret = __run_threaded_test(info);
	mutex_unlock(&info->lock);
	return ret;
}
#endif
/*
 * Tear down every channel under test: stop its threads, release the
 * channel back to dmaengine, and reset the channel count.  Caller
 * must hold info->lock.
 */
static void __stop_threaded_test(struct dmatest_info *info)
{
	struct dmatest_chan *dtc, *_dtc;
	struct dma_chan *chan;
	list_for_each_entry_safe(dtc, _dtc, &info->channels, node) {
		list_del(&dtc->node);
		chan = dtc->chan;
		dmatest_cleanup_channel(dtc);
		pr_debug("dmatest: dropped channel %s\n", dma_chan_name(chan));
		dma_release_channel(chan);
	}
	info->nr_channels = 0;
}
/* Locked wrapper around __stop_threaded_test(). */
static void stop_threaded_test(struct dmatest_info *info)
{
	mutex_lock(&info->lock);
	__stop_threaded_test(info);
	mutex_unlock(&info->lock);
}
  804. static int __restart_threaded_test(struct dmatest_info *info, bool run)
  805. {
  806. struct dmatest_params *params = &info->params;
  807. int ret;
  808. /* Stop any running test first */
  809. __stop_threaded_test(info);
  810. if (run == false)
  811. return 0;
  812. /* Clear results from previous run */
  813. result_free(info, NULL);
  814. /* Copy test parameters */
  815. memcpy(params, &info->dbgfs_params, sizeof(*params));
  816. /* Run test with new parameters */
  817. ret = __run_threaded_test(info);
  818. if (ret) {
  819. __stop_threaded_test(info);
  820. pr_err("dmatest: Can't run test\n");
  821. }
  822. return ret;
  823. }
  824. static ssize_t dtf_write_string(void *to, size_t available, loff_t *ppos,
  825. const void __user *from, size_t count)
  826. {
  827. char tmp[20];
  828. ssize_t len;
  829. len = simple_write_to_buffer(tmp, sizeof(tmp) - 1, ppos, from, count);
  830. if (len >= 0) {
  831. tmp[len] = '\0';
  832. strlcpy(to, strim(tmp), available);
  833. }
  834. return len;
  835. }
  836. static ssize_t dtf_read_channel(struct file *file, char __user *buf,
  837. size_t count, loff_t *ppos)
  838. {
  839. struct dmatest_info *info = file->private_data;
  840. return simple_read_from_buffer(buf, count, ppos,
  841. info->dbgfs_params.channel,
  842. strlen(info->dbgfs_params.channel));
  843. }
  844. static ssize_t dtf_write_channel(struct file *file, const char __user *buf,
  845. size_t size, loff_t *ppos)
  846. {
  847. struct dmatest_info *info = file->private_data;
  848. return dtf_write_string(info->dbgfs_params.channel,
  849. sizeof(info->dbgfs_params.channel),
  850. ppos, buf, size);
  851. }
/* debugfs "channel" node: get/set the channel name filter. */
static const struct file_operations dtf_channel_fops = {
	.read = dtf_read_channel,
	.write = dtf_write_channel,
	.open = simple_open,
	.llseek = default_llseek,
};
  858. static ssize_t dtf_read_device(struct file *file, char __user *buf,
  859. size_t count, loff_t *ppos)
  860. {
  861. struct dmatest_info *info = file->private_data;
  862. return simple_read_from_buffer(buf, count, ppos,
  863. info->dbgfs_params.device,
  864. strlen(info->dbgfs_params.device));
  865. }
  866. static ssize_t dtf_write_device(struct file *file, const char __user *buf,
  867. size_t size, loff_t *ppos)
  868. {
  869. struct dmatest_info *info = file->private_data;
  870. return dtf_write_string(info->dbgfs_params.device,
  871. sizeof(info->dbgfs_params.device),
  872. ppos, buf, size);
  873. }
/* debugfs "device" node: get/set the DMA device name filter. */
static const struct file_operations dtf_device_fops = {
	.read = dtf_read_device,
	.write = dtf_write_device,
	.open = simple_open,
	.llseek = default_llseek,
};
  880. static ssize_t dtf_read_run(struct file *file, char __user *user_buf,
  881. size_t count, loff_t *ppos)
  882. {
  883. struct dmatest_info *info = file->private_data;
  884. char buf[3];
  885. struct dmatest_chan *dtc;
  886. bool alive = false;
  887. mutex_lock(&info->lock);
  888. list_for_each_entry(dtc, &info->channels, node) {
  889. struct dmatest_thread *thread;
  890. list_for_each_entry(thread, &dtc->threads, node) {
  891. if (!thread->done) {
  892. alive = true;
  893. break;
  894. }
  895. }
  896. }
  897. if (alive) {
  898. buf[0] = 'Y';
  899. } else {
  900. __stop_threaded_test(info);
  901. buf[0] = 'N';
  902. }
  903. mutex_unlock(&info->lock);
  904. buf[1] = '\n';
  905. buf[2] = 0x00;
  906. return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
  907. }
  908. static ssize_t dtf_write_run(struct file *file, const char __user *user_buf,
  909. size_t count, loff_t *ppos)
  910. {
  911. struct dmatest_info *info = file->private_data;
  912. char buf[16];
  913. bool bv;
  914. int ret = 0;
  915. if (copy_from_user(buf, user_buf, min(count, (sizeof(buf) - 1))))
  916. return -EFAULT;
  917. if (strtobool(buf, &bv) == 0) {
  918. mutex_lock(&info->lock);
  919. ret = __restart_threaded_test(info, bv);
  920. mutex_unlock(&info->lock);
  921. }
  922. return ret ? ret : count;
  923. }
/* debugfs "run" node: query test status / start or stop the test. */
static const struct file_operations dtf_run_fops = {
	.read = dtf_read_run,
	.write = dtf_write_run,
	.open = simple_open,
	.llseek = default_llseek,
};
  930. static int dtf_results_show(struct seq_file *sf, void *data)
  931. {
  932. struct dmatest_info *info = sf->private;
  933. struct dmatest_result *result;
  934. struct dmatest_thread_result *tr;
  935. mutex_lock(&info->results_lock);
  936. list_for_each_entry(result, &info->results, node) {
  937. list_for_each_entry(tr, &result->results, node)
  938. seq_printf(sf, "%s\n",
  939. thread_result_get(result->name, tr));
  940. }
  941. mutex_unlock(&info->results_lock);
  942. return 0;
  943. }
/* Open "results" as a single_open seq_file over dtf_results_show(). */
static int dtf_results_open(struct inode *inode, struct file *file)
{
	return single_open(file, dtf_results_show, inode->i_private);
}
/* debugfs "results" node: read-only dump of stored test results. */
static const struct file_operations dtf_results_fops = {
	.open = dtf_results_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
  954. static int dmatest_register_dbgfs(struct dmatest_info *info)
  955. {
  956. struct dentry *d;
  957. struct dmatest_params *params = &info->dbgfs_params;
  958. int ret = -ENOMEM;
  959. d = debugfs_create_dir("dmatest", NULL);
  960. if (IS_ERR(d))
  961. return PTR_ERR(d);
  962. if (!d)
  963. goto err_root;
  964. info->root = d;
  965. /* Copy initial values */
  966. memcpy(params, &info->params, sizeof(*params));
  967. /* Test parameters */
  968. d = debugfs_create_u32("test_buf_size", S_IWUSR | S_IRUGO, info->root,
  969. (u32 *)&params->buf_size);
  970. if (IS_ERR_OR_NULL(d))
  971. goto err_node;
  972. d = debugfs_create_file("channel", S_IRUGO | S_IWUSR, info->root,
  973. info, &dtf_channel_fops);
  974. if (IS_ERR_OR_NULL(d))
  975. goto err_node;
  976. d = debugfs_create_file("device", S_IRUGO | S_IWUSR, info->root,
  977. info, &dtf_device_fops);
  978. if (IS_ERR_OR_NULL(d))
  979. goto err_node;
  980. d = debugfs_create_u32("threads_per_chan", S_IWUSR | S_IRUGO, info->root,
  981. (u32 *)&params->threads_per_chan);
  982. if (IS_ERR_OR_NULL(d))
  983. goto err_node;
  984. d = debugfs_create_u32("max_channels", S_IWUSR | S_IRUGO, info->root,
  985. (u32 *)&params->max_channels);
  986. if (IS_ERR_OR_NULL(d))
  987. goto err_node;
  988. d = debugfs_create_u32("iterations", S_IWUSR | S_IRUGO, info->root,
  989. (u32 *)&params->iterations);
  990. if (IS_ERR_OR_NULL(d))
  991. goto err_node;
  992. d = debugfs_create_u32("xor_sources", S_IWUSR | S_IRUGO, info->root,
  993. (u32 *)&params->xor_sources);
  994. if (IS_ERR_OR_NULL(d))
  995. goto err_node;
  996. d = debugfs_create_u32("pq_sources", S_IWUSR | S_IRUGO, info->root,
  997. (u32 *)&params->pq_sources);
  998. if (IS_ERR_OR_NULL(d))
  999. goto err_node;
  1000. d = debugfs_create_u32("timeout", S_IWUSR | S_IRUGO, info->root,
  1001. (u32 *)&params->timeout);
  1002. if (IS_ERR_OR_NULL(d))
  1003. goto err_node;
  1004. /* Run or stop threaded test */
  1005. d = debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root,
  1006. info, &dtf_run_fops);
  1007. if (IS_ERR_OR_NULL(d))
  1008. goto err_node;
  1009. /* Results of test in progress */
  1010. d = debugfs_create_file("results", S_IRUGO, info->root, info,
  1011. &dtf_results_fops);
  1012. if (IS_ERR_OR_NULL(d))
  1013. goto err_node;
  1014. return 0;
  1015. err_node:
  1016. debugfs_remove_recursive(info->root);
  1017. err_root:
  1018. pr_err("dmatest: Failed to initialize debugfs\n");
  1019. return ret;
  1020. }
/*
 * Initialise the global test_info: locks, channel/result lists, default
 * parameters from the module options, and the debugfs interface.  When
 * built in, the test is started immediately; as a module it is driven
 * via the debugfs "run" file instead.
 */
static int __init dmatest_init(void)
{
	struct dmatest_info *info = &test_info;
	struct dmatest_params *params = &info->params;
	int ret;

	memset(info, 0, sizeof(*info));

	mutex_init(&info->lock);
	INIT_LIST_HEAD(&info->channels);

	mutex_init(&info->results_lock);
	INIT_LIST_HEAD(&info->results);

	/* Set default parameters */
	params->buf_size = test_buf_size;
	strlcpy(params->channel, test_channel, sizeof(params->channel));
	strlcpy(params->device, test_device, sizeof(params->device));
	params->threads_per_chan = threads_per_chan;
	params->max_channels = max_channels;
	params->iterations = iterations;
	params->xor_sources = xor_sources;
	params->pq_sources = pq_sources;
	params->timeout = timeout;

	ret = dmatest_register_dbgfs(info);
	if (ret)
		return ret;

#ifdef MODULE
	/* Module case: wait for userspace to trigger a run via debugfs. */
	return 0;
#else
	/* Built-in case: start testing right away. */
	return run_threaded_test(info);
#endif
}
/* when compiled-in wait for drivers to load first */
late_initcall(dmatest_init);
/*
 * Module teardown: remove debugfs first so userspace can no longer
 * start a new test, then stop any running threads and free all stored
 * results.
 */
static void __exit dmatest_exit(void)
{
	struct dmatest_info *info = &test_info;

	debugfs_remove_recursive(info->root);
	stop_threaded_test(info);
	result_free(info, NULL);
}
module_exit(dmatest_exit);

MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_LICENSE("GPL v2");