dmatest.c 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198
  1. /*
  2. * DMA Engine test module
  3. *
  4. * Copyright (C) 2007 Atmel Corporation
  5. * Copyright (C) 2013 Intel Corporation
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License version 2 as
  9. * published by the Free Software Foundation.
  10. */
  11. #include <linux/delay.h>
  12. #include <linux/dma-mapping.h>
  13. #include <linux/dmaengine.h>
  14. #include <linux/freezer.h>
  15. #include <linux/init.h>
  16. #include <linux/kthread.h>
  17. #include <linux/module.h>
  18. #include <linux/moduleparam.h>
  19. #include <linux/random.h>
  20. #include <linux/slab.h>
  21. #include <linux/wait.h>
  22. #include <linux/ctype.h>
  23. #include <linux/debugfs.h>
  24. #include <linux/uaccess.h>
  25. #include <linux/seq_file.h>
  26. static unsigned int test_buf_size = 16384;
  27. module_param(test_buf_size, uint, S_IRUGO | S_IWUSR);
  28. MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
  29. static char test_channel[20];
  30. module_param_string(channel, test_channel, sizeof(test_channel),
  31. S_IRUGO | S_IWUSR);
  32. MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");
  33. static char test_device[20];
  34. module_param_string(device, test_device, sizeof(test_device),
  35. S_IRUGO | S_IWUSR);
  36. MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");
  37. static unsigned int threads_per_chan = 1;
  38. module_param(threads_per_chan, uint, S_IRUGO | S_IWUSR);
  39. MODULE_PARM_DESC(threads_per_chan,
  40. "Number of threads to start per channel (default: 1)");
  41. static unsigned int max_channels;
  42. module_param(max_channels, uint, S_IRUGO | S_IWUSR);
  43. MODULE_PARM_DESC(max_channels,
  44. "Maximum number of channels to use (default: all)");
  45. static unsigned int iterations;
  46. module_param(iterations, uint, S_IRUGO | S_IWUSR);
  47. MODULE_PARM_DESC(iterations,
  48. "Iterations before stopping test (default: infinite)");
  49. static unsigned int xor_sources = 3;
  50. module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
  51. MODULE_PARM_DESC(xor_sources,
  52. "Number of xor source buffers (default: 3)");
  53. static unsigned int pq_sources = 3;
  54. module_param(pq_sources, uint, S_IRUGO | S_IWUSR);
  55. MODULE_PARM_DESC(pq_sources,
  56. "Number of p+q source buffers (default: 3)");
  57. static int timeout = 3000;
  58. module_param(timeout, uint, S_IRUGO | S_IWUSR);
  59. MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
  60. "Pass -1 for infinite timeout");
/* Maximum amount of mismatched bytes in buffer to print */
#define MAX_ERROR_COUNT 32

/*
 * Initialization patterns. All bytes in the source buffer has bit 7
 * set, all bytes in the destination buffer has bit 7 cleared.
 *
 * Bit 6 is set for all bytes which are to be copied by the DMA
 * engine. Bit 5 is set for all bytes which are to be overwritten by
 * the DMA engine.
 *
 * The remaining bits are the inverse of a counter which increments by
 * one for each byte address.
 */
#define PATTERN_SRC 0x80	/* bit 7 set: byte belongs to a source buffer */
#define PATTERN_DST 0x00	/* bit 7 clear: byte belongs to a destination */
#define PATTERN_COPY 0x40	/* bit 6: byte lies inside the copied window */
#define PATTERN_OVERWRITE 0x20	/* bit 5: byte will be overwritten by DMA */
#define PATTERN_COUNT_MASK 0x1f	/* low 5 bits: inverted per-byte counter */
/*
 * Classification of a single test-iteration outcome.  The order must
 * stay in sync with the messages[] table in thread_result_get().
 */
enum dmatest_error_type {
	DMATEST_ET_OK,			/* iteration completed and verified clean */
	DMATEST_ET_MAP_SRC,		/* dma_map_single() failed on a source */
	DMATEST_ET_MAP_DST,		/* dma_map_single() failed on a destination */
	DMATEST_ET_PREP,		/* device_prep_dma_*() returned NULL */
	DMATEST_ET_SUBMIT,		/* tx_submit() returned an error cookie */
	DMATEST_ET_TIMEOUT,		/* completion callback never fired in time */
	DMATEST_ET_DMA_ERROR,		/* status query reported DMA_ERROR */
	DMATEST_ET_DMA_IN_PROGRESS,	/* woken, but status not yet complete */
	DMATEST_ET_VERIFY,		/* buffer verification found mismatches */
	DMATEST_ET_VERIFY_BUF,		/* entry carries per-byte verify details */
};
/* One mismatched byte recorded by dmatest_verify(). */
struct dmatest_verify_buffer {
	unsigned int index;	/* byte offset within the buffer */
	u8 expected;		/* pattern byte that should have been there */
	u8 actual;		/* byte actually read back */
};

/* Details of a failed buffer verification (at most MAX_ERROR_COUNT bytes). */
struct dmatest_verify_result {
	unsigned int error_count;	/* total mismatches; may exceed data[] */
	struct dmatest_verify_buffer data[MAX_ERROR_COUNT];
	u8 pattern;		/* pattern the region was checked against */
	bool is_srcbuf;		/* true if a source buffer was damaged */
};

/* Outcome of one test iteration, linked into dmatest_result.results. */
struct dmatest_thread_result {
	struct list_head node;
	unsigned int n;		/* iteration number (total_tests) */
	unsigned int src_off;
	unsigned int dst_off;
	unsigned int len;
	enum dmatest_error_type type;
	union {
		unsigned long data;	/* generic payload used for printing */
		dma_cookie_t cookie;	/* DMATEST_ET_SUBMIT */
		enum dma_status status;	/* DMA_ERROR / DMA_IN_PROGRESS cases */
		int error;		/* mapping-error codes */
		struct dmatest_verify_result *vr; /* DMATEST_ET_VERIFY_BUF */
	};
};

/* Per-thread collection of iteration results, keyed by thread name. */
struct dmatest_result {
	struct list_head node;		/* entry in dmatest_info.results */
	char *name;			/* kstrdup'd thread name */
	struct list_head results;	/* list of dmatest_thread_result */
};
struct dmatest_info;

/* One test kthread bound to a channel and a single operation type. */
struct dmatest_thread {
	struct list_head node;		/* entry in dmatest_chan.threads */
	struct dmatest_info *info;
	struct task_struct *task;
	struct dma_chan *chan;
	u8 **srcs;			/* NULL-terminated source buffer array */
	u8 **dsts;			/* NULL-terminated destination buffer array */
	enum dma_transaction_type type;	/* DMA_MEMCPY, DMA_XOR or DMA_PQ */
	bool done;			/* set by dmatest_func() before exiting */
};

/* A DMA channel under test together with its worker threads. */
struct dmatest_chan {
	struct list_head node;		/* entry in dmatest_info.channels */
	struct dma_chan *chan;
	struct list_head threads;	/* list of dmatest_thread */
};
/**
 * struct dmatest_params - test parameters.
 * @buf_size: size of the memcpy test buffer
 * @channel: bus ID of the channel to test
 * @device: bus ID of the DMA Engine to test
 * @threads_per_chan: number of threads to start per channel
 * @max_channels: maximum number of channels to use
 * @iterations: iterations before stopping test
 * @xor_sources: number of xor source buffers
 * @pq_sources: number of p+q source buffers
 * @timeout: transfer timeout in msec, -1 for infinite timeout
 *
 * Snapshot of the module parameters above; copied here so a running
 * test is not affected by later sysfs writes.
 */
struct dmatest_params {
	unsigned int buf_size;
	char channel[20];
	char device[20];
	unsigned int threads_per_chan;
	unsigned int max_channels;
	unsigned int iterations;
	unsigned int xor_sources;
	unsigned int pq_sources;
	int timeout;	/* signed: -1 means wait forever */
};
/**
 * struct dmatest_info - test information.
 * @params: test parameters
 * @lock: access protection to the fields of this structure
 */
struct dmatest_info {
	/* Test parameters */
	struct dmatest_params params;

	/* Internal state */
	struct list_head channels;	/* list of dmatest_chan under test */
	unsigned int nr_channels;
	struct mutex lock;

	/* debugfs related stuff */
	struct dentry *root;

	/* Test results */
	struct list_head results;	/* list of dmatest_result */
	struct mutex results_lock;	/* protects results and their sublists */
};

/* Single module-wide instance. */
static struct dmatest_info test_info;
  180. static bool dmatest_match_channel(struct dmatest_params *params,
  181. struct dma_chan *chan)
  182. {
  183. if (params->channel[0] == '\0')
  184. return true;
  185. return strcmp(dma_chan_name(chan), params->channel) == 0;
  186. }
  187. static bool dmatest_match_device(struct dmatest_params *params,
  188. struct dma_device *device)
  189. {
  190. if (params->device[0] == '\0')
  191. return true;
  192. return strcmp(dev_name(device->dev), params->device) == 0;
  193. }
  194. static unsigned long dmatest_random(void)
  195. {
  196. unsigned long buf;
  197. get_random_bytes(&buf, sizeof(buf));
  198. return buf;
  199. }
  200. static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len,
  201. unsigned int buf_size)
  202. {
  203. unsigned int i;
  204. u8 *buf;
  205. for (; (buf = *bufs); bufs++) {
  206. for (i = 0; i < start; i++)
  207. buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
  208. for ( ; i < start + len; i++)
  209. buf[i] = PATTERN_SRC | PATTERN_COPY
  210. | (~i & PATTERN_COUNT_MASK);
  211. for ( ; i < buf_size; i++)
  212. buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
  213. buf++;
  214. }
  215. }
  216. static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len,
  217. unsigned int buf_size)
  218. {
  219. unsigned int i;
  220. u8 *buf;
  221. for (; (buf = *bufs); bufs++) {
  222. for (i = 0; i < start; i++)
  223. buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
  224. for ( ; i < start + len; i++)
  225. buf[i] = PATTERN_DST | PATTERN_OVERWRITE
  226. | (~i & PATTERN_COUNT_MASK);
  227. for ( ; i < buf_size; i++)
  228. buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
  229. }
  230. }
/*
 * Compare bytes [start, end) of each buffer in the NULL-terminated
 * @bufs array against @pattern combined with the inverted running
 * @counter.  Details of at most MAX_ERROR_COUNT mismatches are stored
 * into @vr (when non-NULL); the total mismatch count is returned.
 */
static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs,
		unsigned int start, unsigned int end, unsigned int counter,
		u8 pattern, bool is_srcbuf)
{
	unsigned int i;
	unsigned int error_count = 0;
	u8 actual;
	u8 expected;
	u8 *buf;
	unsigned int counter_orig = counter;
	struct dmatest_verify_buffer *vb;

	for (; (buf = *bufs); bufs++) {
		/* every buffer is checked against the same counter sequence */
		counter = counter_orig;
		for (i = start; i < end; i++) {
			actual = buf[i];
			expected = pattern | (~counter & PATTERN_COUNT_MASK);
			if (actual != expected) {
				/* record details for the first few mismatches only */
				if (error_count < MAX_ERROR_COUNT && vr) {
					vb = &vr->data[error_count];
					vb->index = i;
					vb->expected = expected;
					vb->actual = actual;
				}
				error_count++;
			}
			counter++;
		}
	}
	if (error_count > MAX_ERROR_COUNT)
		pr_warning("%s: %u errors suppressed\n",
			current->comm, error_count - MAX_ERROR_COUNT);

	return error_count;
}
/* poor man's completion - we want to use wait_event_freezable() on it */
struct dmatest_done {
	bool done;			/* set by dmatest_callback() */
	wait_queue_head_t *wait;	/* woken when the transfer completes */
};
  269. static void dmatest_callback(void *arg)
  270. {
  271. struct dmatest_done *done = arg;
  272. done->done = true;
  273. wake_up_all(done->wait);
  274. }
  275. static inline void unmap_src(struct device *dev, dma_addr_t *addr, size_t len,
  276. unsigned int count)
  277. {
  278. while (count--)
  279. dma_unmap_single(dev, addr[count], len, DMA_TO_DEVICE);
  280. }
  281. static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len,
  282. unsigned int count)
  283. {
  284. while (count--)
  285. dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL);
  286. }
  287. static unsigned int min_odd(unsigned int x, unsigned int y)
  288. {
  289. unsigned int val = min(x, y);
  290. return val % 2 ? val : val - 1;
  291. }
/*
 * Format one recorded mismatch into a human-readable line.  The message
 * is classified from the pattern bits: a damaged source buffer, a
 * destination byte that was not copied, or one copied when it should
 * not have been.
 *
 * NOTE(review): returns a pointer to a static buffer, so concurrent
 * callers would race -- confirm the results path is serialized.
 */
static char *verify_result_get_one(struct dmatest_verify_result *vr,
		unsigned int i)
{
	struct dmatest_verify_buffer *vb = &vr->data[i];
	u8 diff = vb->actual ^ vr->pattern;	/* which pattern bits flipped */
	static char buf[512];
	char *msg;

	if (vr->is_srcbuf)
		msg = "srcbuf overwritten!";
	else if ((vr->pattern & PATTERN_COPY)
			&& (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
		msg = "dstbuf not copied!";
	else if (diff & PATTERN_SRC)
		msg = "dstbuf was copied!";
	else
		msg = "dstbuf mismatch!";

	snprintf(buf, sizeof(buf) - 1, "%s [0x%x] Expected %02x, got %02x", msg,
			vb->index, vb->expected, vb->actual);

	return buf;
}
/*
 * Format a thread result line into a static buffer.  messages[] is
 * indexed by enum dmatest_error_type, so the two must stay in sync.
 *
 * NOTE(review): uses a static buffer like verify_result_get_one() --
 * confirm callers are serialized.
 */
static char *thread_result_get(const char *name,
		struct dmatest_thread_result *tr)
{
	static const char * const messages[] = {
		[DMATEST_ET_OK] = "No errors",
		[DMATEST_ET_MAP_SRC] = "src mapping error",
		[DMATEST_ET_MAP_DST] = "dst mapping error",
		[DMATEST_ET_PREP] = "prep error",
		[DMATEST_ET_SUBMIT] = "submit error",
		[DMATEST_ET_TIMEOUT] = "test timed out",
		[DMATEST_ET_DMA_ERROR] =
			"got completion callback (DMA_ERROR)",
		[DMATEST_ET_DMA_IN_PROGRESS] =
			"got completion callback (DMA_IN_PROGRESS)",
		[DMATEST_ET_VERIFY] = "errors",
		[DMATEST_ET_VERIFY_BUF] = "verify errors",
	};
	static char buf[512];

	snprintf(buf, sizeof(buf) - 1,
			"%s: #%u: %s with src_off=0x%x ""dst_off=0x%x len=0x%x (%lu)",
			name, tr->n, messages[tr->type], tr->src_off, tr->dst_off,
			tr->len, tr->data);

	return buf;
}
  336. static int thread_result_add(struct dmatest_info *info,
  337. struct dmatest_result *r, enum dmatest_error_type type,
  338. unsigned int n, unsigned int src_off, unsigned int dst_off,
  339. unsigned int len, unsigned long data)
  340. {
  341. struct dmatest_thread_result *tr;
  342. tr = kzalloc(sizeof(*tr), GFP_KERNEL);
  343. if (!tr)
  344. return -ENOMEM;
  345. tr->type = type;
  346. tr->n = n;
  347. tr->src_off = src_off;
  348. tr->dst_off = dst_off;
  349. tr->len = len;
  350. tr->data = data;
  351. mutex_lock(&info->results_lock);
  352. list_add_tail(&tr->node, &r->results);
  353. mutex_unlock(&info->results_lock);
  354. if (tr->type == DMATEST_ET_OK)
  355. pr_debug("%s\n", thread_result_get(r->name, tr));
  356. else
  357. pr_warn("%s\n", thread_result_get(r->name, tr));
  358. return 0;
  359. }
/*
 * Verify one region of the test buffers and, on mismatch, attach a
 * dmatest_verify_result entry to @r.
 *
 * @whence selects the region relative to the transfer window:
 *   < 0  bytes before the buffer offset,
 *   = 0  the transferred window itself,
 *   > 0  bytes after the window up to buf_size.
 *
 * Returns the number of mismatched bytes (0 on success).  If the
 * result allocation fails the region is still verified, just without
 * detailed per-byte records.
 */
static unsigned int verify_result_add(struct dmatest_info *info,
		struct dmatest_result *r, unsigned int n,
		unsigned int src_off, unsigned int dst_off, unsigned int len,
		u8 **bufs, int whence, unsigned int counter, u8 pattern,
		bool is_srcbuf)
{
	struct dmatest_verify_result *vr;
	unsigned int error_count;
	unsigned int buf_off = is_srcbuf ? src_off : dst_off;
	unsigned int start, end;

	if (whence < 0) {
		start = 0;
		end = buf_off;
	} else if (whence > 0) {
		start = buf_off + len;
		end = info->params.buf_size;
	} else {
		start = buf_off;
		end = buf_off + len;
	}

	vr = kmalloc(sizeof(*vr), GFP_KERNEL);
	if (!vr) {
		pr_warn("dmatest: No memory to store verify result\n");
		/* still verify, just without recording the details */
		return dmatest_verify(NULL, bufs, start, end, counter, pattern,
				is_srcbuf);
	}

	vr->pattern = pattern;
	vr->is_srcbuf = is_srcbuf;

	error_count = dmatest_verify(vr, bufs, start, end, counter, pattern,
			is_srcbuf);
	if (error_count) {
		/* ownership of vr passes to the result list (freed in result_free) */
		vr->error_count = error_count;
		thread_result_add(info, r, DMATEST_ET_VERIFY_BUF, n, src_off,
				dst_off, len, (unsigned long)vr);
		return error_count;
	}

	kfree(vr);
	return 0;
}
/*
 * Free stored results under results_lock.  With @name == NULL every
 * dmatest_result is released; otherwise only entries whose name
 * matches.  Attached verify details (tr->vr) are freed together with
 * their owning entry.
 */
static void result_free(struct dmatest_info *info, const char *name)
{
	struct dmatest_result *r, *_r;

	mutex_lock(&info->results_lock);
	list_for_each_entry_safe(r, _r, &info->results, node) {
		struct dmatest_thread_result *tr, *_tr;

		if (name && strcmp(r->name, name))
			continue;

		list_for_each_entry_safe(tr, _tr, &r->results, node) {
			if (tr->type == DMATEST_ET_VERIFY_BUF)
				kfree(tr->vr);
			list_del(&tr->node);
			kfree(tr);
		}

		kfree(r->name);
		list_del(&r->node);
		kfree(r);
	}
	mutex_unlock(&info->results_lock);
}
  419. static struct dmatest_result *result_init(struct dmatest_info *info,
  420. const char *name)
  421. {
  422. struct dmatest_result *r;
  423. r = kzalloc(sizeof(*r), GFP_KERNEL);
  424. if (r) {
  425. r->name = kstrdup(name, GFP_KERNEL);
  426. INIT_LIST_HEAD(&r->results);
  427. mutex_lock(&info->results_lock);
  428. list_add_tail(&r->node, &info->results);
  429. mutex_unlock(&info->results_lock);
  430. }
  431. return r;
  432. }
  433. /*
  434. * This function repeatedly tests DMA transfers of various lengths and
  435. * offsets for a given operation type until it is told to exit by
  436. * kthread_stop(). There may be multiple threads running this function
  437. * in parallel for a single channel, and there may be multiple channels
  438. * being tested in parallel.
  439. *
  440. * Before each test, the source and destination buffer is initialized
  441. * with a known pattern. This pattern is different depending on
  442. * whether it's in an area which is supposed to be copied or
  443. * overwritten, and different in the source and destination buffers.
  444. * So if the DMA engine doesn't copy exactly what we tell it to copy,
  445. * we'll notice.
  446. */
  447. static int dmatest_func(void *data)
  448. {
  449. DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
  450. struct dmatest_thread *thread = data;
  451. struct dmatest_done done = { .wait = &done_wait };
  452. struct dmatest_info *info;
  453. struct dmatest_params *params;
  454. struct dma_chan *chan;
  455. struct dma_device *dev;
  456. const char *thread_name;
  457. unsigned int src_off, dst_off, len;
  458. unsigned int error_count;
  459. unsigned int failed_tests = 0;
  460. unsigned int total_tests = 0;
  461. dma_cookie_t cookie;
  462. enum dma_status status;
  463. enum dma_ctrl_flags flags;
  464. u8 *pq_coefs = NULL;
  465. int ret;
  466. int src_cnt;
  467. int dst_cnt;
  468. int i;
  469. struct dmatest_result *result;
  470. thread_name = current->comm;
  471. set_freezable();
  472. ret = -ENOMEM;
  473. smp_rmb();
  474. info = thread->info;
  475. params = &info->params;
  476. chan = thread->chan;
  477. dev = chan->device;
  478. if (thread->type == DMA_MEMCPY)
  479. src_cnt = dst_cnt = 1;
  480. else if (thread->type == DMA_XOR) {
  481. /* force odd to ensure dst = src */
  482. src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
  483. dst_cnt = 1;
  484. } else if (thread->type == DMA_PQ) {
  485. /* force odd to ensure dst = src */
  486. src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
  487. dst_cnt = 2;
  488. pq_coefs = kmalloc(params->pq_sources+1, GFP_KERNEL);
  489. if (!pq_coefs)
  490. goto err_thread_type;
  491. for (i = 0; i < src_cnt; i++)
  492. pq_coefs[i] = 1;
  493. } else
  494. goto err_thread_type;
  495. result = result_init(info, thread_name);
  496. if (!result)
  497. goto err_srcs;
  498. thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL);
  499. if (!thread->srcs)
  500. goto err_srcs;
  501. for (i = 0; i < src_cnt; i++) {
  502. thread->srcs[i] = kmalloc(params->buf_size, GFP_KERNEL);
  503. if (!thread->srcs[i])
  504. goto err_srcbuf;
  505. }
  506. thread->srcs[i] = NULL;
  507. thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL);
  508. if (!thread->dsts)
  509. goto err_dsts;
  510. for (i = 0; i < dst_cnt; i++) {
  511. thread->dsts[i] = kmalloc(params->buf_size, GFP_KERNEL);
  512. if (!thread->dsts[i])
  513. goto err_dstbuf;
  514. }
  515. thread->dsts[i] = NULL;
  516. set_user_nice(current, 10);
  517. /*
  518. * src and dst buffers are freed by ourselves below
  519. */
  520. flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
  521. while (!kthread_should_stop()
  522. && !(params->iterations && total_tests >= params->iterations)) {
  523. struct dma_async_tx_descriptor *tx = NULL;
  524. dma_addr_t dma_srcs[src_cnt];
  525. dma_addr_t dma_dsts[dst_cnt];
  526. u8 align = 0;
  527. total_tests++;
  528. /* honor alignment restrictions */
  529. if (thread->type == DMA_MEMCPY)
  530. align = dev->copy_align;
  531. else if (thread->type == DMA_XOR)
  532. align = dev->xor_align;
  533. else if (thread->type == DMA_PQ)
  534. align = dev->pq_align;
  535. if (1 << align > params->buf_size) {
  536. pr_err("%u-byte buffer too small for %d-byte alignment\n",
  537. params->buf_size, 1 << align);
  538. break;
  539. }
  540. len = dmatest_random() % params->buf_size + 1;
  541. len = (len >> align) << align;
  542. if (!len)
  543. len = 1 << align;
  544. src_off = dmatest_random() % (params->buf_size - len + 1);
  545. dst_off = dmatest_random() % (params->buf_size - len + 1);
  546. src_off = (src_off >> align) << align;
  547. dst_off = (dst_off >> align) << align;
  548. dmatest_init_srcs(thread->srcs, src_off, len, params->buf_size);
  549. dmatest_init_dsts(thread->dsts, dst_off, len, params->buf_size);
  550. for (i = 0; i < src_cnt; i++) {
  551. u8 *buf = thread->srcs[i] + src_off;
  552. dma_srcs[i] = dma_map_single(dev->dev, buf, len,
  553. DMA_TO_DEVICE);
  554. ret = dma_mapping_error(dev->dev, dma_srcs[i]);
  555. if (ret) {
  556. unmap_src(dev->dev, dma_srcs, len, i);
  557. thread_result_add(info, result,
  558. DMATEST_ET_MAP_SRC,
  559. total_tests, src_off, dst_off,
  560. len, ret);
  561. failed_tests++;
  562. continue;
  563. }
  564. }
  565. /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
  566. for (i = 0; i < dst_cnt; i++) {
  567. dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i],
  568. params->buf_size,
  569. DMA_BIDIRECTIONAL);
  570. ret = dma_mapping_error(dev->dev, dma_dsts[i]);
  571. if (ret) {
  572. unmap_src(dev->dev, dma_srcs, len, src_cnt);
  573. unmap_dst(dev->dev, dma_dsts, params->buf_size,
  574. i);
  575. thread_result_add(info, result,
  576. DMATEST_ET_MAP_DST,
  577. total_tests, src_off, dst_off,
  578. len, ret);
  579. failed_tests++;
  580. continue;
  581. }
  582. }
  583. if (thread->type == DMA_MEMCPY)
  584. tx = dev->device_prep_dma_memcpy(chan,
  585. dma_dsts[0] + dst_off,
  586. dma_srcs[0], len,
  587. flags);
  588. else if (thread->type == DMA_XOR)
  589. tx = dev->device_prep_dma_xor(chan,
  590. dma_dsts[0] + dst_off,
  591. dma_srcs, src_cnt,
  592. len, flags);
  593. else if (thread->type == DMA_PQ) {
  594. dma_addr_t dma_pq[dst_cnt];
  595. for (i = 0; i < dst_cnt; i++)
  596. dma_pq[i] = dma_dsts[i] + dst_off;
  597. tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs,
  598. src_cnt, pq_coefs,
  599. len, flags);
  600. }
  601. if (!tx) {
  602. unmap_src(dev->dev, dma_srcs, len, src_cnt);
  603. unmap_dst(dev->dev, dma_dsts, params->buf_size,
  604. dst_cnt);
  605. thread_result_add(info, result, DMATEST_ET_PREP,
  606. total_tests, src_off, dst_off,
  607. len, 0);
  608. msleep(100);
  609. failed_tests++;
  610. continue;
  611. }
  612. done.done = false;
  613. tx->callback = dmatest_callback;
  614. tx->callback_param = &done;
  615. cookie = tx->tx_submit(tx);
  616. if (dma_submit_error(cookie)) {
  617. thread_result_add(info, result, DMATEST_ET_SUBMIT,
  618. total_tests, src_off, dst_off,
  619. len, cookie);
  620. msleep(100);
  621. failed_tests++;
  622. continue;
  623. }
  624. dma_async_issue_pending(chan);
  625. wait_event_freezable_timeout(done_wait, done.done,
  626. msecs_to_jiffies(params->timeout));
  627. status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
  628. if (!done.done) {
  629. /*
  630. * We're leaving the timed out dma operation with
  631. * dangling pointer to done_wait. To make this
  632. * correct, we'll need to allocate wait_done for
  633. * each test iteration and perform "who's gonna
  634. * free it this time?" dancing. For now, just
  635. * leave it dangling.
  636. */
  637. thread_result_add(info, result, DMATEST_ET_TIMEOUT,
  638. total_tests, src_off, dst_off,
  639. len, 0);
  640. failed_tests++;
  641. continue;
  642. } else if (status != DMA_SUCCESS) {
  643. enum dmatest_error_type type = (status == DMA_ERROR) ?
  644. DMATEST_ET_DMA_ERROR : DMATEST_ET_DMA_IN_PROGRESS;
  645. thread_result_add(info, result, type,
  646. total_tests, src_off, dst_off,
  647. len, status);
  648. failed_tests++;
  649. continue;
  650. }
  651. /* Unmap by myself */
  652. unmap_src(dev->dev, dma_srcs, len, src_cnt);
  653. unmap_dst(dev->dev, dma_dsts, params->buf_size, dst_cnt);
  654. error_count = 0;
  655. pr_debug("%s: verifying source buffer...\n", thread_name);
  656. error_count += verify_result_add(info, result, total_tests,
  657. src_off, dst_off, len, thread->srcs, -1,
  658. 0, PATTERN_SRC, true);
  659. error_count += verify_result_add(info, result, total_tests,
  660. src_off, dst_off, len, thread->srcs, 0,
  661. src_off, PATTERN_SRC | PATTERN_COPY, true);
  662. error_count += verify_result_add(info, result, total_tests,
  663. src_off, dst_off, len, thread->srcs, 1,
  664. src_off + len, PATTERN_SRC, true);
  665. pr_debug("%s: verifying dest buffer...\n", thread_name);
  666. error_count += verify_result_add(info, result, total_tests,
  667. src_off, dst_off, len, thread->dsts, -1,
  668. 0, PATTERN_DST, false);
  669. error_count += verify_result_add(info, result, total_tests,
  670. src_off, dst_off, len, thread->dsts, 0,
  671. src_off, PATTERN_SRC | PATTERN_COPY, false);
  672. error_count += verify_result_add(info, result, total_tests,
  673. src_off, dst_off, len, thread->dsts, 1,
  674. dst_off + len, PATTERN_DST, false);
  675. if (error_count) {
  676. thread_result_add(info, result, DMATEST_ET_VERIFY,
  677. total_tests, src_off, dst_off,
  678. len, error_count);
  679. failed_tests++;
  680. } else {
  681. thread_result_add(info, result, DMATEST_ET_OK,
  682. total_tests, src_off, dst_off,
  683. len, 0);
  684. }
  685. }
  686. ret = 0;
  687. for (i = 0; thread->dsts[i]; i++)
  688. kfree(thread->dsts[i]);
  689. err_dstbuf:
  690. kfree(thread->dsts);
  691. err_dsts:
  692. for (i = 0; thread->srcs[i]; i++)
  693. kfree(thread->srcs[i]);
  694. err_srcbuf:
  695. kfree(thread->srcs);
  696. err_srcs:
  697. kfree(pq_coefs);
  698. err_thread_type:
  699. pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
  700. thread_name, total_tests, failed_tests, ret);
  701. /* terminate all transfers on specified channels */
  702. if (ret)
  703. dmaengine_terminate_all(chan);
  704. thread->done = true;
  705. if (params->iterations > 0)
  706. while (!kthread_should_stop()) {
  707. DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
  708. interruptible_sleep_on(&wait_dmatest_exit);
  709. }
  710. return ret;
  711. }
  712. static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
  713. {
  714. struct dmatest_thread *thread;
  715. struct dmatest_thread *_thread;
  716. int ret;
  717. list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
  718. ret = kthread_stop(thread->task);
  719. pr_debug("dmatest: thread %s exited with status %d\n",
  720. thread->task->comm, ret);
  721. list_del(&thread->node);
  722. kfree(thread);
  723. }
  724. /* terminate all transfers on specified channels */
  725. dmaengine_terminate_all(dtc->chan);
  726. kfree(dtc);
  727. }
/*
 * Spawn params->threads_per_chan kthreads running dmatest_func() for
 * operation @type on the channel wrapped by @dtc.  Returns the number
 * of threads actually started (possibly fewer than requested on
 * allocation or kthread failure), or -EINVAL for an unsupported type.
 */
static int dmatest_add_threads(struct dmatest_info *info,
		struct dmatest_chan *dtc, enum dma_transaction_type type)
{
	struct dmatest_params *params = &info->params;
	struct dmatest_thread *thread;
	struct dma_chan *chan = dtc->chan;
	char *op;
	unsigned int i;

	if (type == DMA_MEMCPY)
		op = "copy";
	else if (type == DMA_XOR)
		op = "xor";
	else if (type == DMA_PQ)
		op = "pq";
	else
		return -EINVAL;

	for (i = 0; i < params->threads_per_chan; i++) {
		thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
		if (!thread) {
			pr_warning("dmatest: No memory for %s-%s%u\n",
					dma_chan_name(chan), op, i);
			break;
		}
		thread->info = info;
		thread->chan = dtc->chan;
		thread->type = type;
		/* publish the fields above before the kthread reads them
		 * (pairs with the smp_rmb() in dmatest_func()) */
		smp_wmb();
		thread->task = kthread_run(dmatest_func, thread, "%s-%s%u",
				dma_chan_name(chan), op, i);
		if (IS_ERR(thread->task)) {
			pr_warning("dmatest: Failed to run thread %s-%s%u\n",
					dma_chan_name(chan), op, i);
			kfree(thread);
			break;
		}

		/* srcbuf and dstbuf are allocated by the thread itself */

		list_add_tail(&thread->node, &dtc->threads);
	}

	return i;
}
  768. static int dmatest_add_channel(struct dmatest_info *info,
  769. struct dma_chan *chan)
  770. {
  771. struct dmatest_chan *dtc;
  772. struct dma_device *dma_dev = chan->device;
  773. unsigned int thread_count = 0;
  774. int cnt;
  775. dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
  776. if (!dtc) {
  777. pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan));
  778. return -ENOMEM;
  779. }
  780. dtc->chan = chan;
  781. INIT_LIST_HEAD(&dtc->threads);
  782. if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
  783. cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY);
  784. thread_count += cnt > 0 ? cnt : 0;
  785. }
  786. if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
  787. cnt = dmatest_add_threads(info, dtc, DMA_XOR);
  788. thread_count += cnt > 0 ? cnt : 0;
  789. }
  790. if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
  791. cnt = dmatest_add_threads(info, dtc, DMA_PQ);
  792. thread_count += cnt > 0 ? cnt : 0;
  793. }
  794. pr_info("dmatest: Started %u threads using %s\n",
  795. thread_count, dma_chan_name(chan));
  796. list_add_tail(&dtc->node, &info->channels);
  797. info->nr_channels++;
  798. return 0;
  799. }
  800. static bool filter(struct dma_chan *chan, void *param)
  801. {
  802. struct dmatest_params *params = param;
  803. if (!dmatest_match_channel(params, chan) ||
  804. !dmatest_match_device(params, chan->device))
  805. return false;
  806. else
  807. return true;
  808. }
  809. static int __run_threaded_test(struct dmatest_info *info)
  810. {
  811. dma_cap_mask_t mask;
  812. struct dma_chan *chan;
  813. struct dmatest_params *params = &info->params;
  814. int err = 0;
  815. dma_cap_zero(mask);
  816. dma_cap_set(DMA_MEMCPY, mask);
  817. for (;;) {
  818. chan = dma_request_channel(mask, filter, params);
  819. if (chan) {
  820. err = dmatest_add_channel(info, chan);
  821. if (err) {
  822. dma_release_channel(chan);
  823. break; /* add_channel failed, punt */
  824. }
  825. } else
  826. break; /* no more channels available */
  827. if (params->max_channels &&
  828. info->nr_channels >= params->max_channels)
  829. break; /* we have all we need */
  830. }
  831. return err;
  832. }
  833. #ifndef MODULE
  834. static int run_threaded_test(struct dmatest_info *info)
  835. {
  836. int ret;
  837. mutex_lock(&info->lock);
  838. ret = __run_threaded_test(info);
  839. mutex_unlock(&info->lock);
  840. return ret;
  841. }
  842. #endif
/*
 * __stop_threaded_test - tear down every registered test channel.
 *
 * Caller must hold info->lock.  For each channel: unlink it, stop and
 * reap its test threads, then hand the channel back to dmaengine.
 */
static void __stop_threaded_test(struct dmatest_info *info)
{
struct dmatest_chan *dtc, *_dtc;
struct dma_chan *chan;
list_for_each_entry_safe(dtc, _dtc, &info->channels, node) {
list_del(&dtc->node);
/* save chan first: dmatest_cleanup_channel() frees dtc */
chan = dtc->chan;
dmatest_cleanup_channel(dtc);
pr_debug("dmatest: dropped channel %s\n", dma_chan_name(chan));
dma_release_channel(chan);
}
info->nr_channels = 0;
}
  856. static void stop_threaded_test(struct dmatest_info *info)
  857. {
  858. mutex_lock(&info->lock);
  859. __stop_threaded_test(info);
  860. mutex_unlock(&info->lock);
  861. }
  862. static int __restart_threaded_test(struct dmatest_info *info, bool run)
  863. {
  864. struct dmatest_params *params = &info->params;
  865. /* Stop any running test first */
  866. __stop_threaded_test(info);
  867. if (run == false)
  868. return 0;
  869. /* Clear results from previous run */
  870. result_free(info, NULL);
  871. /* Copy test parameters */
  872. params->buf_size = test_buf_size;
  873. strlcpy(params->channel, strim(test_channel), sizeof(params->channel));
  874. strlcpy(params->device, strim(test_device), sizeof(params->device));
  875. params->threads_per_chan = threads_per_chan;
  876. params->max_channels = max_channels;
  877. params->iterations = iterations;
  878. params->xor_sources = xor_sources;
  879. params->pq_sources = pq_sources;
  880. params->timeout = timeout;
  881. /* Run test with new parameters */
  882. return __run_threaded_test(info);
  883. }
  884. static bool __is_threaded_test_run(struct dmatest_info *info)
  885. {
  886. struct dmatest_chan *dtc;
  887. list_for_each_entry(dtc, &info->channels, node) {
  888. struct dmatest_thread *thread;
  889. list_for_each_entry(thread, &dtc->threads, node) {
  890. if (!thread->done)
  891. return true;
  892. }
  893. }
  894. return false;
  895. }
  896. static ssize_t dtf_read_run(struct file *file, char __user *user_buf,
  897. size_t count, loff_t *ppos)
  898. {
  899. struct dmatest_info *info = file->private_data;
  900. char buf[3];
  901. mutex_lock(&info->lock);
  902. if (__is_threaded_test_run(info)) {
  903. buf[0] = 'Y';
  904. } else {
  905. __stop_threaded_test(info);
  906. buf[0] = 'N';
  907. }
  908. mutex_unlock(&info->lock);
  909. buf[1] = '\n';
  910. buf[2] = 0x00;
  911. return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
  912. }
  913. static ssize_t dtf_write_run(struct file *file, const char __user *user_buf,
  914. size_t count, loff_t *ppos)
  915. {
  916. struct dmatest_info *info = file->private_data;
  917. char buf[16];
  918. bool bv;
  919. int ret = 0;
  920. if (copy_from_user(buf, user_buf, min(count, (sizeof(buf) - 1))))
  921. return -EFAULT;
  922. if (strtobool(buf, &bv) == 0) {
  923. mutex_lock(&info->lock);
  924. if (__is_threaded_test_run(info))
  925. ret = -EBUSY;
  926. else
  927. ret = __restart_threaded_test(info, bv);
  928. mutex_unlock(&info->lock);
  929. }
  930. return ret ? ret : count;
  931. }
/* debugfs "run" file: read reports test status, write starts/stops it */
static const struct file_operations dtf_run_fops = {
.read = dtf_read_run,
.write = dtf_write_run,
.open = simple_open,
.llseek = default_llseek,
};
  938. static int dtf_results_show(struct seq_file *sf, void *data)
  939. {
  940. struct dmatest_info *info = sf->private;
  941. struct dmatest_result *result;
  942. struct dmatest_thread_result *tr;
  943. unsigned int i;
  944. mutex_lock(&info->results_lock);
  945. list_for_each_entry(result, &info->results, node) {
  946. list_for_each_entry(tr, &result->results, node) {
  947. seq_printf(sf, "%s\n",
  948. thread_result_get(result->name, tr));
  949. if (tr->type == DMATEST_ET_VERIFY_BUF) {
  950. for (i = 0; i < tr->vr->error_count; i++) {
  951. seq_printf(sf, "\t%s\n",
  952. verify_result_get_one(tr->vr, i));
  953. }
  954. }
  955. }
  956. }
  957. mutex_unlock(&info->results_lock);
  958. return 0;
  959. }
/* debugfs "results" open: bind dtf_results_show() via single_open() */
static int dtf_results_open(struct inode *inode, struct file *file)
{
return single_open(file, dtf_results_show, inode->i_private);
}
/* debugfs "results" file: read-only seq_file dump of stored results */
static const struct file_operations dtf_results_fops = {
.open = dtf_results_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
  970. static int dmatest_register_dbgfs(struct dmatest_info *info)
  971. {
  972. struct dentry *d;
  973. d = debugfs_create_dir("dmatest", NULL);
  974. if (IS_ERR(d))
  975. return PTR_ERR(d);
  976. if (!d)
  977. goto err_root;
  978. info->root = d;
  979. /* Run or stop threaded test */
  980. debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root, info,
  981. &dtf_run_fops);
  982. /* Results of test in progress */
  983. debugfs_create_file("results", S_IRUGO, info->root, info,
  984. &dtf_results_fops);
  985. return 0;
  986. err_root:
  987. pr_err("dmatest: Failed to initialize debugfs\n");
  988. return -ENOMEM;
  989. }
  990. static int __init dmatest_init(void)
  991. {
  992. struct dmatest_info *info = &test_info;
  993. int ret;
  994. memset(info, 0, sizeof(*info));
  995. mutex_init(&info->lock);
  996. INIT_LIST_HEAD(&info->channels);
  997. mutex_init(&info->results_lock);
  998. INIT_LIST_HEAD(&info->results);
  999. ret = dmatest_register_dbgfs(info);
  1000. if (ret)
  1001. return ret;
  1002. #ifdef MODULE
  1003. return 0;
  1004. #else
  1005. return run_threaded_test(info);
  1006. #endif
  1007. }
  1008. /* when compiled-in wait for drivers to load first */
  1009. late_initcall(dmatest_init);
/*
 * dmatest_exit - module unload: remove the debugfs interface, stop all
 * test threads and release their channels, then free stored results.
 */
static void __exit dmatest_exit(void)
{
struct dmatest_info *info = &test_info;
/* drop debugfs first so no new run/results access races the teardown */
debugfs_remove_recursive(info->root);
stop_threaded_test(info);
result_free(info, NULL);
}
module_exit(dmatest_exit);
  1018. MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
  1019. MODULE_LICENSE("GPL v2");