dmatest.c 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199
  1. /*
  2. * DMA Engine test module
  3. *
  4. * Copyright (C) 2007 Atmel Corporation
  5. * Copyright (C) 2013 Intel Corporation
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License version 2 as
  9. * published by the Free Software Foundation.
  10. */
  11. #include <linux/delay.h>
  12. #include <linux/dma-mapping.h>
  13. #include <linux/dmaengine.h>
  14. #include <linux/freezer.h>
  15. #include <linux/init.h>
  16. #include <linux/kthread.h>
  17. #include <linux/module.h>
  18. #include <linux/moduleparam.h>
  19. #include <linux/random.h>
  20. #include <linux/slab.h>
  21. #include <linux/wait.h>
  22. #include <linux/ctype.h>
  23. #include <linux/debugfs.h>
  24. #include <linux/uaccess.h>
  25. #include <linux/seq_file.h>
  26. static unsigned int test_buf_size = 16384;
  27. module_param(test_buf_size, uint, S_IRUGO | S_IWUSR);
  28. MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
  29. static char test_channel[20];
  30. module_param_string(channel, test_channel, sizeof(test_channel),
  31. S_IRUGO | S_IWUSR);
  32. MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");
  33. static char test_device[20];
  34. module_param_string(device, test_device, sizeof(test_device),
  35. S_IRUGO | S_IWUSR);
  36. MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");
  37. static unsigned int threads_per_chan = 1;
  38. module_param(threads_per_chan, uint, S_IRUGO | S_IWUSR);
  39. MODULE_PARM_DESC(threads_per_chan,
  40. "Number of threads to start per channel (default: 1)");
  41. static unsigned int max_channels;
  42. module_param(max_channels, uint, S_IRUGO | S_IWUSR);
  43. MODULE_PARM_DESC(max_channels,
  44. "Maximum number of channels to use (default: all)");
  45. static unsigned int iterations;
  46. module_param(iterations, uint, S_IRUGO | S_IWUSR);
  47. MODULE_PARM_DESC(iterations,
  48. "Iterations before stopping test (default: infinite)");
  49. static unsigned int xor_sources = 3;
  50. module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
  51. MODULE_PARM_DESC(xor_sources,
  52. "Number of xor source buffers (default: 3)");
  53. static unsigned int pq_sources = 3;
  54. module_param(pq_sources, uint, S_IRUGO | S_IWUSR);
  55. MODULE_PARM_DESC(pq_sources,
  56. "Number of p+q source buffers (default: 3)");
  57. static int timeout = 3000;
  58. module_param(timeout, uint, S_IRUGO | S_IWUSR);
  59. MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
  60. "Pass -1 for infinite timeout");
/* Maximum amount of mismatched bytes in buffer to print */
#define MAX_ERROR_COUNT		32

/*
 * Initialization patterns. All bytes in the source buffer has bit 7
 * set, all bytes in the destination buffer has bit 7 cleared.
 *
 * Bit 6 is set for all bytes which are to be copied by the DMA
 * engine. Bit 5 is set for all bytes which are to be overwritten by
 * the DMA engine.
 *
 * The remaining bits are the inverse of a counter which increments by
 * one for each byte address.
 */
#define PATTERN_SRC		0x80	/* bit 7: byte belongs to a source buffer */
#define PATTERN_DST		0x00	/* bit 7 clear: destination buffer byte */
#define PATTERN_COPY		0x40	/* bit 6: byte lies inside the copied window */
#define PATTERN_OVERWRITE	0x20	/* bit 5: byte will be overwritten by DMA */
#define PATTERN_COUNT_MASK	0x1f	/* low 5 bits: inverted per-byte counter */
/* Classification of a single test-iteration outcome. */
enum dmatest_error_type {
	DMATEST_ET_OK,			/* transfer completed and verified */
	DMATEST_ET_MAP_SRC,		/* dma_map_single() of a source failed */
	DMATEST_ET_MAP_DST,		/* dma_map_single() of a destination failed */
	DMATEST_ET_PREP,		/* device_prep_dma_*() returned NULL */
	DMATEST_ET_SUBMIT,		/* tx_submit() returned an error cookie */
	DMATEST_ET_TIMEOUT,		/* completion callback never fired in time */
	DMATEST_ET_DMA_ERROR,		/* completion status was DMA_ERROR */
	DMATEST_ET_DMA_IN_PROGRESS,	/* completion status was DMA_IN_PROGRESS */
	DMATEST_ET_VERIFY,		/* buffer contents failed verification */
	DMATEST_ET_VERIFY_BUF,		/* detailed mismatches stored in ->vr */
};

/* One byte-level mismatch found while verifying a buffer. */
struct dmatest_verify_buffer {
	unsigned int	index;		/* offset of the mismatching byte */
	u8		expected;
	u8		actual;
};

/* Verification summary; at most MAX_ERROR_COUNT mismatches are detailed. */
struct dmatest_verify_result {
	unsigned int	error_count;
	struct dmatest_verify_buffer	data[MAX_ERROR_COUNT];
	u8		pattern;	/* pattern the region was checked against */
	bool		is_srcbuf;	/* true when a source buffer was damaged */
};

/* Outcome record for a single test iteration, linked into dmatest_result. */
struct dmatest_thread_result {
	struct list_head	node;
	unsigned int		n;	/* iteration number */
	unsigned int		src_off;
	unsigned int		dst_off;
	unsigned int		len;
	enum dmatest_error_type	type;
	union {
		unsigned long	data;	/* generic payload, printed as %lu */
		dma_cookie_t	cookie;	/* for DMATEST_ET_SUBMIT */
		enum dma_status	status;	/* for DMA error/in-progress types */
		int		error;	/* for mapping errors */
		struct dmatest_verify_result *vr; /* for DMATEST_ET_VERIFY_BUF */
	};
};

/* Per-thread list of iteration results, keyed by the thread's name. */
struct dmatest_result {
	struct list_head	node;
	char			*name;	/* kstrdup()ed thread name */
	struct list_head	results;
};

struct dmatest_info;

/* One test kthread bound to a channel and a transaction type. */
struct dmatest_thread {
	struct list_head	node;
	struct dmatest_info	*info;
	struct task_struct	*task;
	struct dma_chan		*chan;
	u8			**srcs;	/* NULL-terminated array of source buffers */
	u8			**dsts;	/* NULL-terminated array of dest buffers */
	enum dma_transaction_type type;
	bool			done;	/* set by the thread just before exiting */
};

/* A channel under test together with its worker threads. */
struct dmatest_chan {
	struct list_head	node;
	struct dma_chan		*chan;
	struct list_head	threads;
};
/**
 * struct dmatest_params - test parameters.
 * @buf_size:		size of the memcpy test buffer
 * @channel:		bus ID of the channel to test
 * @device:		bus ID of the DMA Engine to test
 * @threads_per_chan:	number of threads to start per channel
 * @max_channels:	maximum number of channels to use
 * @iterations:		iterations before stopping test
 * @xor_sources:	number of xor source buffers
 * @pq_sources:		number of p+q source buffers
 * @timeout:		transfer timeout in msec, -1 for infinite timeout
 */
struct dmatest_params {
	unsigned int	buf_size;
	char		channel[20];
	char		device[20];
	unsigned int	threads_per_chan;
	unsigned int	max_channels;
	unsigned int	iterations;
	unsigned int	xor_sources;
	unsigned int	pq_sources;
	int		timeout;
};

/**
 * struct dmatest_info - test information.
 * @params:	test parameters
 * @lock:	access protection to the fields of this structure
 */
struct dmatest_info {
	/* Test parameters */
	struct dmatest_params	params;

	/* Internal state */
	struct list_head	channels;	/* list of dmatest_chan */
	unsigned int		nr_channels;
	struct mutex		lock;

	/* debugfs related stuff */
	struct dentry		*root;

	/* Test results */
	struct list_head	results;	/* list of dmatest_result */
	struct mutex		results_lock;	/* protects the result lists */
};

/* Single global instance; this module tests one configuration at a time. */
static struct dmatest_info test_info;
  180. static bool dmatest_match_channel(struct dmatest_params *params,
  181. struct dma_chan *chan)
  182. {
  183. if (params->channel[0] == '\0')
  184. return true;
  185. return strcmp(dma_chan_name(chan), params->channel) == 0;
  186. }
  187. static bool dmatest_match_device(struct dmatest_params *params,
  188. struct dma_device *device)
  189. {
  190. if (params->device[0] == '\0')
  191. return true;
  192. return strcmp(dev_name(device->dev), params->device) == 0;
  193. }
  194. static unsigned long dmatest_random(void)
  195. {
  196. unsigned long buf;
  197. get_random_bytes(&buf, sizeof(buf));
  198. return buf;
  199. }
  200. static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len,
  201. unsigned int buf_size)
  202. {
  203. unsigned int i;
  204. u8 *buf;
  205. for (; (buf = *bufs); bufs++) {
  206. for (i = 0; i < start; i++)
  207. buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
  208. for ( ; i < start + len; i++)
  209. buf[i] = PATTERN_SRC | PATTERN_COPY
  210. | (~i & PATTERN_COUNT_MASK);
  211. for ( ; i < buf_size; i++)
  212. buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
  213. buf++;
  214. }
  215. }
  216. static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len,
  217. unsigned int buf_size)
  218. {
  219. unsigned int i;
  220. u8 *buf;
  221. for (; (buf = *bufs); bufs++) {
  222. for (i = 0; i < start; i++)
  223. buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
  224. for ( ; i < start + len; i++)
  225. buf[i] = PATTERN_DST | PATTERN_OVERWRITE
  226. | (~i & PATTERN_COUNT_MASK);
  227. for ( ; i < buf_size; i++)
  228. buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
  229. }
  230. }
  231. static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs,
  232. unsigned int start, unsigned int end, unsigned int counter,
  233. u8 pattern, bool is_srcbuf)
  234. {
  235. unsigned int i;
  236. unsigned int error_count = 0;
  237. u8 actual;
  238. u8 expected;
  239. u8 *buf;
  240. unsigned int counter_orig = counter;
  241. struct dmatest_verify_buffer *vb;
  242. for (; (buf = *bufs); bufs++) {
  243. counter = counter_orig;
  244. for (i = start; i < end; i++) {
  245. actual = buf[i];
  246. expected = pattern | (~counter & PATTERN_COUNT_MASK);
  247. if (actual != expected) {
  248. if (error_count < MAX_ERROR_COUNT && vr) {
  249. vb = &vr->data[error_count];
  250. vb->index = i;
  251. vb->expected = expected;
  252. vb->actual = actual;
  253. }
  254. error_count++;
  255. }
  256. counter++;
  257. }
  258. }
  259. if (error_count > MAX_ERROR_COUNT)
  260. pr_warning("%s: %u errors suppressed\n",
  261. current->comm, error_count - MAX_ERROR_COUNT);
  262. return error_count;
  263. }
/* poor man's completion - we want to use wait_event_freezable() on it */
struct dmatest_done {
	bool		done;	/* set to true by the DMA completion callback */
	wait_queue_head_t	*wait;	/* waitqueue the test thread sleeps on */
};
  269. static void dmatest_callback(void *arg)
  270. {
  271. struct dmatest_done *done = arg;
  272. done->done = true;
  273. wake_up_all(done->wait);
  274. }
  275. static inline void unmap_src(struct device *dev, dma_addr_t *addr, size_t len,
  276. unsigned int count)
  277. {
  278. while (count--)
  279. dma_unmap_single(dev, addr[count], len, DMA_TO_DEVICE);
  280. }
  281. static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len,
  282. unsigned int count)
  283. {
  284. while (count--)
  285. dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL);
  286. }
  287. static unsigned int min_odd(unsigned int x, unsigned int y)
  288. {
  289. unsigned int val = min(x, y);
  290. return val % 2 ? val : val - 1;
  291. }
  292. static char *verify_result_get_one(struct dmatest_verify_result *vr,
  293. unsigned int i)
  294. {
  295. struct dmatest_verify_buffer *vb = &vr->data[i];
  296. u8 diff = vb->actual ^ vr->pattern;
  297. static char buf[512];
  298. char *msg;
  299. if (vr->is_srcbuf)
  300. msg = "srcbuf overwritten!";
  301. else if ((vr->pattern & PATTERN_COPY)
  302. && (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
  303. msg = "dstbuf not copied!";
  304. else if (diff & PATTERN_SRC)
  305. msg = "dstbuf was copied!";
  306. else
  307. msg = "dstbuf mismatch!";
  308. snprintf(buf, sizeof(buf) - 1, "%s [0x%x] Expected %02x, got %02x", msg,
  309. vb->index, vb->expected, vb->actual);
  310. return buf;
  311. }
/*
 * Format a one-line description of @tr, prefixed by the thread @name.
 * Returns a pointer to a static scratch buffer shared by all callers -
 * presumably serialized by the results handling; TODO confirm.
 */
static char *thread_result_get(const char *name,
		struct dmatest_thread_result *tr)
{
	/* Indexed by enum dmatest_error_type. */
	static const char * const messages[] = {
		[DMATEST_ET_OK]			= "No errors",
		[DMATEST_ET_MAP_SRC]		= "src mapping error",
		[DMATEST_ET_MAP_DST]		= "dst mapping error",
		[DMATEST_ET_PREP]		= "prep error",
		[DMATEST_ET_SUBMIT]		= "submit error",
		[DMATEST_ET_TIMEOUT]		= "test timed out",
		[DMATEST_ET_DMA_ERROR]		=
			"got completion callback (DMA_ERROR)",
		[DMATEST_ET_DMA_IN_PROGRESS]	=
			"got completion callback (DMA_IN_PROGRESS)",
		[DMATEST_ET_VERIFY]		= "errors",
		[DMATEST_ET_VERIFY_BUF]		= "verify errors",
	};
	static char buf[512];

	/* tr->data aliases the union; it is always printed as %lu here */
	snprintf(buf, sizeof(buf) - 1,
		"%s: #%u: %s with src_off=0x%x ""dst_off=0x%x len=0x%x (%lu)",
		name, tr->n, messages[tr->type], tr->src_off, tr->dst_off,
		tr->len, tr->data);
	return buf;
}
  336. static int thread_result_add(struct dmatest_info *info,
  337. struct dmatest_result *r, enum dmatest_error_type type,
  338. unsigned int n, unsigned int src_off, unsigned int dst_off,
  339. unsigned int len, unsigned long data)
  340. {
  341. struct dmatest_thread_result *tr;
  342. tr = kzalloc(sizeof(*tr), GFP_KERNEL);
  343. if (!tr)
  344. return -ENOMEM;
  345. tr->type = type;
  346. tr->n = n;
  347. tr->src_off = src_off;
  348. tr->dst_off = dst_off;
  349. tr->len = len;
  350. tr->data = data;
  351. mutex_lock(&info->results_lock);
  352. list_add_tail(&tr->node, &r->results);
  353. mutex_unlock(&info->results_lock);
  354. if (tr->type == DMATEST_ET_OK)
  355. pr_debug("%s\n", thread_result_get(r->name, tr));
  356. else
  357. pr_warn("%s\n", thread_result_get(r->name, tr));
  358. return 0;
  359. }
  360. static unsigned int verify_result_add(struct dmatest_info *info,
  361. struct dmatest_result *r, unsigned int n,
  362. unsigned int src_off, unsigned int dst_off, unsigned int len,
  363. u8 **bufs, int whence, unsigned int counter, u8 pattern,
  364. bool is_srcbuf)
  365. {
  366. struct dmatest_verify_result *vr;
  367. unsigned int error_count;
  368. unsigned int buf_off = is_srcbuf ? src_off : dst_off;
  369. unsigned int start, end;
  370. if (whence < 0) {
  371. start = 0;
  372. end = buf_off;
  373. } else if (whence > 0) {
  374. start = buf_off + len;
  375. end = info->params.buf_size;
  376. } else {
  377. start = buf_off;
  378. end = buf_off + len;
  379. }
  380. vr = kmalloc(sizeof(*vr), GFP_KERNEL);
  381. if (!vr) {
  382. pr_warn("dmatest: No memory to store verify result\n");
  383. return dmatest_verify(NULL, bufs, start, end, counter, pattern,
  384. is_srcbuf);
  385. }
  386. vr->pattern = pattern;
  387. vr->is_srcbuf = is_srcbuf;
  388. error_count = dmatest_verify(vr, bufs, start, end, counter, pattern,
  389. is_srcbuf);
  390. if (error_count) {
  391. vr->error_count = error_count;
  392. thread_result_add(info, r, DMATEST_ET_VERIFY_BUF, n, src_off,
  393. dst_off, len, (unsigned long)vr);
  394. return error_count;
  395. }
  396. kfree(vr);
  397. return 0;
  398. }
  399. static void result_free(struct dmatest_info *info, const char *name)
  400. {
  401. struct dmatest_result *r, *_r;
  402. mutex_lock(&info->results_lock);
  403. list_for_each_entry_safe(r, _r, &info->results, node) {
  404. struct dmatest_thread_result *tr, *_tr;
  405. if (name && strcmp(r->name, name))
  406. continue;
  407. list_for_each_entry_safe(tr, _tr, &r->results, node) {
  408. if (tr->type == DMATEST_ET_VERIFY_BUF)
  409. kfree(tr->vr);
  410. list_del(&tr->node);
  411. kfree(tr);
  412. }
  413. kfree(r->name);
  414. list_del(&r->node);
  415. kfree(r);
  416. }
  417. mutex_unlock(&info->results_lock);
  418. }
  419. static struct dmatest_result *result_init(struct dmatest_info *info,
  420. const char *name)
  421. {
  422. struct dmatest_result *r;
  423. r = kzalloc(sizeof(*r), GFP_KERNEL);
  424. if (r) {
  425. r->name = kstrdup(name, GFP_KERNEL);
  426. INIT_LIST_HEAD(&r->results);
  427. mutex_lock(&info->results_lock);
  428. list_add_tail(&r->node, &info->results);
  429. mutex_unlock(&info->results_lock);
  430. }
  431. return r;
  432. }
  433. /*
  434. * This function repeatedly tests DMA transfers of various lengths and
  435. * offsets for a given operation type until it is told to exit by
  436. * kthread_stop(). There may be multiple threads running this function
  437. * in parallel for a single channel, and there may be multiple channels
  438. * being tested in parallel.
  439. *
  440. * Before each test, the source and destination buffer is initialized
  441. * with a known pattern. This pattern is different depending on
  442. * whether it's in an area which is supposed to be copied or
  443. * overwritten, and different in the source and destination buffers.
  444. * So if the DMA engine doesn't copy exactly what we tell it to copy,
  445. * we'll notice.
  446. */
  447. static int dmatest_func(void *data)
  448. {
  449. DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
  450. struct dmatest_thread *thread = data;
  451. struct dmatest_done done = { .wait = &done_wait };
  452. struct dmatest_info *info;
  453. struct dmatest_params *params;
  454. struct dma_chan *chan;
  455. struct dma_device *dev;
  456. const char *thread_name;
  457. unsigned int src_off, dst_off, len;
  458. unsigned int error_count;
  459. unsigned int failed_tests = 0;
  460. unsigned int total_tests = 0;
  461. dma_cookie_t cookie;
  462. enum dma_status status;
  463. enum dma_ctrl_flags flags;
  464. u8 *pq_coefs = NULL;
  465. int ret;
  466. int src_cnt;
  467. int dst_cnt;
  468. int i;
  469. struct dmatest_result *result;
  470. thread_name = current->comm;
  471. set_freezable();
  472. ret = -ENOMEM;
  473. smp_rmb();
  474. info = thread->info;
  475. params = &info->params;
  476. chan = thread->chan;
  477. dev = chan->device;
  478. if (thread->type == DMA_MEMCPY)
  479. src_cnt = dst_cnt = 1;
  480. else if (thread->type == DMA_XOR) {
  481. /* force odd to ensure dst = src */
  482. src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
  483. dst_cnt = 1;
  484. } else if (thread->type == DMA_PQ) {
  485. /* force odd to ensure dst = src */
  486. src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
  487. dst_cnt = 2;
  488. pq_coefs = kmalloc(params->pq_sources+1, GFP_KERNEL);
  489. if (!pq_coefs)
  490. goto err_thread_type;
  491. for (i = 0; i < src_cnt; i++)
  492. pq_coefs[i] = 1;
  493. } else
  494. goto err_thread_type;
  495. result = result_init(info, thread_name);
  496. if (!result)
  497. goto err_srcs;
  498. thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL);
  499. if (!thread->srcs)
  500. goto err_srcs;
  501. for (i = 0; i < src_cnt; i++) {
  502. thread->srcs[i] = kmalloc(params->buf_size, GFP_KERNEL);
  503. if (!thread->srcs[i])
  504. goto err_srcbuf;
  505. }
  506. thread->srcs[i] = NULL;
  507. thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL);
  508. if (!thread->dsts)
  509. goto err_dsts;
  510. for (i = 0; i < dst_cnt; i++) {
  511. thread->dsts[i] = kmalloc(params->buf_size, GFP_KERNEL);
  512. if (!thread->dsts[i])
  513. goto err_dstbuf;
  514. }
  515. thread->dsts[i] = NULL;
  516. set_user_nice(current, 10);
  517. /*
  518. * src buffers are freed by the DMAEngine code with dma_unmap_single()
  519. * dst buffers are freed by ourselves below
  520. */
  521. flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT
  522. | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE;
  523. while (!kthread_should_stop()
  524. && !(params->iterations && total_tests >= params->iterations)) {
  525. struct dma_async_tx_descriptor *tx = NULL;
  526. dma_addr_t dma_srcs[src_cnt];
  527. dma_addr_t dma_dsts[dst_cnt];
  528. u8 align = 0;
  529. total_tests++;
  530. /* honor alignment restrictions */
  531. if (thread->type == DMA_MEMCPY)
  532. align = dev->copy_align;
  533. else if (thread->type == DMA_XOR)
  534. align = dev->xor_align;
  535. else if (thread->type == DMA_PQ)
  536. align = dev->pq_align;
  537. if (1 << align > params->buf_size) {
  538. pr_err("%u-byte buffer too small for %d-byte alignment\n",
  539. params->buf_size, 1 << align);
  540. break;
  541. }
  542. len = dmatest_random() % params->buf_size + 1;
  543. len = (len >> align) << align;
  544. if (!len)
  545. len = 1 << align;
  546. src_off = dmatest_random() % (params->buf_size - len + 1);
  547. dst_off = dmatest_random() % (params->buf_size - len + 1);
  548. src_off = (src_off >> align) << align;
  549. dst_off = (dst_off >> align) << align;
  550. dmatest_init_srcs(thread->srcs, src_off, len, params->buf_size);
  551. dmatest_init_dsts(thread->dsts, dst_off, len, params->buf_size);
  552. for (i = 0; i < src_cnt; i++) {
  553. u8 *buf = thread->srcs[i] + src_off;
  554. dma_srcs[i] = dma_map_single(dev->dev, buf, len,
  555. DMA_TO_DEVICE);
  556. ret = dma_mapping_error(dev->dev, dma_srcs[i]);
  557. if (ret) {
  558. unmap_src(dev->dev, dma_srcs, len, i);
  559. thread_result_add(info, result,
  560. DMATEST_ET_MAP_SRC,
  561. total_tests, src_off, dst_off,
  562. len, ret);
  563. failed_tests++;
  564. continue;
  565. }
  566. }
  567. /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
  568. for (i = 0; i < dst_cnt; i++) {
  569. dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i],
  570. params->buf_size,
  571. DMA_BIDIRECTIONAL);
  572. ret = dma_mapping_error(dev->dev, dma_dsts[i]);
  573. if (ret) {
  574. unmap_src(dev->dev, dma_srcs, len, src_cnt);
  575. unmap_dst(dev->dev, dma_dsts, params->buf_size,
  576. i);
  577. thread_result_add(info, result,
  578. DMATEST_ET_MAP_DST,
  579. total_tests, src_off, dst_off,
  580. len, ret);
  581. failed_tests++;
  582. continue;
  583. }
  584. }
  585. if (thread->type == DMA_MEMCPY)
  586. tx = dev->device_prep_dma_memcpy(chan,
  587. dma_dsts[0] + dst_off,
  588. dma_srcs[0], len,
  589. flags);
  590. else if (thread->type == DMA_XOR)
  591. tx = dev->device_prep_dma_xor(chan,
  592. dma_dsts[0] + dst_off,
  593. dma_srcs, src_cnt,
  594. len, flags);
  595. else if (thread->type == DMA_PQ) {
  596. dma_addr_t dma_pq[dst_cnt];
  597. for (i = 0; i < dst_cnt; i++)
  598. dma_pq[i] = dma_dsts[i] + dst_off;
  599. tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs,
  600. src_cnt, pq_coefs,
  601. len, flags);
  602. }
  603. if (!tx) {
  604. unmap_src(dev->dev, dma_srcs, len, src_cnt);
  605. unmap_dst(dev->dev, dma_dsts, params->buf_size,
  606. dst_cnt);
  607. thread_result_add(info, result, DMATEST_ET_PREP,
  608. total_tests, src_off, dst_off,
  609. len, 0);
  610. msleep(100);
  611. failed_tests++;
  612. continue;
  613. }
  614. done.done = false;
  615. tx->callback = dmatest_callback;
  616. tx->callback_param = &done;
  617. cookie = tx->tx_submit(tx);
  618. if (dma_submit_error(cookie)) {
  619. thread_result_add(info, result, DMATEST_ET_SUBMIT,
  620. total_tests, src_off, dst_off,
  621. len, cookie);
  622. msleep(100);
  623. failed_tests++;
  624. continue;
  625. }
  626. dma_async_issue_pending(chan);
  627. wait_event_freezable_timeout(done_wait, done.done,
  628. msecs_to_jiffies(params->timeout));
  629. status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
  630. if (!done.done) {
  631. /*
  632. * We're leaving the timed out dma operation with
  633. * dangling pointer to done_wait. To make this
  634. * correct, we'll need to allocate wait_done for
  635. * each test iteration and perform "who's gonna
  636. * free it this time?" dancing. For now, just
  637. * leave it dangling.
  638. */
  639. thread_result_add(info, result, DMATEST_ET_TIMEOUT,
  640. total_tests, src_off, dst_off,
  641. len, 0);
  642. failed_tests++;
  643. continue;
  644. } else if (status != DMA_SUCCESS) {
  645. enum dmatest_error_type type = (status == DMA_ERROR) ?
  646. DMATEST_ET_DMA_ERROR : DMATEST_ET_DMA_IN_PROGRESS;
  647. thread_result_add(info, result, type,
  648. total_tests, src_off, dst_off,
  649. len, status);
  650. failed_tests++;
  651. continue;
  652. }
  653. /* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */
  654. unmap_dst(dev->dev, dma_dsts, params->buf_size, dst_cnt);
  655. error_count = 0;
  656. pr_debug("%s: verifying source buffer...\n", thread_name);
  657. error_count += verify_result_add(info, result, total_tests,
  658. src_off, dst_off, len, thread->srcs, -1,
  659. 0, PATTERN_SRC, true);
  660. error_count += verify_result_add(info, result, total_tests,
  661. src_off, dst_off, len, thread->srcs, 0,
  662. src_off, PATTERN_SRC | PATTERN_COPY, true);
  663. error_count += verify_result_add(info, result, total_tests,
  664. src_off, dst_off, len, thread->srcs, 1,
  665. src_off + len, PATTERN_SRC, true);
  666. pr_debug("%s: verifying dest buffer...\n", thread_name);
  667. error_count += verify_result_add(info, result, total_tests,
  668. src_off, dst_off, len, thread->dsts, -1,
  669. 0, PATTERN_DST, false);
  670. error_count += verify_result_add(info, result, total_tests,
  671. src_off, dst_off, len, thread->dsts, 0,
  672. src_off, PATTERN_SRC | PATTERN_COPY, false);
  673. error_count += verify_result_add(info, result, total_tests,
  674. src_off, dst_off, len, thread->dsts, 1,
  675. dst_off + len, PATTERN_DST, false);
  676. if (error_count) {
  677. thread_result_add(info, result, DMATEST_ET_VERIFY,
  678. total_tests, src_off, dst_off,
  679. len, error_count);
  680. failed_tests++;
  681. } else {
  682. thread_result_add(info, result, DMATEST_ET_OK,
  683. total_tests, src_off, dst_off,
  684. len, 0);
  685. }
  686. }
  687. ret = 0;
  688. for (i = 0; thread->dsts[i]; i++)
  689. kfree(thread->dsts[i]);
  690. err_dstbuf:
  691. kfree(thread->dsts);
  692. err_dsts:
  693. for (i = 0; thread->srcs[i]; i++)
  694. kfree(thread->srcs[i]);
  695. err_srcbuf:
  696. kfree(thread->srcs);
  697. err_srcs:
  698. kfree(pq_coefs);
  699. err_thread_type:
  700. pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
  701. thread_name, total_tests, failed_tests, ret);
  702. /* terminate all transfers on specified channels */
  703. if (ret)
  704. dmaengine_terminate_all(chan);
  705. thread->done = true;
  706. if (params->iterations > 0)
  707. while (!kthread_should_stop()) {
  708. DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
  709. interruptible_sleep_on(&wait_dmatest_exit);
  710. }
  711. return ret;
  712. }
  713. static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
  714. {
  715. struct dmatest_thread *thread;
  716. struct dmatest_thread *_thread;
  717. int ret;
  718. list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
  719. ret = kthread_stop(thread->task);
  720. pr_debug("dmatest: thread %s exited with status %d\n",
  721. thread->task->comm, ret);
  722. list_del(&thread->node);
  723. kfree(thread);
  724. }
  725. /* terminate all transfers on specified channels */
  726. dmaengine_terminate_all(dtc->chan);
  727. kfree(dtc);
  728. }
/*
 * Spawn params->threads_per_chan worker kthreads of transaction @type on
 * @dtc's channel.  Returns the number of threads actually started (may
 * be fewer than requested on allocation failure) or -EINVAL for an
 * unsupported type.
 */
static int dmatest_add_threads(struct dmatest_info *info,
		struct dmatest_chan *dtc, enum dma_transaction_type type)
{
	struct dmatest_params *params = &info->params;
	struct dmatest_thread *thread;
	struct dma_chan *chan = dtc->chan;
	char *op;
	unsigned int i;

	if (type == DMA_MEMCPY)
		op = "copy";
	else if (type == DMA_XOR)
		op = "xor";
	else if (type == DMA_PQ)
		op = "pq";
	else
		return -EINVAL;

	for (i = 0; i < params->threads_per_chan; i++) {
		thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
		if (!thread) {
			pr_warning("dmatest: No memory for %s-%s%u\n",
					dma_chan_name(chan), op, i);
			break;
		}
		thread->info = info;
		thread->chan = dtc->chan;
		thread->type = type;
		/* publish the fields above before the thread may run them;
		 * pairs with the smp_rmb() in dmatest_func() */
		smp_wmb();
		thread->task = kthread_run(dmatest_func, thread, "%s-%s%u",
				dma_chan_name(chan), op, i);
		if (IS_ERR(thread->task)) {
			pr_warning("dmatest: Failed to run thread %s-%s%u\n",
					dma_chan_name(chan), op, i);
			kfree(thread);
			break;
		}

		/* srcbuf and dstbuf are allocated by the thread itself */

		list_add_tail(&thread->node, &dtc->threads);
	}

	return i;
}
  769. static int dmatest_add_channel(struct dmatest_info *info,
  770. struct dma_chan *chan)
  771. {
  772. struct dmatest_chan *dtc;
  773. struct dma_device *dma_dev = chan->device;
  774. unsigned int thread_count = 0;
  775. int cnt;
  776. dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
  777. if (!dtc) {
  778. pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan));
  779. return -ENOMEM;
  780. }
  781. dtc->chan = chan;
  782. INIT_LIST_HEAD(&dtc->threads);
  783. if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
  784. cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY);
  785. thread_count += cnt > 0 ? cnt : 0;
  786. }
  787. if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
  788. cnt = dmatest_add_threads(info, dtc, DMA_XOR);
  789. thread_count += cnt > 0 ? cnt : 0;
  790. }
  791. if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
  792. cnt = dmatest_add_threads(info, dtc, DMA_PQ);
  793. thread_count += cnt > 0 ? cnt : 0;
  794. }
  795. pr_info("dmatest: Started %u threads using %s\n",
  796. thread_count, dma_chan_name(chan));
  797. list_add_tail(&dtc->node, &info->channels);
  798. info->nr_channels++;
  799. return 0;
  800. }
  801. static bool filter(struct dma_chan *chan, void *param)
  802. {
  803. struct dmatest_params *params = param;
  804. if (!dmatest_match_channel(params, chan) ||
  805. !dmatest_match_device(params, chan->device))
  806. return false;
  807. else
  808. return true;
  809. }
  810. static int __run_threaded_test(struct dmatest_info *info)
  811. {
  812. dma_cap_mask_t mask;
  813. struct dma_chan *chan;
  814. struct dmatest_params *params = &info->params;
  815. int err = 0;
  816. dma_cap_zero(mask);
  817. dma_cap_set(DMA_MEMCPY, mask);
  818. for (;;) {
  819. chan = dma_request_channel(mask, filter, params);
  820. if (chan) {
  821. err = dmatest_add_channel(info, chan);
  822. if (err) {
  823. dma_release_channel(chan);
  824. break; /* add_channel failed, punt */
  825. }
  826. } else
  827. break; /* no more channels available */
  828. if (params->max_channels &&
  829. info->nr_channels >= params->max_channels)
  830. break; /* we have all we need */
  831. }
  832. return err;
  833. }
  834. #ifndef MODULE
  835. static int run_threaded_test(struct dmatest_info *info)
  836. {
  837. int ret;
  838. mutex_lock(&info->lock);
  839. ret = __run_threaded_test(info);
  840. mutex_unlock(&info->lock);
  841. return ret;
  842. }
  843. #endif
  844. static void __stop_threaded_test(struct dmatest_info *info)
  845. {
  846. struct dmatest_chan *dtc, *_dtc;
  847. struct dma_chan *chan;
  848. list_for_each_entry_safe(dtc, _dtc, &info->channels, node) {
  849. list_del(&dtc->node);
  850. chan = dtc->chan;
  851. dmatest_cleanup_channel(dtc);
  852. pr_debug("dmatest: dropped channel %s\n", dma_chan_name(chan));
  853. dma_release_channel(chan);
  854. }
  855. info->nr_channels = 0;
  856. }
  857. static void stop_threaded_test(struct dmatest_info *info)
  858. {
  859. mutex_lock(&info->lock);
  860. __stop_threaded_test(info);
  861. mutex_unlock(&info->lock);
  862. }
  863. static int __restart_threaded_test(struct dmatest_info *info, bool run)
  864. {
  865. struct dmatest_params *params = &info->params;
  866. /* Stop any running test first */
  867. __stop_threaded_test(info);
  868. if (run == false)
  869. return 0;
  870. /* Clear results from previous run */
  871. result_free(info, NULL);
  872. /* Copy test parameters */
  873. params->buf_size = test_buf_size;
  874. strlcpy(params->channel, strim(test_channel), sizeof(params->channel));
  875. strlcpy(params->device, strim(test_device), sizeof(params->device));
  876. params->threads_per_chan = threads_per_chan;
  877. params->max_channels = max_channels;
  878. params->iterations = iterations;
  879. params->xor_sources = xor_sources;
  880. params->pq_sources = pq_sources;
  881. params->timeout = timeout;
  882. /* Run test with new parameters */
  883. return __run_threaded_test(info);
  884. }
  885. static bool __is_threaded_test_run(struct dmatest_info *info)
  886. {
  887. struct dmatest_chan *dtc;
  888. list_for_each_entry(dtc, &info->channels, node) {
  889. struct dmatest_thread *thread;
  890. list_for_each_entry(thread, &dtc->threads, node) {
  891. if (!thread->done)
  892. return true;
  893. }
  894. }
  895. return false;
  896. }
  897. static ssize_t dtf_read_run(struct file *file, char __user *user_buf,
  898. size_t count, loff_t *ppos)
  899. {
  900. struct dmatest_info *info = file->private_data;
  901. char buf[3];
  902. mutex_lock(&info->lock);
  903. if (__is_threaded_test_run(info)) {
  904. buf[0] = 'Y';
  905. } else {
  906. __stop_threaded_test(info);
  907. buf[0] = 'N';
  908. }
  909. mutex_unlock(&info->lock);
  910. buf[1] = '\n';
  911. buf[2] = 0x00;
  912. return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
  913. }
  914. static ssize_t dtf_write_run(struct file *file, const char __user *user_buf,
  915. size_t count, loff_t *ppos)
  916. {
  917. struct dmatest_info *info = file->private_data;
  918. char buf[16];
  919. bool bv;
  920. int ret = 0;
  921. if (copy_from_user(buf, user_buf, min(count, (sizeof(buf) - 1))))
  922. return -EFAULT;
  923. if (strtobool(buf, &bv) == 0) {
  924. mutex_lock(&info->lock);
  925. if (__is_threaded_test_run(info))
  926. ret = -EBUSY;
  927. else
  928. ret = __restart_threaded_test(info, bv);
  929. mutex_unlock(&info->lock);
  930. }
  931. return ret ? ret : count;
  932. }
/* debugfs "run" file: read returns Y/N status, write a bool starts/stops. */
static const struct file_operations dtf_run_fops = {
	.read	= dtf_read_run,
	.write	= dtf_write_run,
	.open	= simple_open,
	.llseek	= default_llseek,
};
  939. static int dtf_results_show(struct seq_file *sf, void *data)
  940. {
  941. struct dmatest_info *info = sf->private;
  942. struct dmatest_result *result;
  943. struct dmatest_thread_result *tr;
  944. unsigned int i;
  945. mutex_lock(&info->results_lock);
  946. list_for_each_entry(result, &info->results, node) {
  947. list_for_each_entry(tr, &result->results, node) {
  948. seq_printf(sf, "%s\n",
  949. thread_result_get(result->name, tr));
  950. if (tr->type == DMATEST_ET_VERIFY_BUF) {
  951. for (i = 0; i < tr->vr->error_count; i++) {
  952. seq_printf(sf, "\t%s\n",
  953. verify_result_get_one(tr->vr, i));
  954. }
  955. }
  956. }
  957. }
  958. mutex_unlock(&info->results_lock);
  959. return 0;
  960. }
/* Open hook wiring dtf_results_show() into the seq_file machinery. */
static int dtf_results_open(struct inode *inode, struct file *file)
{
	return single_open(file, dtf_results_show, inode->i_private);
}
/* debugfs "results" file: read-only dump of accumulated test results. */
static const struct file_operations dtf_results_fops = {
	.open		= dtf_results_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
  971. static int dmatest_register_dbgfs(struct dmatest_info *info)
  972. {
  973. struct dentry *d;
  974. d = debugfs_create_dir("dmatest", NULL);
  975. if (IS_ERR(d))
  976. return PTR_ERR(d);
  977. if (!d)
  978. goto err_root;
  979. info->root = d;
  980. /* Run or stop threaded test */
  981. debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root, info,
  982. &dtf_run_fops);
  983. /* Results of test in progress */
  984. debugfs_create_file("results", S_IRUGO, info->root, info,
  985. &dtf_results_fops);
  986. return 0;
  987. err_root:
  988. pr_err("dmatest: Failed to initialize debugfs\n");
  989. return -ENOMEM;
  990. }
  991. static int __init dmatest_init(void)
  992. {
  993. struct dmatest_info *info = &test_info;
  994. int ret;
  995. memset(info, 0, sizeof(*info));
  996. mutex_init(&info->lock);
  997. INIT_LIST_HEAD(&info->channels);
  998. mutex_init(&info->results_lock);
  999. INIT_LIST_HEAD(&info->results);
  1000. ret = dmatest_register_dbgfs(info);
  1001. if (ret)
  1002. return ret;
  1003. #ifdef MODULE
  1004. return 0;
  1005. #else
  1006. return run_threaded_test(info);
  1007. #endif
  1008. }
  1009. /* when compiled-in wait for drivers to load first */
  1010. late_initcall(dmatest_init);
/*
 * Module exit: remove the debugfs files first so no new writes can race
 * with teardown, then stop all test threads and free stored results.
 */
static void __exit dmatest_exit(void)
{
	struct dmatest_info *info = &test_info;

	debugfs_remove_recursive(info->root);
	stop_threaded_test(info);
	result_free(info, NULL);
}
  1018. module_exit(dmatest_exit);
  1019. MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
  1020. MODULE_LICENSE("GPL v2");