dmaengine.c

/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs; it is protected by
 * a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or if it has need for an exclusive channel it can
 * call dma_request_channel().  Once a channel is allocated a reference is
 * taken against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is simply set up by the driver.
 *
 * See Documentation/dmaengine.txt for more details
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>

static DEFINE_MUTEX(dma_list_mutex);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;
static struct idr dma_idr;

/* --- sysfs implementation --- */
/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev - device node
 *
 * Must be called under dma_list_mutex
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	return chan_dev->chan;
}
static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->memcpy_count;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}

static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
				      char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}

static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan)
		err = sprintf(buf, "%d\n", chan->client_count);
	else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}

static struct device_attribute dma_attrs[] = {
	__ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
	__ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
	__ATTR(in_use, S_IRUGO, show_in_use, NULL),
	__ATTR_NULL
};

static void chan_dev_release(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	if (atomic_dec_and_test(chan_dev->idr_ref)) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, chan_dev->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(chan_dev->idr_ref);
	}
	kfree(chan_dev);
}

static struct class dma_devclass = {
	.name		= "dma",
	.dev_attrs	= dma_attrs,
	.dev_release	= chan_dev_release,
};

/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
	__dma_device_satisfies_mask((device), &(mask))
static int
__dma_device_satisfies_mask(struct dma_device *device, dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
		DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->dev->driver->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan - channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
	int err = -ENODEV;
	struct module *owner = dma_chan_to_owner(chan);

	if (chan->client_count) {
		__module_get(owner);
		err = 0;
	} else if (try_module_get(owner))
		err = 0;

	if (err == 0)
		chan->client_count++;

	/* allocate upon first client reference */
	if (chan->client_count == 1 && err == 0) {
		int desc_cnt = chan->device->device_alloc_chan_resources(chan);

		if (desc_cnt < 0) {
			err = desc_cnt;
			chan->client_count = 0;
			module_put(owner);
		} else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
			balance_ref_count(chan);
	}

	return err;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan - channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
	if (!chan->client_count)
		return; /* this channel failed alloc_chan_resources */
	chan->client_count--;
	module_put(dma_chan_to_owner(chan));
	if (chan->client_count == 0)
		chan->device->device_free_chan_resources(chan);
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			printk(KERN_ERR "dma_sync_wait_timeout!\n");
			return DMA_ERROR;
		}
	} while (status == DMA_IN_PROGRESS);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);

/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan - associated channel for this entry
 */
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
	enum dma_transaction_type cap;
	int err = 0;

	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* 'interrupt', 'private', and 'slave' are channel capabilities,
	 * but are not associated with an operation so they do not need
	 * an entry in the channel_table
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;
		}
	}

	if (err) {
		pr_err("dmaengine: initialization failure\n");
		for_each_dma_cap_mask(cap, dma_cap_mask_all)
			if (channel_table[cap])
				free_percpu(channel_table[cap]);
	}

	return err;
}
arch_initcall(dma_channel_table_init);

/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	struct dma_chan *chan;
	int cpu;

	cpu = get_cpu();
	chan = per_cpu_ptr(channel_table[tx_type], cpu)->chan;
	put_cpu();

	return chan;
}
EXPORT_SYMBOL(dma_find_channel);
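
/*
 * Example (illustrative sketch, not part of the original file and not built
 * by default): an opportunistic client takes a subsystem reference with
 * dmaengine_get(), looks up a memcpy-capable channel via dma_find_channel(),
 * and leaves the caller to fall back to a CPU copy on error.  The function
 * name below is hypothetical and only demonstrates the calling convention;
 * real clients typically call dmaengine_get()/dmaengine_put() once at module
 * init/exit rather than per copy.
 */
static int __maybe_unused dmaengine_memcpy_example(void *dest, void *src,
						   size_t len)
{
	struct dma_chan *chan;
	dma_cookie_t cookie;
	int ret = -ENODEV;

	dmaengine_get();			/* register interest in channels */
	chan = dma_find_channel(DMA_MEMCPY);	/* per-cpu lookup, may be NULL */
	if (chan) {
		cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
		if (cookie < 0)
			ret = cookie;		/* descriptor submission failed */
		else {
			dma_async_issue_pending(chan);
			ret = 0;
		}
	}
	dmaengine_put();			/* drop the subsystem reference */

	return ret;				/* caller does a CPU copy on error */
}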
/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);

/**
 * nth_chan - returns the nth channel of the given capability
 * @cap: capability to match
 * @n: nth channel desired
 *
 * Defaults to returning the channel with the desired capability and the
 * lowest reference count when 'n' cannot be satisfied.  Must be called
 * under dma_list_mutex.
 */
static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
{
	struct dma_device *device;
	struct dma_chan *chan;
	struct dma_chan *ret = NULL;
	struct dma_chan *min = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (!dma_has_cap(cap, device->cap_mask) ||
		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			if (!min)
				min = chan;
			else if (chan->table_count < min->table_count)
				min = chan;

			if (n-- == 0) {
				ret = chan;
				break; /* done */
			}
		}
		if (ret)
			break; /* done */
	}

	if (!ret)
		ret = min;

	if (ret)
		ret->table_count++;

	return ret;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case.  Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
	struct dma_chan *chan;
	struct dma_device *device;
	int cpu;
	int cap;
	int n;

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu)
			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			chan->table_count = 0;
	}

	/* don't populate the channel_table if no clients are available */
	if (!dmaengine_ref_count)
		return;

	/* redistribute available channels */
	n = 0;
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			if (num_possible_cpus() > 1)
				chan = nth_chan(cap, n++);
			else
				chan = nth_chan(cap, -1);

			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
		}
}

static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev,
					  dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan;

	if (!__dma_device_satisfies_mask(dev, mask)) {
		pr_debug("%s: wrong capabilities\n", __func__);
		return NULL;
	}
	/* devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			pr_debug("%s: %s busy\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		if (fn && !fn(chan, fn_param)) {
			pr_debug("%s: %s filter said false\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		return chan;
	}

	return NULL;
}

/**
 * dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to disposition available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 */
struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;
	int err;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		chan = private_candidate(mask, device, fn, fn_param);
		if (chan) {
			/* Found a suitable channel, try to grab, prep, and
			 * return it.  We first set DMA_PRIVATE to disable
			 * balance_ref_count as this channel will not be
			 * published in the general-purpose allocator
			 */
			dma_cap_set(DMA_PRIVATE, device->cap_mask);
			device->privatecnt++;
			err = dma_chan_get(chan);

			if (err == -ENODEV) {
				pr_debug("%s: %s module removed\n", __func__,
					 dma_chan_name(chan));
				list_del_rcu(&device->global_node);
			} else if (err)
				pr_err("dmaengine: failed to get %s: (%d)\n",
				       dma_chan_name(chan), err);
			else
				break;
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
			chan->private = NULL;
			chan = NULL;
		}
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail",
		 chan ? dma_chan_name(chan) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);

void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	/* drop PRIVATE cap enabled by __dma_request_channel() */
	if (--chan->device->privatecnt == 0)
		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
	chan->private = NULL;
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);
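
/*
 * Example (illustrative sketch, not part of the original file and not built
 * by default): an exclusive-use client builds a capability mask, optionally
 * supplies a filter callback to pick a specific channel, and releases the
 * channel when done.  The filter function and the "wanted" parameter below
 * are hypothetical.
 */
static bool __maybe_unused example_filter(struct dma_chan *chan, void *param)
{
	/* return true only for the channel the caller is looking for */
	return chan == param;
}

static void __maybe_unused dma_request_channel_example(struct dma_chan *wanted)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	/* dma_request_channel() is a wrapper around __dma_request_channel() */
	chan = dma_request_channel(mask, example_filter, wanted);
	if (!chan)
		return;		/* no channel satisfied the mask and filter */

	/* ... issue transactions on the exclusively owned channel ... */

	dma_release_channel(chan);
}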
/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_rcu(&device->global_node);
				break;
			} else if (err)
				pr_err("dmaengine: failed to get %s: (%d)\n",
				       dma_chan_name(chan), err);
		}
	}

	/* if this is the first reference and there were channels
	 * waiting we need to rebalance to get those channels
	 * incorporated into the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);
	}
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);

static bool device_has_all_tx_types(struct dma_device *device)
{
	/* A device that satisfies this test has channels that will never cause
	 * an async_tx channel switch event as all possible operation types can
	 * be handled.
	 */
	#ifdef CONFIG_ASYNC_TX_DMA
	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_MEMSET) || defined(CONFIG_ASYNC_MEMSET_MODULE)
	if (!dma_has_cap(DMA_MEMSET, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
	if (!dma_has_cap(DMA_XOR, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
	if (!dma_has_cap(DMA_PQ, device->cap_mask))
		return false;
	#endif

	return true;
}

static int get_dma_id(struct dma_device *device)
{
	int rc;

 idr_retry:
	if (!idr_pre_get(&dma_idr, GFP_KERNEL))
		return -ENOMEM;
	mutex_lock(&dma_list_mutex);
	rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
	mutex_unlock(&dma_list_mutex);
	if (rc == -EAGAIN)
		goto idr_retry;
	else if (rc != 0)
		return rc;

	return 0;
}

/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
	int chancnt = 0, rc;
	struct dma_chan* chan;
	atomic_t *idr_ref;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
		!device->device_prep_dma_memcpy);
	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
		!device->device_prep_dma_xor);
	BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
		!device->device_prep_dma_xor_val);
	BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
		!device->device_prep_dma_pq);
	BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
		!device->device_prep_dma_pq_val);
	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
		!device->device_prep_dma_memset);
	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
		!device->device_prep_dma_interrupt);
	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
		!device->device_prep_slave_sg);
	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
		!device->device_terminate_all);

	BUG_ON(!device->device_alloc_chan_resources);
	BUG_ON(!device->device_free_chan_resources);
	BUG_ON(!device->device_is_tx_complete);
	BUG_ON(!device->device_issue_pending);
	BUG_ON(!device->dev);

	/* note: this only matters in the
	 * CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH=y case
	 */
	if (device_has_all_tx_types(device))
		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
	if (!idr_ref)
		return -ENOMEM;
	rc = get_dma_id(device);
	if (rc != 0) {
		kfree(idr_ref);
		return rc;
	}

	atomic_set(idr_ref, 0);

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		rc = -ENOMEM;
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			goto err_out;
		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
		if (chan->dev == NULL) {
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}

		chan->chan_id = chancnt++;
		chan->dev->device.class = &dma_devclass;
		chan->dev->device.parent = device->dev;
		chan->dev->chan = chan;
		chan->dev->idr_ref = idr_ref;
		chan->dev->dev_id = device->dev_id;
		atomic_inc(idr_ref);
		dev_set_name(&chan->dev->device, "dma%dchan%d",
			     device->dev_id, chan->chan_id);

		rc = device_register(&chan->dev->device);
		if (rc) {
			free_percpu(chan->local);
			chan->local = NULL;
			kfree(chan->dev);
			atomic_dec(idr_ref);
			goto err_out;
		}
		chan->client_count = 0;
	}
	device->chancnt = chancnt;

	mutex_lock(&dma_list_mutex);
	/* take references on public channels */
	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
		list_for_each_entry(chan, &device->channels, device_node) {
			/* if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/* note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail_rcu(&device->global_node, &dma_device_list);
	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
		device->privatecnt++;	/* Always private */
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	return 0;

err_out:
	/* if we never registered a channel just release the idr */
	if (atomic_read(idr_ref) == 0) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, device->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(idr_ref);
		return rc;
	}

	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
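
/*
 * Example (illustrative sketch, not part of the original file and not built
 * by default): the skeleton of a provider registering one memcpy-capable
 * channel.  The callback names mentioned in the comment below
 * (example_alloc_chan_resources() and friends) are hypothetical stubs a real
 * driver would implement; only the registration sequence is shown here.
 */
static int __maybe_unused example_driver_register(struct dma_device *dma_dev,
						  struct dma_chan *chan,
						  struct device *parent)
{
	/* advertise capabilities; at least one operation type is required */
	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);

	/* a real driver fills in the mandatory callbacks checked by the
	 * BUG_ON()s in dma_async_device_register(), e.g.:
	 *	dma_dev->device_alloc_chan_resources = example_alloc_chan_resources;
	 *	dma_dev->device_free_chan_resources = example_free_chan_resources;
	 *	dma_dev->device_prep_dma_memcpy = example_prep_dma_memcpy;
	 *	dma_dev->device_is_tx_complete = example_is_tx_complete;
	 *	dma_dev->device_issue_pending = example_issue_pending;
	 */
	dma_dev->dev = parent;

	/* hang the channel(s) off the device before registering */
	INIT_LIST_HEAD(&dma_dev->channels);
	chan->device = dma_dev;
	list_add_tail(&chan->device_node, &dma_dev->channels);

	return dma_async_device_register(dma_dev);
}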
/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines; dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	list_del_rcu(&device->global_node);
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		WARN_ONCE(chan->client_count,
			  "%s called while %d clients hold a reference\n",
			  __func__, chan->client_count);
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
	}
}
EXPORT_SYMBOL(dma_async_device_unregister);
/**
 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 * @chan: DMA channel to offload copy to
 * @dest: destination address (virtual)
 * @src: source address (virtual)
 * @len: length
 *
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
 * user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
			void *src, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int cpu;
	unsigned long flags;

	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
	flags = DMA_CTRL_ACK |
		DMA_COMPL_SRC_UNMAP_SINGLE |
		DMA_COMPL_DEST_UNMAP_SINGLE;
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
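
/*
 * Example (illustrative sketch, not part of the original file and not built
 * by default): a synchronous copy helper that submits an offloaded copy on a
 * channel the caller already owns and spins until it completes.  The helper
 * name is hypothetical.
 */
static enum dma_status __maybe_unused
dma_memcpy_sync_example(struct dma_chan *chan, void *dest, void *src,
			size_t len)
{
	dma_cookie_t cookie;

	cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
	if (cookie < 0)
		return DMA_ERROR;	/* descriptor allocation failed */

	/* dma_sync_wait() issues pending descriptors and polls for completion */
	return dma_sync_wait(chan, cookie);
}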
/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages)
 */
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
			unsigned int offset, void *kdata, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int cpu;
	unsigned long flags;

	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
	flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE;
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);

/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
 * (kernel memory or locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
	unsigned int dest_off, struct page *src_pg, unsigned int src_off,
	size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int cpu;
	unsigned long flags;

	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
				DMA_FROM_DEVICE);
	flags = DMA_CTRL_ACK;
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

	if (!tx) {
		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan)
{
	tx->chan = chan;
	spin_lock_init(&tx->lock);
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	if (!tx)
		return DMA_SUCCESS;

	while (tx->cookie == -EBUSY) {
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s timeout waiting for descriptor submission\n",
			       __func__);
			return DMA_ERROR;
		}
		cpu_relax();
	}
	return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);

/* dma_run_dependencies - helper routine for dma drivers to process
 *	(start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = tx->next;
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	/* we'll submit tx->next now, so clear the link */
	tx->next = NULL;
	chan = dep->chan;

	/* keep submitting up until a channel switch is detected
	 * in that case we will be called again as a result of
	 * processing the interrupt from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		spin_lock_bh(&dep->lock);
		dep->parent = NULL;
		dep_next = dep->next;
		if (dep_next && dep_next->chan == chan)
			dep->next = NULL; /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		spin_unlock_bh(&dep->lock);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);

static int __init dma_bus_init(void)
{
	idr_init(&dma_idr);
	mutex_init(&dma_list_mutex);
	return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);