dmaengine.c
/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps two global lists, dma_device_list and dma_client_list.
 * Both of these are protected by a mutex, dma_list_mutex.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is only set up by the driver.
 *
 * Each client is responsible for keeping track of the channels it uses. See
 * the definition of dma_event_callback in dmaengine.h.
 *
 * Each device has a kref, which is initialized to 1 when the device is
 * registered. A kref_get is done for each device registered. When the
 * device is released, the corresponding kref_put is done in the release
 * method. Every time one of the device's channels is allocated to a client,
 * a kref_get occurs. When the channel is freed, the corresponding kref_put
 * happens. The device's release function does a completion, so
 * unregister_device does a remove event, device_unregister, a kref_put
 * for the first reference, then waits on the completion for all other
 * references to finish.
 *
 * Each channel has an open-coded implementation of Rusty Russell's "bigref,"
 * with a kref and a per_cpu local_t. A dma_chan_get is called when a client
 * signals that it wants to use a channel, and dma_chan_put is called when
 * a channel is removed or a client using it is unregistered. A client can
 * take extra references per outstanding transaction, as is the case with
 * the NET DMA client. The release function does a kref_put on the device.
 * -ChrisL, DanW
 */
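
/*
 * A minimal client-side sketch of the model described above, kept out of
 * the build with #if 0; example_chan, example_event_callback, example_client
 * and example_register_client are hypothetical names, not part of this file.
 * A client advertises the capabilities it wants in cap_mask, supplies an
 * event_callback, and registers itself; channels are then offered one at a
 * time as DMA_RESOURCE_AVAILABLE events, where DMA_ACK takes a reference on
 * the channel, DMA_DUP declines it (but keeps seeing others), and DMA_NAK
 * stops further offers.
 */
#if 0
static struct dma_chan *example_chan;

static enum dma_state_client
example_event_callback(struct dma_client *client, struct dma_chan *chan,
                       enum dma_state state)
{
        switch (state) {
        case DMA_RESOURCE_AVAILABLE:
                if (example_chan)
                        return DMA_DUP;         /* already have a channel */
                example_chan = chan;
                return DMA_ACK;                 /* take a reference */
        case DMA_RESOURCE_REMOVED:
                if (example_chan != chan)
                        return DMA_DUP;
                example_chan = NULL;
                return DMA_ACK;                 /* drop our reference */
        default:
                return DMA_DUP;
        }
}

static struct dma_client example_client = {
        .event_callback = example_event_callback,
};

static void example_register_client(void)
{
        dma_cap_zero(example_client.cap_mask);
        dma_cap_set(DMA_MEMCPY, example_client.cap_mask);
        dma_async_client_register(&example_client);
        /* offer any channels that already satisfy cap_mask */
        dma_async_client_chan_request(&example_client);
}
#endif
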
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>

static DEFINE_MUTEX(dma_list_mutex);
static LIST_HEAD(dma_device_list);
static LIST_HEAD(dma_client_list);

/* --- sysfs implementation --- */
static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct dma_chan *chan = to_dma_chan(dev);
        unsigned long count = 0;
        int i;

        for_each_possible_cpu(i)
                count += per_cpu_ptr(chan->local, i)->memcpy_count;

        return sprintf(buf, "%lu\n", count);
}

static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
                                      char *buf)
{
        struct dma_chan *chan = to_dma_chan(dev);
        unsigned long count = 0;
        int i;

        for_each_possible_cpu(i)
                count += per_cpu_ptr(chan->local, i)->bytes_transferred;

        return sprintf(buf, "%lu\n", count);
}

static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct dma_chan *chan = to_dma_chan(dev);
        int in_use = 0;

        if (unlikely(chan->slow_ref) &&
            atomic_read(&chan->refcount.refcount) > 1)
                in_use = 1;
        else {
                if (local_read(&(per_cpu_ptr(chan->local,
                    get_cpu())->refcount)) > 0)
                        in_use = 1;
                put_cpu();
        }

        return sprintf(buf, "%d\n", in_use);
}

static struct device_attribute dma_attrs[] = {
        __ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
        __ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
        __ATTR(in_use, S_IRUGO, show_in_use, NULL),
        __ATTR_NULL
};

static void dma_async_device_cleanup(struct kref *kref);

static void dma_dev_release(struct device *dev)
{
        struct dma_chan *chan = to_dma_chan(dev);

        kref_put(&chan->device->refcount, dma_async_device_cleanup);
}

static struct class dma_devclass = {
        .name = "dma",
        .dev_attrs = dma_attrs,
        .dev_release = dma_dev_release,
};

/* --- client and device registration --- */

#define dma_chan_satisfies_mask(chan, mask) \
        __dma_chan_satisfies_mask((chan), &(mask))
static int
__dma_chan_satisfies_mask(struct dma_chan *chan, dma_cap_mask_t *want)
{
        dma_cap_mask_t has;

        bitmap_and(has.bits, want->bits, chan->device->cap_mask.bits,
                DMA_TX_TYPE_END);
        return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

/**
 * dma_client_chan_alloc - try to allocate channels to a client
 * @client: &dma_client
 *
 * Called with dma_list_mutex held.
 */
static void dma_client_chan_alloc(struct dma_client *client)
{
        struct dma_device *device;
        struct dma_chan *chan;
        int desc;       /* allocated descriptor count */
        enum dma_state_client ack;

        /* Find a channel */
        list_for_each_entry(device, &dma_device_list, global_node) {
                /* Does the client require a specific DMA controller? */
                if (client->slave && client->slave->dma_dev
                                && client->slave->dma_dev != device->dev)
                        continue;

                list_for_each_entry(chan, &device->channels, device_node) {
                        if (!dma_chan_satisfies_mask(chan, client->cap_mask))
                                continue;

                        desc = chan->device->device_alloc_chan_resources(
                                        chan, client);
                        if (desc >= 0) {
                                ack = client->event_callback(client,
                                                chan,
                                                DMA_RESOURCE_AVAILABLE);

                                /* we are done once this client rejects
                                 * an available resource
                                 */
                                if (ack == DMA_ACK) {
                                        dma_chan_get(chan);
                                        chan->client_count++;
                                } else if (ack == DMA_NAK)
                                        return;
                        }
                }
        }
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
        enum dma_status status;
        unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

        dma_async_issue_pending(chan);
        do {
                status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
                if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
                        printk(KERN_ERR "dma_sync_wait_timeout!\n");
                        return DMA_ERROR;
                }
        } while (status == DMA_IN_PROGRESS);

        return status;
}
EXPORT_SYMBOL(dma_sync_wait);

/**
 * dma_chan_cleanup - release a DMA channel's resources
 * @kref: kernel reference structure that contains the DMA channel device
 */
void dma_chan_cleanup(struct kref *kref)
{
        struct dma_chan *chan = container_of(kref, struct dma_chan, refcount);

        chan->device->device_free_chan_resources(chan);
        kref_put(&chan->device->refcount, dma_async_device_cleanup);
}
EXPORT_SYMBOL(dma_chan_cleanup);

static void dma_chan_free_rcu(struct rcu_head *rcu)
{
        struct dma_chan *chan = container_of(rcu, struct dma_chan, rcu);
        int bias = 0x7FFFFFFF;
        int i;

        for_each_possible_cpu(i)
                bias -= local_read(&per_cpu_ptr(chan->local, i)->refcount);
        atomic_sub(bias, &chan->refcount.refcount);
        kref_put(&chan->refcount, dma_chan_cleanup);
}

static void dma_chan_release(struct dma_chan *chan)
{
        atomic_add(0x7FFFFFFF, &chan->refcount.refcount);
        chan->slow_ref = 1;
        call_rcu(&chan->rcu, dma_chan_free_rcu);
}

/**
 * dma_clients_notify_available - broadcast available channels to the clients
 */
static void dma_clients_notify_available(void)
{
        struct dma_client *client;

        mutex_lock(&dma_list_mutex);

        list_for_each_entry(client, &dma_client_list, global_node)
                dma_client_chan_alloc(client);

        mutex_unlock(&dma_list_mutex);
}

/**
 * dma_clients_notify_removed - tell the clients that a channel is going away
 * @chan: channel on its way out
 */
static void dma_clients_notify_removed(struct dma_chan *chan)
{
        struct dma_client *client;
        enum dma_state_client ack;

        mutex_lock(&dma_list_mutex);

        list_for_each_entry(client, &dma_client_list, global_node) {
                ack = client->event_callback(client, chan,
                                DMA_RESOURCE_REMOVED);

                /* client was holding resources for this channel so
                 * free it
                 */
                if (ack == DMA_ACK) {
                        dma_chan_put(chan);
                        chan->client_count--;
                }
        }

        mutex_unlock(&dma_list_mutex);
}

/**
 * dma_async_client_register - register a &dma_client
 * @client: ptr to a client structure with valid 'event_callback' and 'cap_mask'
 */
void dma_async_client_register(struct dma_client *client)
{
        /* validate client data */
        BUG_ON(dma_has_cap(DMA_SLAVE, client->cap_mask) &&
                !client->slave);

        mutex_lock(&dma_list_mutex);
        list_add_tail(&client->global_node, &dma_client_list);
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_client_register);

/**
 * dma_async_client_unregister - unregister a client and free the &dma_client
 * @client: &dma_client to free
 *
 * Force frees any allocated DMA channels, frees the &dma_client memory
 */
void dma_async_client_unregister(struct dma_client *client)
{
        struct dma_device *device;
        struct dma_chan *chan;
        enum dma_state_client ack;

        if (!client)
                return;

        mutex_lock(&dma_list_mutex);
        /* free all channels the client is holding */
        list_for_each_entry(device, &dma_device_list, global_node)
                list_for_each_entry(chan, &device->channels, device_node) {
                        ack = client->event_callback(client, chan,
                                DMA_RESOURCE_REMOVED);

                        if (ack == DMA_ACK) {
                                dma_chan_put(chan);
                                chan->client_count--;
                        }
                }

        list_del(&client->global_node);
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_client_unregister);

/**
 * dma_async_client_chan_request - send all available channels to the
 * client that satisfy the capability mask
 * @client: requester
 */
void dma_async_client_chan_request(struct dma_client *client)
{
        mutex_lock(&dma_list_mutex);
        dma_client_chan_alloc(client);
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_client_chan_request);

/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
        static int id;
        int chancnt = 0, rc;
        struct dma_chan *chan;

        if (!device)
                return -ENODEV;

        /* validate device routines */
        BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
                !device->device_prep_dma_memcpy);
        BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
                !device->device_prep_dma_xor);
        BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) &&
                !device->device_prep_dma_zero_sum);
        BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
                !device->device_prep_dma_memset);
        BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
                !device->device_prep_dma_interrupt);
        BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
                !device->device_prep_slave_sg);
        BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
                !device->device_terminate_all);

        BUG_ON(!device->device_alloc_chan_resources);
        BUG_ON(!device->device_free_chan_resources);
        BUG_ON(!device->device_is_tx_complete);
        BUG_ON(!device->device_issue_pending);
        BUG_ON(!device->dev);

        init_completion(&device->done);
        kref_init(&device->refcount);

        mutex_lock(&dma_list_mutex);
        device->dev_id = id++;
        mutex_unlock(&dma_list_mutex);

        /* represent channels in sysfs. Probably want devs too */
        list_for_each_entry(chan, &device->channels, device_node) {
                chan->local = alloc_percpu(typeof(*chan->local));
                if (chan->local == NULL)
                        continue;

                chan->chan_id = chancnt++;
                chan->dev.class = &dma_devclass;
                chan->dev.parent = device->dev;
                dev_set_name(&chan->dev, "dma%dchan%d",
                             device->dev_id, chan->chan_id);

                rc = device_register(&chan->dev);
                if (rc) {
                        chancnt--;
                        free_percpu(chan->local);
                        chan->local = NULL;
                        goto err_out;
                }

                /* One for the channel, one for the class device */
                kref_get(&device->refcount);
                kref_get(&device->refcount);
                kref_init(&chan->refcount);
                chan->client_count = 0;
                chan->slow_ref = 0;
                INIT_RCU_HEAD(&chan->rcu);
        }

        mutex_lock(&dma_list_mutex);
        list_add_tail(&device->global_node, &dma_device_list);
        mutex_unlock(&dma_list_mutex);

        dma_clients_notify_available();

        return 0;

err_out:
        list_for_each_entry(chan, &device->channels, device_node) {
                if (chan->local == NULL)
                        continue;
                kref_put(&device->refcount, dma_async_device_cleanup);
                device_unregister(&chan->dev);
                chancnt--;
                free_percpu(chan->local);
        }
        return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
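
/*
 * A minimal provider-side sketch of a registration path, kept out of the
 * build with #if 0; example_dma_device, example_chan0, example_probe and the
 * example_* operations (assumed to be implemented elsewhere in such a driver)
 * are hypothetical names, not part of this file.  The driver fills in
 * cap_mask and the mandatory device_* operations, puts its channels on the
 * channels list, and then calls dma_async_device_register(), which creates
 * the sysfs nodes and offers the channels to registered clients.
 */
#if 0
static struct dma_device example_dma_device;
static struct dma_chan example_chan0;

static int example_probe(struct device *dev)
{
        struct dma_device *dma = &example_dma_device;

        dma->dev = dev;
        dma_cap_zero(dma->cap_mask);
        dma_cap_set(DMA_MEMCPY, dma->cap_mask);

        /* mandatory operations, implemented elsewhere in the driver */
        dma->device_alloc_chan_resources = example_alloc_chan_resources;
        dma->device_free_chan_resources = example_free_chan_resources;
        dma->device_prep_dma_memcpy = example_prep_dma_memcpy;
        dma->device_is_tx_complete = example_is_tx_complete;
        dma->device_issue_pending = example_issue_pending;

        INIT_LIST_HEAD(&dma->channels);
        example_chan0.device = dma;
        list_add_tail(&example_chan0.device_node, &dma->channels);

        return dma_async_device_register(dma);
}
#endif
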

/**
 * dma_async_device_cleanup - function called when all references are released
 * @kref: kernel reference object
 */
static void dma_async_device_cleanup(struct kref *kref)
{
        struct dma_device *device;

        device = container_of(kref, struct dma_device, refcount);
        complete(&device->done);
}

/**
 * dma_async_device_unregister - unregisters DMA devices
 * @device: &dma_device
 */
void dma_async_device_unregister(struct dma_device *device)
{
        struct dma_chan *chan;

        mutex_lock(&dma_list_mutex);
        list_del(&device->global_node);
        mutex_unlock(&dma_list_mutex);

        list_for_each_entry(chan, &device->channels, device_node) {
                dma_clients_notify_removed(chan);
                device_unregister(&chan->dev);
                dma_chan_release(chan);
        }

        kref_put(&device->refcount, dma_async_device_cleanup);
        wait_for_completion(&device->done);
}
EXPORT_SYMBOL(dma_async_device_unregister);

/**
 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 * @chan: DMA channel to offload copy to
 * @dest: destination address (virtual)
 * @src: source address (virtual)
 * @len: length
 *
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
 * user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
                        void *src, size_t len)
{
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        int cpu;

        dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
        dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
                                         DMA_CTRL_ACK);

        if (!tx) {
                dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
                dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                return -ENOMEM;
        }

        tx->callback = NULL;
        cookie = tx->tx_submit(tx);

        cpu = get_cpu();
        per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
        per_cpu_ptr(chan->local, cpu)->memcpy_count++;
        put_cpu();

        return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
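
/*
 * A minimal usage sketch for the copy helper above, kept out of the build
 * with #if 0; example_copy is a hypothetical name, and the channel is
 * assumed to have been obtained through the client interface.  A negative
 * cookie means no descriptor could be prepared, so the caller falls back to
 * a CPU copy; otherwise dma_sync_wait() polls for completion.
 */
#if 0
static int example_copy(struct dma_chan *chan, void *dst, void *src,
                        size_t len)
{
        dma_cookie_t cookie;

        cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
        if (cookie < 0) {
                memcpy(dst, src, len);  /* fall back to a CPU copy */
                return 0;
        }

        if (dma_sync_wait(chan, cookie) != DMA_SUCCESS)
                return -EIO;

        return 0;
}
#endif
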

/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages)
 */
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
                        unsigned int offset, void *kdata, size_t len)
{
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        int cpu;

        dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
        dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
                                         DMA_CTRL_ACK);

        if (!tx) {
                dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
                dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                return -ENOMEM;
        }

        tx->callback = NULL;
        cookie = tx->tx_submit(tx);

        cpu = get_cpu();
        per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
        per_cpu_ptr(chan->local, cpu)->memcpy_count++;
        put_cpu();

        return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);

/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_pg/@dest_off and @src_pg/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_pg/@dest_off and @src_pg/@src_off must stay memory resident
 * (kernel memory or locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
        unsigned int dest_off, struct page *src_pg, unsigned int src_off,
        size_t len)
{
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        int cpu;

        dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
        dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
                                DMA_FROM_DEVICE);
        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
                                         DMA_CTRL_ACK);

        if (!tx) {
                dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
                dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                return -ENOMEM;
        }

        tx->callback = NULL;
        cookie = tx->tx_submit(tx);

        cpu = get_cpu();
        per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
        per_cpu_ptr(chan->local, cpu)->memcpy_count++;
        put_cpu();

        return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
        struct dma_chan *chan)
{
        tx->chan = chan;
        spin_lock_init(&tx->lock);
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);
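
/*
 * A minimal provider-side sketch of how a prep routine might use
 * dma_async_tx_descriptor_init(), kept out of the build with #if 0;
 * struct example_desc, example_get_desc() and example_tx_submit() are
 * hypothetical driver internals, not part of this file.
 */
#if 0
struct example_desc {
        struct dma_async_tx_descriptor txd;
        /* hardware descriptor, list linkage, etc. */
};

static struct dma_async_tx_descriptor *
example_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
                        dma_addr_t src, size_t len, unsigned long flags)
{
        struct example_desc *desc = example_get_desc(chan);

        if (!desc)
                return NULL;

        dma_async_tx_descriptor_init(&desc->txd, chan);
        desc->txd.flags = flags;
        desc->txd.tx_submit = example_tx_submit; /* assigns the cookie */
        /* ...program the hardware with dest, src and len here... */

        return &desc->txd;
}
#endif
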
static int __init dma_bus_init(void)
{
        mutex_init(&dma_list_mutex);
        return class_register(&dma_devclass);
}
subsys_initcall(dma_bus_init);