memalloc.c

/*
 *  Copyright (c) by Jaroslav Kysela <perex@suse.cz>
 *                   Takashi Iwai <tiwai@suse.de>
 *
 *  Generic memory allocators
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>
#include <asm/semaphore.h>
#include <sound/memalloc.h>
#ifdef CONFIG_SBUS
#include <asm/sbus.h>
#endif

MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>, Jaroslav Kysela <perex@suse.cz>");
MODULE_DESCRIPTION("Memory allocator for ALSA system.");
MODULE_LICENSE("GPL");

#ifndef SNDRV_CARDS
#define SNDRV_CARDS	8
#endif

/* FIXME: so far only some PCI devices have the preallocation table */
#ifdef CONFIG_PCI
static int enable[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)] = 1};
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable cards to allocate buffers.");
#endif
/*
 */

void *snd_malloc_sgbuf_pages(struct device *device,
                             size_t size, struct snd_dma_buffer *dmab,
                             size_t *res_size);
int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab);

/*
 */

static DECLARE_MUTEX(list_mutex);
static LIST_HEAD(mem_list_head);

/* buffer preservation list */
struct snd_mem_list {
        struct snd_dma_buffer buffer;
        unsigned int id;
        struct list_head list;
};

/* id for pre-allocated buffers */
#define SNDRV_DMA_DEVICE_UNUSED (unsigned int)-1

#ifdef CONFIG_SND_DEBUG
#define __ASTRING__(x) #x
#define snd_assert(expr, args...) do {\
        if (!(expr)) {\
                printk(KERN_ERR "snd-malloc: BUG? (%s) (called from %p)\n", __ASTRING__(expr), __builtin_return_address(0));\
                args;\
        }\
} while (0)
#else
#define snd_assert(expr, args...) /**/
#endif
/*
 *  Hacks
 */

#if defined(__i386__) || defined(__ppc__) || defined(__x86_64__)
/*
 * A hack to allocate large buffers via dma_alloc_coherent()
 *
 * since dma_alloc_coherent always tries GFP_DMA when the requested
 * pci memory region is below 32bit, it happens quite often that even
 * an order-2 page allocation fails.
 *
 * so in the following, we allocate at first without dma_mask, so that
 * the allocation is done without GFP_DMA.  if the resulting area doesn't
 * match the requested region, we reallocate with the original dma_mask
 * again.
 *
 * Really, we want to move this type of thing into dma_alloc_coherent()
 * so dma_mask doesn't have to be messed with.
 */
static void *snd_dma_hack_alloc_coherent(struct device *dev, size_t size,
                                         dma_addr_t *dma_handle, int flags)
{
        void *ret;
        u64 dma_mask, coherent_dma_mask;

        if (dev == NULL || !dev->dma_mask)
                return dma_alloc_coherent(dev, size, dma_handle, flags);
        dma_mask = *dev->dma_mask;
        coherent_dma_mask = dev->coherent_dma_mask;
        *dev->dma_mask = 0xffffffff; /* do without masking */
        dev->coherent_dma_mask = 0xffffffff; /* do without masking */
        ret = dma_alloc_coherent(dev, size, dma_handle, flags);
        *dev->dma_mask = dma_mask; /* restore */
        dev->coherent_dma_mask = coherent_dma_mask; /* restore */
        if (ret) {
                /* obtained address is out of range? */
                if (((unsigned long)*dma_handle + size - 1) & ~dma_mask) {
                        /* reallocate with the proper mask */
                        dma_free_coherent(dev, size, ret, *dma_handle);
                        ret = dma_alloc_coherent(dev, size, dma_handle, flags);
                }
        } else {
                /* hope to succeed now with the proper mask... */
                if (dma_mask != 0xffffffffUL) {
                        /* allocation with GFP_ATOMIC to avoid the long stall */
                        flags &= ~GFP_KERNEL;
                        flags |= GFP_ATOMIC;
                        ret = dma_alloc_coherent(dev, size, dma_handle, flags);
                }
        }
        return ret;
}
/* redefine dma_alloc_coherent for some architectures */
#undef dma_alloc_coherent
#define dma_alloc_coherent snd_dma_hack_alloc_coherent

#endif /* arch */

#if ! defined(__arm__)
#define NEED_RESERVE_PAGES
#endif

/*
 *
 *  Generic memory allocators
 *
 */

static long snd_allocated_pages; /* holding the number of allocated pages */

static inline void inc_snd_pages(int order)
{
        snd_allocated_pages += 1 << order;
}

static inline void dec_snd_pages(int order)
{
        snd_allocated_pages -= 1 << order;
}

static void mark_pages(struct page *page, int order)
{
        struct page *last_page = page + (1 << order);
        while (page < last_page)
                SetPageReserved(page++);
}

static void unmark_pages(struct page *page, int order)
{
        struct page *last_page = page + (1 << order);
        while (page < last_page)
                ClearPageReserved(page++);
}
/**
 * snd_malloc_pages - allocate pages with the given size
 * @size: the size to allocate in bytes
 * @gfp_flags: the allocation conditions, GFP_XXX
 *
 * Allocates physically contiguous pages covering the given size.
 *
 * Returns the pointer to the buffer, or NULL if there is not enough memory.
 */
void *snd_malloc_pages(size_t size, unsigned int gfp_flags)
{
        int pg;
        void *res;

        snd_assert(size > 0, return NULL);
        snd_assert(gfp_flags != 0, return NULL);
        pg = get_order(size);
        if ((res = (void *) __get_free_pages(gfp_flags, pg)) != NULL) {
                mark_pages(virt_to_page(res), pg);
                inc_snd_pages(pg);
        }
        return res;
}
/**
 * snd_free_pages - release the pages
 * @ptr: the buffer pointer to release
 * @size: the allocated buffer size
 *
 * Releases the buffer allocated via snd_malloc_pages().
 */
void snd_free_pages(void *ptr, size_t size)
{
        int pg;

        if (ptr == NULL)
                return;
        pg = get_order(size);
        dec_snd_pages(pg);
        unmark_pages(virt_to_page(ptr), pg);
        free_pages((unsigned long) ptr, pg);
}
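
/*
 * Illustrative usage sketch (hypothetical caller, not compiled): a user of
 * this API pairs snd_malloc_pages() with a snd_free_pages() of the same
 * size.  The 8 KB size below is an arbitrary example value.
 */
#if 0 /* example only */
static int example_pages(void)
{
        size_t size = 8192;
        void *buf = snd_malloc_pages(size, GFP_KERNEL);
        if (buf == NULL)
                return -ENOMEM;
        /* ... use the physically contiguous buffer ... */
        snd_free_pages(buf, size);
        return 0;
}
#endif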
/*
 *
 *  Bus-specific memory allocators
 *
 */

/* allocate the coherent DMA pages */
static void *snd_malloc_dev_pages(struct device *dev, size_t size, dma_addr_t *dma)
{
        int pg;
        void *res;
        unsigned int gfp_flags;

        snd_assert(size > 0, return NULL);
        snd_assert(dma != NULL, return NULL);
        pg = get_order(size);
        gfp_flags = GFP_KERNEL
                | __GFP_NORETRY /* don't trigger OOM-killer */
                | __GFP_NOWARN; /* no stack trace print - this call is non-critical */
        res = dma_alloc_coherent(dev, PAGE_SIZE << pg, dma, gfp_flags);
        if (res != NULL) {
#ifdef NEED_RESERVE_PAGES
                mark_pages(virt_to_page(res), pg); /* should be dma_to_page() */
#endif
                inc_snd_pages(pg);
        }
        return res;
}

/* free the coherent DMA pages */
static void snd_free_dev_pages(struct device *dev, size_t size, void *ptr,
                               dma_addr_t dma)
{
        int pg;

        if (ptr == NULL)
                return;
        pg = get_order(size);
        dec_snd_pages(pg);
#ifdef NEED_RESERVE_PAGES
        unmark_pages(virt_to_page(ptr), pg); /* should be dma_to_page() */
#endif
        dma_free_coherent(dev, PAGE_SIZE << pg, ptr, dma);
}
#ifdef CONFIG_SBUS

static void *snd_malloc_sbus_pages(struct device *dev, size_t size,
                                   dma_addr_t *dma_addr)
{
        struct sbus_dev *sdev = (struct sbus_dev *)dev;
        int pg;
        void *res;

        snd_assert(size > 0, return NULL);
        snd_assert(dma_addr != NULL, return NULL);
        pg = get_order(size);
        res = sbus_alloc_consistent(sdev, PAGE_SIZE * (1 << pg), dma_addr);
        if (res != NULL)
                inc_snd_pages(pg);
        return res;
}

static void snd_free_sbus_pages(struct device *dev, size_t size,
                                void *ptr, dma_addr_t dma_addr)
{
        struct sbus_dev *sdev = (struct sbus_dev *)dev;
        int pg;

        if (ptr == NULL)
                return;
        pg = get_order(size);
        dec_snd_pages(pg);
        sbus_free_consistent(sdev, PAGE_SIZE * (1 << pg), ptr, dma_addr);
}

#endif /* CONFIG_SBUS */
/*
 *
 *  ALSA generic memory management
 *
 */

/**
 * snd_dma_alloc_pages - allocate the buffer area according to the given type
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.
 *
 * Returns zero if a buffer with the given size is allocated successfully,
 * or a negative value on error.
 */
int snd_dma_alloc_pages(int type, struct device *device, size_t size,
                        struct snd_dma_buffer *dmab)
{
        snd_assert(size > 0, return -ENXIO);
        snd_assert(dmab != NULL, return -ENXIO);

        dmab->dev.type = type;
        dmab->dev.dev = device;
        dmab->bytes = 0;
        switch (type) {
        case SNDRV_DMA_TYPE_CONTINUOUS:
                /* for CONTINUOUS buffers, the device pointer carries the GFP flags */
                dmab->area = snd_malloc_pages(size, (unsigned long)device);
                dmab->addr = 0;
                break;
#ifdef CONFIG_SBUS
        case SNDRV_DMA_TYPE_SBUS:
                dmab->area = snd_malloc_sbus_pages(device, size, &dmab->addr);
                break;
#endif
        case SNDRV_DMA_TYPE_DEV:
                dmab->area = snd_malloc_dev_pages(device, size, &dmab->addr);
                break;
        case SNDRV_DMA_TYPE_DEV_SG:
                snd_malloc_sgbuf_pages(device, size, dmab, NULL);
                break;
        default:
                printk(KERN_ERR "snd-malloc: invalid device type %d\n", type);
                dmab->area = NULL;
                dmab->addr = 0;
                return -ENXIO;
        }
        if (! dmab->area)
                return -ENOMEM;
        dmab->bytes = size;
        return 0;
}
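
/*
 * Illustrative usage sketch (hypothetical caller, not compiled): a driver
 * allocates a coherent DMA buffer for its PCI device and later releases it
 * with snd_dma_free_pages().  snd_dma_pci_data() wraps the pci_dev as the
 * struct device argument; the 64 KB size is an arbitrary example value.
 */
#if 0 /* example only */
static int example_dma_alloc(struct pci_dev *pci)
{
        struct snd_dma_buffer dmab;
        if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
                                64 * 1024, &dmab) < 0)
                return -ENOMEM;
        /* ... program the hardware with dmab.addr, access via dmab.area ... */
        snd_dma_free_pages(&dmab);
        return 0;
}
#endif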
/**
 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.  When no space is left, this function halves the size and
 * tries to allocate again.  The size actually allocated is stored in
 * dmab->bytes.
 *
 * Returns zero if a buffer is allocated successfully,
 * or a negative value on error.
 */
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
                                 struct snd_dma_buffer *dmab)
{
        int err;

        snd_assert(size > 0, return -ENXIO);
        snd_assert(dmab != NULL, return -ENXIO);

        while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
                if (err != -ENOMEM)
                        return err;
                size >>= 1;
                if (size <= PAGE_SIZE)
                        return -ENOMEM;
        }
        if (! dmab->area)
                return -ENOMEM;
        return 0;
}
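
/*
 * Illustrative usage sketch (hypothetical caller, not compiled): with the
 * fallback variant, the caller asks for a large buffer and reads dmab.bytes
 * afterwards to learn how much was actually obtained; the 1 MB request is
 * an arbitrary example value.
 */
#if 0 /* example only */
static int example_fallback(struct pci_dev *pci)
{
        struct snd_dma_buffer dmab;
        if (snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV,
                                         snd_dma_pci_data(pci),
                                         1024 * 1024, &dmab) < 0)
                return -ENOMEM;
        printk(KERN_DEBUG "got %d bytes at %p\n", (int)dmab.bytes, dmab.area);
        snd_dma_free_pages(&dmab);
        return 0;
}
#endif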
/**
 * snd_dma_free_pages - release the allocated buffer
 * @dmab: the buffer allocation record to release
 *
 * Releases the buffer allocated via snd_dma_alloc_pages().
 */
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
        switch (dmab->dev.type) {
        case SNDRV_DMA_TYPE_CONTINUOUS:
                snd_free_pages(dmab->area, dmab->bytes);
                break;
#ifdef CONFIG_SBUS
        case SNDRV_DMA_TYPE_SBUS:
                snd_free_sbus_pages(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
                break;
#endif
        case SNDRV_DMA_TYPE_DEV:
                snd_free_dev_pages(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
                break;
        case SNDRV_DMA_TYPE_DEV_SG:
                snd_free_sgbuf_pages(dmab);
                break;
        default:
                printk(KERN_ERR "snd-malloc: invalid device type %d\n", dmab->dev.type);
        }
}
/**
 * snd_dma_get_reserved_buf - get the reserved buffer for the given device
 * @dmab: the buffer allocation record to store
 * @id: the buffer id
 *
 * Searches the reserved-buffer list for a buffer with a matching id and
 * device.  When one is found, it is removed from the list and its contents
 * are copied to @dmab.
 *
 * Returns the size of the buffer if it is found, or zero if not found.
 */
size_t snd_dma_get_reserved_buf(struct snd_dma_buffer *dmab, unsigned int id)
{
        struct list_head *p;
        struct snd_mem_list *mem;

        snd_assert(dmab, return 0);

        down(&list_mutex);
        list_for_each(p, &mem_list_head) {
                mem = list_entry(p, struct snd_mem_list, list);
                if (mem->id == id &&
                    ! memcmp(&mem->buffer.dev, &dmab->dev, sizeof(dmab->dev))) {
                        list_del(p);
                        *dmab = mem->buffer;
                        kfree(mem);
                        up(&list_mutex);
                        return dmab->bytes;
                }
        }
        up(&list_mutex);
        return 0;
}
/**
 * snd_dma_reserve_buf - reserve the buffer
 * @dmab: the buffer to reserve
 * @id: the buffer id
 *
 * Adds the given buffer to the reserved-buffer list so that it can be
 * re-obtained later via snd_dma_get_reserved_buf().
 *
 * Returns zero if successful, or a negative code on error.
 */
int snd_dma_reserve_buf(struct snd_dma_buffer *dmab, unsigned int id)
{
        struct snd_mem_list *mem;

        snd_assert(dmab, return -EINVAL);
        mem = kmalloc(sizeof(*mem), GFP_KERNEL);
        if (! mem)
                return -ENOMEM;
        down(&list_mutex);
        mem->buffer = *dmab;
        mem->id = id;
        list_add_tail(&mem->list, &mem_list_head);
        up(&list_mutex);
        return 0;
}
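
/*
 * Illustrative usage sketch (hypothetical caller, not compiled): a driver
 * tries to re-use a previously reserved buffer at probe time, allocating a
 * fresh one only if none is found; at remove time it would reserve the
 * buffer again so it survives module reloads.  Note that the matching in
 * snd_dma_get_reserved_buf() compares dmab->dev, so type and device must
 * be filled in before the lookup.
 */
#if 0 /* example only */
static int example_reserved(struct pci_dev *pci, struct snd_dma_buffer *dmab)
{
        memset(dmab, 0, sizeof(*dmab));
        dmab->dev.type = SNDRV_DMA_TYPE_DEV;
        dmab->dev.dev = snd_dma_pci_data(pci);
        if (! snd_dma_get_reserved_buf(dmab, snd_dma_pci_buf_id(pci))) {
                if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
                                        snd_dma_pci_data(pci),
                                        64 * 1024, dmab) < 0)
                        return -ENOMEM;
        }
        /* at remove time: snd_dma_reserve_buf(dmab, snd_dma_pci_buf_id(pci)); */
        return 0;
}
#endif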
/*
 * purge all reserved buffers
 */
static void free_all_reserved_pages(void)
{
        struct list_head *p;
        struct snd_mem_list *mem;

        down(&list_mutex);
        while (! list_empty(&mem_list_head)) {
                p = mem_list_head.next;
                mem = list_entry(p, struct snd_mem_list, list);
                list_del(p);
                snd_dma_free_pages(&mem->buffer);
                kfree(mem);
        }
        up(&list_mutex);
}
/*
 * allocation of buffers for pre-defined devices
 */

#ifdef CONFIG_PCI
/* FIXME: for pci only - other bus? */
struct prealloc_dev {
        unsigned short vendor;
        unsigned short device;
        unsigned long dma_mask;
        unsigned int size;
        unsigned int buffers;
};

#define HAMMERFALL_BUFFER_SIZE    (16*1024*4*(26+1)+0x10000)

static struct prealloc_dev prealloc_devices[] __initdata = {
        {
                /* hammerfall */
                .vendor = 0x10ee,
                .device = 0x3fc4,
                .dma_mask = 0xffffffff,
                .size = HAMMERFALL_BUFFER_SIZE,
                .buffers = 2
        },
        {
                /* HDSP */
                .vendor = 0x10ee,
                .device = 0x3fc5,
                .dma_mask = 0xffffffff,
                .size = HAMMERFALL_BUFFER_SIZE,
                .buffers = 2
        },
        { }, /* terminator */
};
static void __init preallocate_cards(void)
{
        struct pci_dev *pci = NULL;
        int card;

        card = 0;
        while ((pci = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pci)) != NULL) {
                struct prealloc_dev *dev;
                unsigned int i;
                if (card >= SNDRV_CARDS)
                        break;
                for (dev = prealloc_devices; dev->vendor; dev++) {
                        if (dev->vendor == pci->vendor && dev->device == pci->device)
                                break;
                }
                if (! dev->vendor)
                        continue;
                if (! enable[card++]) {
                        printk(KERN_DEBUG "snd-page-alloc: skipping card %d, device %04x:%04x\n", card, pci->vendor, pci->device);
                        continue;
                }
                if (pci_set_dma_mask(pci, dev->dma_mask) < 0 ||
                    pci_set_consistent_dma_mask(pci, dev->dma_mask) < 0) {
                        printk(KERN_ERR "snd-page-alloc: cannot set DMA mask %lx for pci %04x:%04x\n", dev->dma_mask, dev->vendor, dev->device);
                        continue;
                }
                for (i = 0; i < dev->buffers; i++) {
                        struct snd_dma_buffer dmab;
                        memset(&dmab, 0, sizeof(dmab));
                        if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
                                                dev->size, &dmab) < 0)
                                printk(KERN_WARNING "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", dev->size);
                        else
                                snd_dma_reserve_buf(&dmab, snd_dma_pci_buf_id(pci));
                }
        }
}
#else
#define preallocate_cards() /* NOP */
#endif
#ifdef CONFIG_PROC_FS
/*
 * proc file interface
 */
static int snd_mem_proc_read(char *page, char **start, off_t off,
                             int count, int *eof, void *data)
{
        int len = 0;
        long pages = snd_allocated_pages >> (PAGE_SHIFT-12);
        struct list_head *p;
        struct snd_mem_list *mem;
        int devno;
        static char *types[] = { "UNKNOWN", "CONT", "DEV", "DEV-SG", "SBUS" };

        down(&list_mutex);
        len += snprintf(page + len, count - len,
                        "pages  : %li bytes (%li pages per %likB)\n",
                        pages * PAGE_SIZE, pages, PAGE_SIZE / 1024);
        devno = 0;
        list_for_each(p, &mem_list_head) {
                mem = list_entry(p, struct snd_mem_list, list);
                devno++;
                len += snprintf(page + len, count - len,
                                "buffer %d : ID %08x : type %s\n",
                                devno, mem->id, types[mem->buffer.dev.type]);
                len += snprintf(page + len, count - len,
                                "  addr = 0x%lx, size = %d bytes\n",
                                (unsigned long)mem->buffer.addr, (int)mem->buffer.bytes);
        }
        up(&list_mutex);
        return len;
}
#endif /* CONFIG_PROC_FS */
/*
 * module entry
 */
static int __init snd_mem_init(void)
{
#ifdef CONFIG_PROC_FS
        create_proc_read_entry("driver/snd-page-alloc", 0, NULL, snd_mem_proc_read, NULL);
#endif
        preallocate_cards();
        return 0;
}

static void __exit snd_mem_exit(void)
{
        remove_proc_entry("driver/snd-page-alloc", NULL);
        free_all_reserved_pages();
        if (snd_allocated_pages > 0)
                printk(KERN_ERR "snd-malloc: Memory leak?  pages not freed = %li\n", snd_allocated_pages);
}

module_init(snd_mem_init)
module_exit(snd_mem_exit)
/*
 * exports
 */
EXPORT_SYMBOL(snd_dma_alloc_pages);
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
EXPORT_SYMBOL(snd_dma_free_pages);
EXPORT_SYMBOL(snd_dma_get_reserved_buf);
EXPORT_SYMBOL(snd_dma_reserve_buf);
EXPORT_SYMBOL(snd_malloc_pages);
EXPORT_SYMBOL(snd_free_pages);