/**
 * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
 *
 * This source file is released under GPL v2 license (no other versions).
 * See the COPYING file included in the main directory of this source
 * distribution for the license terms and conditions.
 *
 * @File	ctvmem.c
 *
 * @Brief
 * This file contains the implementation of the virtual memory management
 * object for the card device.
 *
 * @Author Liu Chun
 * @Date Apr 1 2008
 */

#include "ctvmem.h"
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <sound/pcm.h>

#define CT_PTES_PER_PAGE (CT_PAGE_SIZE / sizeof(void *))
#define CT_ADDRS_PER_PAGE (CT_PTES_PER_PAGE * CT_PAGE_SIZE)
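
/*
 * A page-table page (PTP) is one CT_PAGE_SIZE page of pointer-sized PTEs;
 * each PTE maps one CT_PAGE_SIZE page, so a single PTP covers
 * CT_ADDRS_PER_PAGE bytes of device address space.
 */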

/*
 * Find or create vm block based on requested @size.
 * @size must be page aligned.
 */
static struct ct_vm_block *
get_vm_block(struct ct_vm *vm, unsigned int size)
{
	struct ct_vm_block *block = NULL, *entry;
	struct list_head *pos;

	size = CT_PAGE_ALIGN(size);
	if (size > vm->size) {
		printk(KERN_ERR "ctxfi: Fail! No sufficient device virtual "
				"memory space available!\n");
		return NULL;
	}

	mutex_lock(&vm->lock);
	list_for_each(pos, &vm->unused) {
		entry = list_entry(pos, struct ct_vm_block, list);
		if (entry->size >= size)
			break; /* found a block that is big enough */
	}
	if (pos == &vm->unused)
		goto out;

	if (entry->size == size) {
		/* Move the vm node from unused list to used list directly */
		list_move(&entry->list, &vm->used);
		vm->size -= size;
		block = entry;
		goto out;
	}
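
	/*
	 * The free block found is larger than needed: carve the requested
	 * range off its head and track it with a newly allocated node.
	 */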
	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block)
		goto out;

	block->addr = entry->addr;
	block->size = size;
	list_add(&block->list, &vm->used);
	entry->addr += size;
	entry->size -= size;
	vm->size -= size;

 out:
	mutex_unlock(&vm->lock);
	return block;
}
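
/*
 * Return a block to the free pool: the unused list is kept sorted by
 * address, and neighbouring free blocks are coalesced where possible.
 */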
static void put_vm_block(struct ct_vm *vm, struct ct_vm_block *block)
{
	struct ct_vm_block *entry, *pre_ent;
	struct list_head *pos, *pre;

	block->size = CT_PAGE_ALIGN(block->size);

	mutex_lock(&vm->lock);
	list_del(&block->list);
	vm->size += block->size;

	list_for_each(pos, &vm->unused) {
		entry = list_entry(pos, struct ct_vm_block, list);
		if (entry->addr >= (block->addr + block->size))
			break; /* found a position */
	}
	if (pos == &vm->unused) {
		list_add_tail(&block->list, &vm->unused);
		entry = block;
	} else {
		if ((block->addr + block->size) == entry->addr) {
			entry->addr = block->addr;
			entry->size += block->size;
			kfree(block);
		} else {
			__list_add(&block->list, pos->prev, pos);
			entry = block;
		}
	}
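
	/*
	 * Walk backward from the inserted (or forward-merged) node and
	 * coalesce it with preceding free blocks until a predecessor that
	 * reaches past the current block's start is found.
	 */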
	pos = &entry->list;
	pre = pos->prev;
	while (pre != &vm->unused) {
		entry = list_entry(pos, struct ct_vm_block, list);
		pre_ent = list_entry(pre, struct ct_vm_block, list);
		if ((pre_ent->addr + pre_ent->size) > entry->addr)
			break;

		pre_ent->size += entry->size;
		list_del(pos);
		kfree(entry);
		pos = pre;
		pre = pos->prev;
	}
	mutex_unlock(&vm->lock);
}

/* Map host addr (kmalloced/vmalloced) to device logical addr. */
static struct ct_vm_block *
ct_vm_map(struct ct_vm *vm, struct snd_pcm_substream *substream, int size)
{
	struct ct_vm_block *block;
	unsigned int pte_start;
	unsigned i, pages;
	unsigned long *ptp;

	block = get_vm_block(vm, size);
	if (block == NULL) {
		printk(KERN_ERR "ctxfi: No virtual memory block that is big "
				"enough to allocate!\n");
		return NULL;
	}

	ptp = (unsigned long *)vm->ptp[0].area;
	pte_start = (block->addr >> CT_PAGE_SHIFT);
	pages = block->size >> CT_PAGE_SHIFT;
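
	/*
	 * Fill one PTE per page: each entry receives the physical address
	 * of the corresponding page of the substream's SG buffer.
	 */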
	for (i = 0; i < pages; i++) {
		unsigned long addr;
		addr = snd_pcm_sgbuf_get_addr(substream, i << CT_PAGE_SHIFT);
		ptp[pte_start + i] = addr;
	}
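
	/*
	 * get_vm_block() page-aligned the block's size; record the caller's
	 * exact size here (put_vm_block() re-aligns it on free).
	 */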
	block->size = size;
	return block;
}

static void ct_vm_unmap(struct ct_vm *vm, struct ct_vm_block *block)
{
	/* do unmapping */
	put_vm_block(vm, block);
}

/*
 * Return the host physical addr of the @index-th device
 * page table page on success, or ~0UL on failure.
 * The first returned ~0UL indicates the termination.
 */
static dma_addr_t
ct_get_ptp_phys(struct ct_vm *vm, int index)
{
	dma_addr_t addr;

	addr = (index >= CT_PTP_NUM) ? ~0UL : vm->ptp[index].addr;

	return addr;
}

int ct_vm_create(struct ct_vm **rvm, struct pci_dev *pci)
{
	struct ct_vm *vm;
	struct ct_vm_block *block;
	int i, err = 0;

	*rvm = NULL;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;

	mutex_init(&vm->lock);

	/* Allocate page table pages */
	for (i = 0; i < CT_PTP_NUM; i++) {
		err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
					  snd_dma_pci_data(pci),
					  PAGE_SIZE, &vm->ptp[i]);
		if (err < 0)
			break;
	}
	if (err < 0) {
		/* an allocation failed; release whatever was allocated */
		ct_vm_destroy(vm);
		return -ENOMEM;
	}
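
	/*
	 * All CT_PTP_NUM page-table pages were allocated, so i == CT_PTP_NUM
	 * here; each PTP maps CT_ADDRS_PER_PAGE bytes of device space.
	 */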
	vm->size = CT_ADDRS_PER_PAGE * i;
	vm->map = ct_vm_map;
	vm->unmap = ct_vm_unmap;
	vm->get_ptp_phys = ct_get_ptp_phys;
	INIT_LIST_HEAD(&vm->unused);
	INIT_LIST_HEAD(&vm->used);
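
	/* Seed the unused list with one block spanning the whole space. */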
	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (block) {
		block->addr = 0;
		block->size = vm->size;
		list_add(&block->list, &vm->unused);
	}

	*rvm = vm;
	return 0;
}

/* The caller must ensure no mapping pages are being used
 * by hardware before calling this function. */
void ct_vm_destroy(struct ct_vm *vm)
{
	int i;
	struct list_head *pos;
	struct ct_vm_block *entry;

	/* free used and unused list nodes */
	while (!list_empty(&vm->used)) {
		pos = vm->used.next;
		list_del(pos);
		entry = list_entry(pos, struct ct_vm_block, list);
		kfree(entry);
	}
	while (!list_empty(&vm->unused)) {
		pos = vm->unused.next;
		list_del(pos);
		entry = list_entry(pos, struct ct_vm_block, list);
		kfree(entry);
	}

	/* free allocated page table pages */
	for (i = 0; i < CT_PTP_NUM; i++)
		snd_dma_free_pages(&vm->ptp[i]);

	vm->size = 0;

	kfree(vm);
}