/**
 * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
 *
 * This source file is released under GPL v2 license (no other versions).
 * See the COPYING file included in the main directory of this source
 * distribution for the license terms and conditions.
 *
 * @File	ctvmem.c
 *
 * @Brief
 * This file contains the implementation of the virtual memory management
 * object for the card device.
 *
 * @Author	Liu Chun
 * @Date	Apr 1 2008
 */
#include "ctvmem.h"
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <sound/pcm.h>

#define CT_PTES_PER_PAGE (CT_PAGE_SIZE / sizeof(void *))
#define CT_ADDRS_PER_PAGE (CT_PTES_PER_PAGE * CT_PAGE_SIZE)
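
/*
 * Each page-table page (PTP) holds CT_PTES_PER_PAGE pointer-sized entries,
 * so a single PTP covers CT_ADDRS_PER_PAGE bytes of device address space
 * (e.g. 512 entries spanning 2 MiB with 4 KiB pages and 8-byte entries).
 */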

/*
 * Find or create vm block based on requested @size.
 * @size must be page aligned.
 */
static struct ct_vm_block *
get_vm_block(struct ct_vm *vm, unsigned int size)
{
	struct ct_vm_block *block = NULL, *entry;
	struct list_head *pos;

	size = CT_PAGE_ALIGN(size);
	if (size > vm->size) {
		printk(KERN_ERR "ctxfi: Fail! No sufficient device virtual "
				"memory space available!\n");
		return NULL;
	}

	mutex_lock(&vm->lock);
	list_for_each(pos, &vm->unused) {
		entry = list_entry(pos, struct ct_vm_block, list);
		if (entry->size >= size)
			break; /* found a block that is big enough */
	}
	if (pos == &vm->unused)
		goto out;

	if (entry->size == size) {
		/* Move the vm node from unused list to used list directly */
		list_del(&entry->list);
		list_add(&entry->list, &vm->used);
		vm->size -= size;
		block = entry;
		goto out;
	}
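
	/*
	 * The first-fit block is larger than requested: carve the request
	 * off its front and leave the shrunken remainder on the unused list.
	 */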
	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block)
		goto out;

	block->addr = entry->addr;
	block->size = size;
	list_add(&block->list, &vm->used);
	entry->addr += size;
	entry->size -= size;
	vm->size -= size;

 out:
	mutex_unlock(&vm->lock);
	return block;
}

static void put_vm_block(struct ct_vm *vm, struct ct_vm_block *block)
{
	struct ct_vm_block *entry, *pre_ent;
	struct list_head *pos, *pre;

	block->size = CT_PAGE_ALIGN(block->size);

	mutex_lock(&vm->lock);
	list_del(&block->list);
	vm->size += block->size;

	list_for_each(pos, &vm->unused) {
		entry = list_entry(pos, struct ct_vm_block, list);
		if (entry->addr >= (block->addr + block->size))
			break; /* found a position */
	}
	if (pos == &vm->unused) {
		list_add_tail(&block->list, &vm->unused);
		entry = block;
	} else {
		if ((block->addr + block->size) == entry->addr) {
			entry->addr = block->addr;
			entry->size += block->size;
			kfree(block);
		} else {
			__list_add(&block->list, pos->prev, pos);
			entry = block;
		}
	}
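
	/*
	 * Coalesce backwards: fold the freed block into any exactly
	 * adjacent predecessor, so the sorted unused list never keeps
	 * two touching free blocks unmerged.
	 */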
	pos = &entry->list;
	pre = pos->prev;
	while (pre != &vm->unused) {
		entry = list_entry(pos, struct ct_vm_block, list);
		pre_ent = list_entry(pre, struct ct_vm_block, list);
		/* stop unless the predecessor ends exactly at this block;
		 * merging across a gap would swallow a still-used range */
		if ((pre_ent->addr + pre_ent->size) != entry->addr)
			break;

		pre_ent->size += entry->size;
		list_del(pos);
		kfree(entry);
		pos = pre;
		pre = pos->prev;
	}
	mutex_unlock(&vm->lock);
}

/* Map host addr (kmalloced/vmalloced) to device logical addr. */
static struct ct_vm_block *
ct_vm_map(struct ct_vm *vm, struct snd_pcm_substream *substream, int size)
{
	struct ct_vm_block *block;
	unsigned int pte_start;
	unsigned i, pages;
	unsigned long *ptp;

	block = get_vm_block(vm, size);
	if (block == NULL) {
		printk(KERN_ERR "ctxfi: No virtual memory block that is big "
				"enough to allocate!\n");
		return NULL;
	}

	ptp = vm->ptp[0];
	pte_start = (block->addr >> CT_PAGE_SHIFT);
	pages = block->size >> CT_PAGE_SHIFT;
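
	/*
	 * Fill one PTE per device page: each entry receives the physical
	 * address of the corresponding page of the substream's
	 * scatter-gather buffer.
	 */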
	for (i = 0; i < pages; i++) {
		unsigned long addr;
		addr = snd_pcm_sgbuf_get_addr(substream, i << CT_PAGE_SHIFT);
		ptp[pte_start + i] = addr;
	}

	block->size = size;
	return block;
}

static void ct_vm_unmap(struct ct_vm *vm, struct ct_vm_block *block)
{
	/* do unmapping */
	put_vm_block(vm, block);
}
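
/*
 * Callers are expected to reach these helpers through the function
 * pointers installed by ct_vm_create(); a sketch of the expected usage
 * (the real call sites live elsewhere in the driver):
 *
 *	block = vm->map(vm, substream, size);
 *	if (block)
 *		vm->unmap(vm, block);
 */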

/*
 * Return the host (kmalloced) addr of the @index-th device
 * page table page on success, or NULL on failure.
 * The first returned NULL indicates the termination.
 */
static void *
ct_get_ptp_virt(struct ct_vm *vm, int index)
{
	void *addr;

	addr = (index >= CT_PTP_NUM) ? NULL : vm->ptp[index];

	return addr;
}

int ct_vm_create(struct ct_vm **rvm)
{
	struct ct_vm *vm;
	struct ct_vm_block *block;
	int i;

	*rvm = NULL;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;

	mutex_init(&vm->lock);

	/* Allocate page table pages */
	for (i = 0; i < CT_PTP_NUM; i++) {
		vm->ptp[i] = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!vm->ptp[i])
			break;
	}
	if (!i) {
		/* no page table pages are allocated */
		kfree(vm);
		return -ENOMEM;
	}
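
	/*
	 * A partial PTP allocation is tolerated; the device-addressable
	 * space is simply limited to the pages actually obtained.
	 */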
	vm->size = CT_ADDRS_PER_PAGE * i;
	/* Initialise remaining ptps */
	for (; i < CT_PTP_NUM; i++)
		vm->ptp[i] = NULL;

	vm->map = ct_vm_map;
	vm->unmap = ct_vm_unmap;
	vm->get_ptp_virt = ct_get_ptp_virt;
	INIT_LIST_HEAD(&vm->unused);
	INIT_LIST_HEAD(&vm->used);
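
	/* The whole device address space starts out as a single free block. */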
	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (NULL != block) {
		block->addr = 0;
		block->size = vm->size;
		list_add(&block->list, &vm->unused);
	}

	*rvm = vm;
	return 0;
}

/* The caller must ensure no mapping pages are being used
 * by hardware before calling this function */
void ct_vm_destroy(struct ct_vm *vm)
{
	int i;
	struct list_head *pos;
	struct ct_vm_block *entry;

	/* free used and unused list nodes */
	while (!list_empty(&vm->used)) {
		pos = vm->used.next;
		list_del(pos);
		entry = list_entry(pos, struct ct_vm_block, list);
		kfree(entry);
	}
	while (!list_empty(&vm->unused)) {
		pos = vm->unused.next;
		list_del(pos);
		entry = list_entry(pos, struct ct_vm_block, list);
		kfree(entry);
	}

	/* free allocated page table pages */
	for (i = 0; i < CT_PTP_NUM; i++)
		kfree(vm->ptp[i]);

	vm->size = 0;

	kfree(vm);
}