/* arch/ppc64/mm/imalloc.c */
/*
 * Copyright (C) 2001 PPC 64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
  9. #include <linux/slab.h>
  10. #include <linux/vmalloc.h>
  11. #include <asm/uaccess.h>
  12. #include <asm/pgalloc.h>
  13. #include <asm/pgtable.h>
  14. #include <linux/mutex.h>
  15. #include <asm/cacheflush.h>
  16. #include "mmu_decl.h"
/* Serializes all traversals and modifications of imlist below. */
static DEFINE_MUTEX(imlist_mutex);
/* Address-sorted singly linked list of allocated imalloc regions. */
struct vm_struct * imlist = NULL;
  19. static int get_free_im_addr(unsigned long size, unsigned long *im_addr)
  20. {
  21. unsigned long addr;
  22. struct vm_struct **p, *tmp;
  23. addr = ioremap_bot;
  24. for (p = &imlist; (tmp = *p) ; p = &tmp->next) {
  25. if (size + addr < (unsigned long) tmp->addr)
  26. break;
  27. if ((unsigned long)tmp->addr >= ioremap_bot)
  28. addr = tmp->size + (unsigned long) tmp->addr;
  29. if (addr >= IMALLOC_END-size)
  30. return 1;
  31. }
  32. *im_addr = addr;
  33. return 0;
  34. }
  35. /* Return whether the region described by v_addr and size is a subset
  36. * of the region described by parent
  37. */
  38. static inline int im_region_is_subset(unsigned long v_addr, unsigned long size,
  39. struct vm_struct *parent)
  40. {
  41. return (int) (v_addr >= (unsigned long) parent->addr &&
  42. v_addr < (unsigned long) parent->addr + parent->size &&
  43. size < parent->size);
  44. }
  45. /* Return whether the region described by v_addr and size is a superset
  46. * of the region described by child
  47. */
  48. static int im_region_is_superset(unsigned long v_addr, unsigned long size,
  49. struct vm_struct *child)
  50. {
  51. struct vm_struct parent;
  52. parent.addr = (void *) v_addr;
  53. parent.size = size;
  54. return im_region_is_subset((unsigned long) child->addr, child->size,
  55. &parent);
  56. }
  57. /* Return whether the region described by v_addr and size overlaps
  58. * the region described by vm. Overlapping regions meet the
  59. * following conditions:
  60. * 1) The regions share some part of the address space
  61. * 2) The regions aren't identical
  62. * 3) Neither region is a subset of the other
  63. */
  64. static int im_region_overlaps(unsigned long v_addr, unsigned long size,
  65. struct vm_struct *vm)
  66. {
  67. if (im_region_is_superset(v_addr, size, vm))
  68. return 0;
  69. return (v_addr + size > (unsigned long) vm->addr + vm->size &&
  70. v_addr < (unsigned long) vm->addr + vm->size) ||
  71. (v_addr < (unsigned long) vm->addr &&
  72. v_addr + size > (unsigned long) vm->addr);
  73. }
  74. /* Determine imalloc status of region described by v_addr and size.
  75. * Can return one of the following:
  76. * IM_REGION_UNUSED - Entire region is unallocated in imalloc space.
  77. * IM_REGION_SUBSET - Region is a subset of a region that is already
  78. * allocated in imalloc space.
  79. * vm will be assigned to a ptr to the parent region.
  80. * IM_REGION_EXISTS - Exact region already allocated in imalloc space.
  81. * vm will be assigned to a ptr to the existing imlist
  82. * member.
  83. * IM_REGION_OVERLAPS - Region overlaps an allocated region in imalloc space.
  84. * IM_REGION_SUPERSET - Region is a superset of a region that is already
  85. * allocated in imalloc space.
  86. */
  87. static int im_region_status(unsigned long v_addr, unsigned long size,
  88. struct vm_struct **vm)
  89. {
  90. struct vm_struct *tmp;
  91. for (tmp = imlist; tmp; tmp = tmp->next)
  92. if (v_addr < (unsigned long) tmp->addr + tmp->size)
  93. break;
  94. *vm = NULL;
  95. if (tmp) {
  96. if (im_region_overlaps(v_addr, size, tmp))
  97. return IM_REGION_OVERLAP;
  98. *vm = tmp;
  99. if (im_region_is_subset(v_addr, size, tmp)) {
  100. /* Return with tmp pointing to superset */
  101. return IM_REGION_SUBSET;
  102. }
  103. if (im_region_is_superset(v_addr, size, tmp)) {
  104. /* Return with tmp pointing to first subset */
  105. return IM_REGION_SUPERSET;
  106. }
  107. else if (v_addr == (unsigned long) tmp->addr &&
  108. size == tmp->size) {
  109. /* Return with tmp pointing to exact region */
  110. return IM_REGION_EXISTS;
  111. }
  112. }
  113. return IM_REGION_UNUSED;
  114. }
  115. static struct vm_struct * split_im_region(unsigned long v_addr,
  116. unsigned long size, struct vm_struct *parent)
  117. {
  118. struct vm_struct *vm1 = NULL;
  119. struct vm_struct *vm2 = NULL;
  120. struct vm_struct *new_vm = NULL;
  121. vm1 = (struct vm_struct *) kmalloc(sizeof(*vm1), GFP_KERNEL);
  122. if (vm1 == NULL) {
  123. printk(KERN_ERR "%s() out of memory\n", __FUNCTION__);
  124. return NULL;
  125. }
  126. if (v_addr == (unsigned long) parent->addr) {
  127. /* Use existing parent vm_struct to represent child, allocate
  128. * new one for the remainder of parent range
  129. */
  130. vm1->size = parent->size - size;
  131. vm1->addr = (void *) (v_addr + size);
  132. vm1->next = parent->next;
  133. parent->size = size;
  134. parent->next = vm1;
  135. new_vm = parent;
  136. } else if (v_addr + size == (unsigned long) parent->addr +
  137. parent->size) {
  138. /* Allocate new vm_struct to represent child, use existing
  139. * parent one for remainder of parent range
  140. */
  141. vm1->size = size;
  142. vm1->addr = (void *) v_addr;
  143. vm1->next = parent->next;
  144. new_vm = vm1;
  145. parent->size -= size;
  146. parent->next = vm1;
  147. } else {
  148. /* Allocate two new vm_structs for the new child and
  149. * uppermost remainder, and use existing parent one for the
  150. * lower remainder of parent range
  151. */
  152. vm2 = (struct vm_struct *) kmalloc(sizeof(*vm2), GFP_KERNEL);
  153. if (vm2 == NULL) {
  154. printk(KERN_ERR "%s() out of memory\n", __FUNCTION__);
  155. kfree(vm1);
  156. return NULL;
  157. }
  158. vm1->size = size;
  159. vm1->addr = (void *) v_addr;
  160. vm1->next = vm2;
  161. new_vm = vm1;
  162. vm2->size = ((unsigned long) parent->addr + parent->size) -
  163. (v_addr + size);
  164. vm2->addr = (void *) v_addr + size;
  165. vm2->next = parent->next;
  166. parent->size = v_addr - (unsigned long) parent->addr;
  167. parent->next = vm1;
  168. }
  169. return new_vm;
  170. }
  171. static struct vm_struct * __add_new_im_area(unsigned long req_addr,
  172. unsigned long size)
  173. {
  174. struct vm_struct **p, *tmp, *area;
  175. for (p = &imlist; (tmp = *p) ; p = &tmp->next) {
  176. if (req_addr + size <= (unsigned long)tmp->addr)
  177. break;
  178. }
  179. area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL);
  180. if (!area)
  181. return NULL;
  182. area->flags = 0;
  183. area->addr = (void *)req_addr;
  184. area->size = size;
  185. area->next = *p;
  186. *p = area;
  187. return area;
  188. }
  189. static struct vm_struct * __im_get_area(unsigned long req_addr,
  190. unsigned long size,
  191. int criteria)
  192. {
  193. struct vm_struct *tmp;
  194. int status;
  195. status = im_region_status(req_addr, size, &tmp);
  196. if ((criteria & status) == 0) {
  197. return NULL;
  198. }
  199. switch (status) {
  200. case IM_REGION_UNUSED:
  201. tmp = __add_new_im_area(req_addr, size);
  202. break;
  203. case IM_REGION_SUBSET:
  204. tmp = split_im_region(req_addr, size, tmp);
  205. break;
  206. case IM_REGION_EXISTS:
  207. /* Return requested region */
  208. break;
  209. case IM_REGION_SUPERSET:
  210. /* Return first existing subset of requested region */
  211. break;
  212. default:
  213. printk(KERN_ERR "%s() unexpected imalloc region status\n",
  214. __FUNCTION__);
  215. tmp = NULL;
  216. }
  217. return tmp;
  218. }
  219. struct vm_struct * im_get_free_area(unsigned long size)
  220. {
  221. struct vm_struct *area;
  222. unsigned long addr;
  223. mutex_lock(&imlist_mutex);
  224. if (get_free_im_addr(size, &addr)) {
  225. printk(KERN_ERR "%s() cannot obtain addr for size 0x%lx\n",
  226. __FUNCTION__, size);
  227. area = NULL;
  228. goto next_im_done;
  229. }
  230. area = __im_get_area(addr, size, IM_REGION_UNUSED);
  231. if (area == NULL) {
  232. printk(KERN_ERR
  233. "%s() cannot obtain area for addr 0x%lx size 0x%lx\n",
  234. __FUNCTION__, addr, size);
  235. }
  236. next_im_done:
  237. mutex_unlock(&imlist_mutex);
  238. return area;
  239. }
  240. struct vm_struct * im_get_area(unsigned long v_addr, unsigned long size,
  241. int criteria)
  242. {
  243. struct vm_struct *area;
  244. mutex_lock(&imlist_mutex);
  245. area = __im_get_area(v_addr, size, criteria);
  246. mutex_unlock(&imlist_mutex);
  247. return area;
  248. }
  249. void im_free(void * addr)
  250. {
  251. struct vm_struct **p, *tmp;
  252. if (!addr)
  253. return;
  254. if ((unsigned long) addr & ~PAGE_MASK) {
  255. printk(KERN_ERR "Trying to %s bad address (%p)\n", __FUNCTION__, addr);
  256. return;
  257. }
  258. mutex_lock(&imlist_mutex);
  259. for (p = &imlist ; (tmp = *p) ; p = &tmp->next) {
  260. if (tmp->addr == addr) {
  261. *p = tmp->next;
  262. unmap_vm_area(tmp);
  263. kfree(tmp);
  264. mutex_unlock(&imlist_mutex);
  265. return;
  266. }
  267. }
  268. mutex_unlock(&imlist_mutex);
  269. printk(KERN_ERR "Trying to %s nonexistent area (%p)\n", __FUNCTION__,
  270. addr);
  271. }