/* imalloc.c */
  1. /*
  2. * c 2001 PPC 64 Team, IBM Corp
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public License
  6. * as published by the Free Software Foundation; either version
  7. * 2 of the License, or (at your option) any later version.
  8. */
  9. #include <linux/slab.h>
  10. #include <linux/vmalloc.h>
  11. #include <asm/uaccess.h>
  12. #include <asm/pgalloc.h>
  13. #include <asm/pgtable.h>
  14. #include <asm/semaphore.h>
  15. #include <asm/imalloc.h>
  16. static DECLARE_MUTEX(imlist_sem);
  17. struct vm_struct * imlist = NULL;
/* Find the lowest free address range of the given size in imalloc
 * space (from ioremap_bot up to IMALLOC_END) by walking the
 * address-ordered imlist.  On success store the start address in
 * *im_addr and return 0; return 1 if no large-enough gap exists.
 * Caller must hold imlist_sem.
 */
static int get_free_im_addr(unsigned long size, unsigned long *im_addr)
{
	unsigned long addr;
	struct vm_struct **p, *tmp;

	addr = ioremap_bot;
	for (p = &imlist; (tmp = *p) ; p = &tmp->next) {
		/* Gap before this region is large enough - use it */
		if (size + addr < (unsigned long) tmp->addr)
			break;
		/* Regions below ioremap_bot don't move the candidate;
		 * otherwise bump it past the end of this region.
		 */
		if ((unsigned long)tmp->addr >= ioremap_bot)
			addr = tmp->size + (unsigned long) tmp->addr;
		/* Candidate region would run past IMALLOC_END */
		if (addr > IMALLOC_END-size)
			return 1;
	}
	*im_addr = addr;
	return 0;
}
  34. /* Return whether the region described by v_addr and size is a subset
  35. * of the region described by parent
  36. */
  37. static inline int im_region_is_subset(unsigned long v_addr, unsigned long size,
  38. struct vm_struct *parent)
  39. {
  40. return (int) (v_addr >= (unsigned long) parent->addr &&
  41. v_addr < (unsigned long) parent->addr + parent->size &&
  42. size < parent->size);
  43. }
  44. /* Return whether the region described by v_addr and size is a superset
  45. * of the region described by child
  46. */
  47. static int im_region_is_superset(unsigned long v_addr, unsigned long size,
  48. struct vm_struct *child)
  49. {
  50. struct vm_struct parent;
  51. parent.addr = (void *) v_addr;
  52. parent.size = size;
  53. return im_region_is_subset((unsigned long) child->addr, child->size,
  54. &parent);
  55. }
  56. /* Return whether the region described by v_addr and size overlaps
  57. * the region described by vm. Overlapping regions meet the
  58. * following conditions:
  59. * 1) The regions share some part of the address space
  60. * 2) The regions aren't identical
  61. * 3) Neither region is a subset of the other
  62. */
  63. static int im_region_overlaps(unsigned long v_addr, unsigned long size,
  64. struct vm_struct *vm)
  65. {
  66. if (im_region_is_superset(v_addr, size, vm))
  67. return 0;
  68. return (v_addr + size > (unsigned long) vm->addr + vm->size &&
  69. v_addr < (unsigned long) vm->addr + vm->size) ||
  70. (v_addr < (unsigned long) vm->addr &&
  71. v_addr + size > (unsigned long) vm->addr);
  72. }
/* Determine imalloc status of region described by v_addr and size.
 * Can return one of the following:
 * IM_REGION_UNUSED - Entire region is unallocated in imalloc space.
 * IM_REGION_SUBSET - Region is a subset of a region that is already
 *		      allocated in imalloc space.
 *		      vm will be assigned to a ptr to the parent region.
 * IM_REGION_EXISTS - Exact region already allocated in imalloc space.
 *		      vm will be assigned to a ptr to the existing imlist
 *		      member.
 * IM_REGION_OVERLAPS - Region overlaps an allocated region in imalloc space.
 * IM_REGION_SUPERSET - Region is a superset of a region that is already
 *			allocated in imalloc space.
 * Caller must hold imlist_sem.  On OVERLAP and UNUSED, *vm is left
 * NULL/unset respectively rather than pointing at a region.
 */
static int im_region_status(unsigned long v_addr, unsigned long size,
		struct vm_struct **vm)
{
	struct vm_struct *tmp;

	/* imlist is address-ordered: find the first region whose end lies
	 * above v_addr - the only candidate that can relate to our start.
	 */
	for (tmp = imlist; tmp; tmp = tmp->next)
		if (v_addr < (unsigned long) tmp->addr + tmp->size)
			break;

	if (tmp) {
		if (im_region_overlaps(v_addr, size, tmp))
			return IM_REGION_OVERLAP;

		*vm = tmp;
		if (im_region_is_subset(v_addr, size, tmp)) {
			/* Return with tmp pointing to superset */
			return IM_REGION_SUBSET;
		}
		if (im_region_is_superset(v_addr, size, tmp)) {
			/* Return with tmp pointing to first subset */
			return IM_REGION_SUPERSET;
		}
		else if (v_addr == (unsigned long) tmp->addr &&
			 size == tmp->size) {
			/* Return with tmp pointing to exact region */
			return IM_REGION_EXISTS;
		}
	}

	/* No relation to any existing region */
	*vm = NULL;
	return IM_REGION_UNUSED;
}
  114. static struct vm_struct * split_im_region(unsigned long v_addr,
  115. unsigned long size, struct vm_struct *parent)
  116. {
  117. struct vm_struct *vm1 = NULL;
  118. struct vm_struct *vm2 = NULL;
  119. struct vm_struct *new_vm = NULL;
  120. vm1 = (struct vm_struct *) kmalloc(sizeof(*vm1), GFP_KERNEL);
  121. if (vm1 == NULL) {
  122. printk(KERN_ERR "%s() out of memory\n", __FUNCTION__);
  123. return NULL;
  124. }
  125. if (v_addr == (unsigned long) parent->addr) {
  126. /* Use existing parent vm_struct to represent child, allocate
  127. * new one for the remainder of parent range
  128. */
  129. vm1->size = parent->size - size;
  130. vm1->addr = (void *) (v_addr + size);
  131. vm1->next = parent->next;
  132. parent->size = size;
  133. parent->next = vm1;
  134. new_vm = parent;
  135. } else if (v_addr + size == (unsigned long) parent->addr +
  136. parent->size) {
  137. /* Allocate new vm_struct to represent child, use existing
  138. * parent one for remainder of parent range
  139. */
  140. vm1->size = size;
  141. vm1->addr = (void *) v_addr;
  142. vm1->next = parent->next;
  143. new_vm = vm1;
  144. parent->size -= size;
  145. parent->next = vm1;
  146. } else {
  147. /* Allocate two new vm_structs for the new child and
  148. * uppermost remainder, and use existing parent one for the
  149. * lower remainder of parent range
  150. */
  151. vm2 = (struct vm_struct *) kmalloc(sizeof(*vm2), GFP_KERNEL);
  152. if (vm2 == NULL) {
  153. printk(KERN_ERR "%s() out of memory\n", __FUNCTION__);
  154. kfree(vm1);
  155. return NULL;
  156. }
  157. vm1->size = size;
  158. vm1->addr = (void *) v_addr;
  159. vm1->next = vm2;
  160. new_vm = vm1;
  161. vm2->size = ((unsigned long) parent->addr + parent->size) -
  162. (v_addr + size);
  163. vm2->addr = (void *) v_addr + size;
  164. vm2->next = parent->next;
  165. parent->size = v_addr - (unsigned long) parent->addr;
  166. parent->next = vm1;
  167. }
  168. return new_vm;
  169. }
  170. static struct vm_struct * __add_new_im_area(unsigned long req_addr,
  171. unsigned long size)
  172. {
  173. struct vm_struct **p, *tmp, *area;
  174. for (p = &imlist; (tmp = *p) ; p = &tmp->next) {
  175. if (req_addr + size <= (unsigned long)tmp->addr)
  176. break;
  177. }
  178. area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL);
  179. if (!area)
  180. return NULL;
  181. area->flags = 0;
  182. area->addr = (void *)req_addr;
  183. area->size = size;
  184. area->next = *p;
  185. *p = area;
  186. return area;
  187. }
  188. static struct vm_struct * __im_get_area(unsigned long req_addr,
  189. unsigned long size,
  190. int criteria)
  191. {
  192. struct vm_struct *tmp;
  193. int status;
  194. status = im_region_status(req_addr, size, &tmp);
  195. if ((criteria & status) == 0) {
  196. return NULL;
  197. }
  198. switch (status) {
  199. case IM_REGION_UNUSED:
  200. tmp = __add_new_im_area(req_addr, size);
  201. break;
  202. case IM_REGION_SUBSET:
  203. tmp = split_im_region(req_addr, size, tmp);
  204. break;
  205. case IM_REGION_EXISTS:
  206. /* Return requested region */
  207. break;
  208. case IM_REGION_SUPERSET:
  209. /* Return first existing subset of requested region */
  210. break;
  211. default:
  212. printk(KERN_ERR "%s() unexpected imalloc region status\n",
  213. __FUNCTION__);
  214. tmp = NULL;
  215. }
  216. return tmp;
  217. }
  218. struct vm_struct * im_get_free_area(unsigned long size)
  219. {
  220. struct vm_struct *area;
  221. unsigned long addr;
  222. down(&imlist_sem);
  223. if (get_free_im_addr(size, &addr)) {
  224. printk(KERN_ERR "%s() cannot obtain addr for size 0x%lx\n",
  225. __FUNCTION__, size);
  226. area = NULL;
  227. goto next_im_done;
  228. }
  229. area = __im_get_area(addr, size, IM_REGION_UNUSED);
  230. if (area == NULL) {
  231. printk(KERN_ERR
  232. "%s() cannot obtain area for addr 0x%lx size 0x%lx\n",
  233. __FUNCTION__, addr, size);
  234. }
  235. next_im_done:
  236. up(&imlist_sem);
  237. return area;
  238. }
  239. struct vm_struct * im_get_area(unsigned long v_addr, unsigned long size,
  240. int criteria)
  241. {
  242. struct vm_struct *area;
  243. down(&imlist_sem);
  244. area = __im_get_area(v_addr, size, criteria);
  245. up(&imlist_sem);
  246. return area;
  247. }
  248. unsigned long im_free(void * addr)
  249. {
  250. struct vm_struct **p, *tmp;
  251. unsigned long ret_size = 0;
  252. if (!addr)
  253. return ret_size;
  254. if ((PAGE_SIZE-1) & (unsigned long) addr) {
  255. printk(KERN_ERR "Trying to %s bad address (%p)\n", __FUNCTION__, addr);
  256. return ret_size;
  257. }
  258. down(&imlist_sem);
  259. for (p = &imlist ; (tmp = *p) ; p = &tmp->next) {
  260. if (tmp->addr == addr) {
  261. ret_size = tmp->size;
  262. *p = tmp->next;
  263. kfree(tmp);
  264. up(&imlist_sem);
  265. return ret_size;
  266. }
  267. }
  268. up(&imlist_sem);
  269. printk(KERN_ERR "Trying to %s nonexistent area (%p)\n", __FUNCTION__,
  270. addr);
  271. return ret_size;
  272. }