vmregion.c

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>

#include "vmregion.h"

/*
 * VM region handling support.
 *
 * This should become something generic, handling VM region allocations for
 * vmalloc and similar (ioremap, module space, etc).
 *
 * I envisage vmalloc()'s supporting vm_struct becoming:
 *
 *  struct vm_struct {
 *    struct vmregion	region;
 *    unsigned long	flags;
 *    struct page	**pages;
 *    unsigned int	nr_pages;
 *    unsigned long	phys_addr;
 *  };
 *
 * get_vm_area() would then call vmregion_alloc with an appropriate
 * struct vmregion head (eg):
 *
 *  struct vmregion vmalloc_head = {
 *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
 *	.vm_start	= VMALLOC_START,
 *	.vm_end		= VMALLOC_END,
 *  };
 *
 * However, vmalloc_head.vm_start is variable (typically, it is dependent on
 * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
 * would have to initialise this each time prior to calling vmregion_alloc().
 */

struct arm_vmregion *
arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align,
		   size_t size, gfp_t gfp)
{
	unsigned long addr = head->vm_start, end = head->vm_end - size;
	unsigned long flags;
	struct arm_vmregion *c, *new;

	if (head->vm_end - head->vm_start < size) {
		printk(KERN_WARNING "%s: allocation too big (requested %#x)\n",
		       __func__, size);
		goto out;
	}

	new = kmalloc(sizeof(struct arm_vmregion), gfp);
	if (!new)
		goto out;

	spin_lock_irqsave(&head->vm_lock, flags);

	/*
	 * First-fit scan: the list is kept sorted by address, so walk it
	 * looking for a hole before the next region that is large enough,
	 * bailing out on address wrap-around or when we pass the end of
	 * the window.
	 */
	list_for_each_entry(c, &head->vm_list, vm_list) {
		if ((addr + size) < addr)
			goto nospc;
		if ((addr + size) <= c->vm_start)
			goto found;
		addr = ALIGN(c->vm_end, align);
		if (addr > end)
			goto nospc;
	}

 found:
	/*
	 * Insert this entry _before_ the one we found.
	 */
	list_add_tail(&new->vm_list, &c->vm_list);
	new->vm_start = addr;
	new->vm_end = addr + size;
	new->vm_active = 1;

	spin_unlock_irqrestore(&head->vm_lock, flags);
	return new;

 nospc:
	spin_unlock_irqrestore(&head->vm_lock, flags);
	kfree(new);
 out:
	return NULL;
}

static struct arm_vmregion *__arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr)
{
	struct arm_vmregion *c;

	/* Caller must hold head->vm_lock. */
	list_for_each_entry(c, &head->vm_list, vm_list) {
		if (c->vm_active && c->vm_start == addr)
			goto out;
	}
	c = NULL;
 out:
	return c;
}

struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr)
{
	struct arm_vmregion *c;
	unsigned long flags;

	spin_lock_irqsave(&head->vm_lock, flags);
	c = __arm_vmregion_find(head, addr);
	spin_unlock_irqrestore(&head->vm_lock, flags);
	return c;
}

struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *head, unsigned long addr)
{
	struct arm_vmregion *c;
	unsigned long flags;

	spin_lock_irqsave(&head->vm_lock, flags);
	c = __arm_vmregion_find(head, addr);
	/* Mark the region inactive so subsequent lookups skip it. */
	if (c)
		c->vm_active = 0;
	spin_unlock_irqrestore(&head->vm_lock, flags);
	return c;
}

void arm_vmregion_free(struct arm_vmregion_head *head, struct arm_vmregion *c)
{
	unsigned long flags;

	spin_lock_irqsave(&head->vm_lock, flags);
	list_del(&c->vm_list);
	spin_unlock_irqrestore(&head->vm_lock, flags);

	kfree(c);
}
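
/*
 * A minimal usage sketch (illustration only, not part of the original file),
 * assuming a hypothetical caller that owns a fixed virtual window
 * [EXAMPLE_BASE, EXAMPLE_END).  EXAMPLE_BASE, EXAMPLE_END, example_head and
 * example_use() are made-up names for this sketch.  The head is initialised
 * statically, regions are carved out with arm_vmregion_alloc() and released
 * again with arm_vmregion_free().
 */
#if 0	/* illustration only */
static struct arm_vmregion_head example_head = {
	.vm_lock	= __SPIN_LOCK_UNLOCKED(&example_head.vm_lock),
	.vm_list	= LIST_HEAD_INIT(example_head.vm_list),
	.vm_start	= EXAMPLE_BASE,		/* hypothetical constants */
	.vm_end		= EXAMPLE_END,
};

static void example_use(void)
{
	struct arm_vmregion *c;

	/* grab one page-aligned, page-sized region from the window */
	c = arm_vmregion_alloc(&example_head, PAGE_SIZE, PAGE_SIZE, GFP_KERNEL);
	if (!c)
		return;

	/* ... map something into [c->vm_start, c->vm_end) here ... */

	/* unlink the region from the list and free its descriptor */
	arm_vmregion_free(&example_head, c);
}
#endif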