iova.c

/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This file is released under the GPLv2.
 *
 * Copyright (C) 2006 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#include "iova.h"

void
init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
{
        spin_lock_init(&iovad->iova_alloc_lock);
        spin_lock_init(&iovad->iova_rbtree_lock);
        iovad->rbroot = RB_ROOT;
        iovad->cached32_node = NULL;
        iovad->dma_32bit_pfn = pfn_32bit;
}
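
/*
 * Allocation is done top-down, so requests limited to the common 32-bit
 * DMA mask all start their search at dma_32bit_pfn.  To avoid rescanning
 * the rbtree from rb_last() on every such request, the domain caches the
 * most recently allocated 32-bit node in cached32_node; the helpers below
 * keep that cache consistent across inserts and deletes.
 */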

static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
{
        if ((*limit_pfn != iovad->dma_32bit_pfn) ||
                (iovad->cached32_node == NULL))
                return rb_last(&iovad->rbroot);
        else {
                struct rb_node *prev_node = rb_prev(iovad->cached32_node);
                struct iova *curr_iova =
                        container_of(iovad->cached32_node, struct iova, node);
                *limit_pfn = curr_iova->pfn_lo - 1;
                return prev_node;
        }
}

static void
__cached_rbnode_insert_update(struct iova_domain *iovad,
        unsigned long limit_pfn, struct iova *new)
{
        if (limit_pfn != iovad->dma_32bit_pfn)
                return;
        iovad->cached32_node = &new->node;
}

static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
        struct iova *cached_iova;
        struct rb_node *curr;

        if (!iovad->cached32_node)
                return;
        curr = iovad->cached32_node;
        cached_iova = container_of(curr, struct iova, node);

        if (free->pfn_lo >= cached_iova->pfn_lo)
                iovad->cached32_node = rb_next(&free->node);
}

/*
 * Computes the padding size required, to make the
 * start address naturally aligned on its size
 */
static int
iova_get_pad_size(int size, unsigned int limit_pfn)
{
        unsigned int pad_size = 0;
        unsigned int order = ilog2(size);

        if (order)
                pad_size = (limit_pfn + 1) % (1 << order);

        return pad_size;
}
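
/*
 * Example: with size already rounded to 8 pages and limit_pfn == 0x1005,
 * pad_size = (0x1006 % 8) = 6.  __alloc_iova_range() below then places the
 * allocation at pfn_lo = 0x1005 - (8 + 6) + 1 = 0xff8, which is a multiple
 * of 8 as required for a size-aligned request.
 *
 * __alloc_iova_range() walks the rbtree from the highest allocated range
 * downwards, shrinking limit_pfn past each occupied range until it finds a
 * gap large enough for size + pad_size (or falls off the left edge of the
 * tree, in which case the gap above IOVA_START_PFN is checked).
 */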

static int __alloc_iova_range(struct iova_domain *iovad, unsigned long size,
                unsigned long limit_pfn, struct iova *new, bool size_aligned)
{
        struct rb_node *curr = NULL;
        unsigned long flags;
        unsigned long saved_pfn;
        unsigned int pad_size = 0;

        /* Walk the tree backwards */
        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        saved_pfn = limit_pfn;
        curr = __get_cached_rbnode(iovad, &limit_pfn);
        while (curr) {
                struct iova *curr_iova = container_of(curr, struct iova, node);
                if (limit_pfn < curr_iova->pfn_lo)
                        goto move_left;
                else if (limit_pfn < curr_iova->pfn_hi)
                        goto adjust_limit_pfn;
                else {
                        if (size_aligned)
                                pad_size = iova_get_pad_size(size, limit_pfn);
                        if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn)
                                break;  /* found a free slot */
                }
adjust_limit_pfn:
                limit_pfn = curr_iova->pfn_lo - 1;
move_left:
                curr = rb_prev(curr);
        }

        if (!curr) {
                if (size_aligned)
                        pad_size = iova_get_pad_size(size, limit_pfn);
                if ((IOVA_START_PFN + size + pad_size) > limit_pfn) {
                        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
                        return -ENOMEM;
                }
        }

        /* pfn_lo will point to size aligned address if size_aligned is set */
        new->pfn_lo = limit_pfn - (size + pad_size) + 1;
        new->pfn_hi = new->pfn_lo + size - 1;

        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
        return 0;
}

static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova)
{
        struct rb_node **new = &(root->rb_node), *parent = NULL;

        /* Figure out where to put new node */
        while (*new) {
                struct iova *this = container_of(*new, struct iova, node);
                parent = *new;

                if (iova->pfn_lo < this->pfn_lo)
                        new = &((*new)->rb_left);
                else if (iova->pfn_lo > this->pfn_lo)
                        new = &((*new)->rb_right);
                else
                        BUG(); /* this should not happen */
        }

        /* Add new node and rebalance tree. */
        rb_link_node(&iova->node, parent, new);
        rb_insert_color(&iova->node, root);
}

/**
 * alloc_iova - allocates an iova
 * @iovad: iova domain in question
 * @size: number of page frames to allocate
 * @limit_pfn: maximum pfn that may be allocated
 * @size_aligned: set if a size-aligned address range is required
 * This function allocates an iova in the range IOVA_START_PFN to limit_pfn,
 * searching downwards from limit_pfn. If the size_aligned flag is set then
 * the allocated address iova->pfn_lo will be naturally aligned on
 * roundup_power_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
        unsigned long limit_pfn,
        bool size_aligned)
{
        unsigned long flags;
        struct iova *new_iova;
        int ret;

        new_iova = alloc_iova_mem();
        if (!new_iova)
                return NULL;

        /* If size aligned is set then round the size to
         * the next power of two.
         */
        if (size_aligned)
                size = __roundup_pow_of_two(size);

        spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
        ret = __alloc_iova_range(iovad, size, limit_pfn, new_iova,
                size_aligned);

        if (ret) {
                spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
                free_iova_mem(new_iova);
                return NULL;
        }

        /* Insert the new_iova into domain rbtree by holding writer lock */
        spin_lock(&iovad->iova_rbtree_lock);
        iova_insert_rbtree(&iovad->rbroot, new_iova);
        __cached_rbnode_insert_update(iovad, limit_pfn, new_iova);
        spin_unlock(&iovad->iova_rbtree_lock);

        spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);

        return new_iova;
}
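
/*
 * Illustrative usage sketch (not part of the original file): allocate a
 * range of nrpages page frames below a device's DMA mask, then release it
 * with __free_iova() once the mapping is torn down.  "domain", "nrpages",
 * "dma_mask" and the IOVA_PFN() address-to-pfn helper are assumptions
 * about the caller's environment, not definitions from this file.
 *
 *        struct iova *iova;
 *
 *        iova = alloc_iova(&domain->iovad, nrpages,
 *                          IOVA_PFN(dma_mask), true);
 *        if (!iova)
 *                return NULL;
 *        ... program the IOMMU page tables for iova->pfn_lo..pfn_hi ...
 *        __free_iova(&domain->iovad, iova);
 */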

/**
 * find_iova - finds the iova for a given pfn
 * @iovad: iova domain in question.
 * @pfn: page frame number
 * This function finds and returns the iova belonging to the
 * given domain which contains the given pfn.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
        unsigned long flags;
        struct rb_node *node;

        /* Take the lock so that no other thread is manipulating the rbtree */
        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        node = iovad->rbroot.rb_node;
        while (node) {
                struct iova *iova = container_of(node, struct iova, node);

                /* If pfn falls within iova's range, return iova */
                if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
                        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
                        /* We are not holding the lock while this iova
                         * is referenced by the caller as the same thread
                         * which called this function also calls __free_iova()
                         * and it is by design that only one thread can possibly
                         * reference a particular iova and hence no conflict.
                         */
                        return iova;
                }

                if (pfn < iova->pfn_lo)
                        node = node->rb_left;
                else if (pfn > iova->pfn_lo)
                        node = node->rb_right;
        }

        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
        return NULL;
}

/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 * Frees the given iova belonging to the given domain
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
        unsigned long flags;

        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        __cached_rbnode_delete_update(iovad, iova);
        rb_erase(&iova->node, &iovad->rbroot);
        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
        free_iova_mem(iova);
}

/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: iova domain in question.
 * @pfn: pfn that was allocated previously
 * This function finds the iova for a given pfn and then
 * frees the iova from that domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
        struct iova *iova = find_iova(iovad, pfn);

        if (iova)
                __free_iova(iovad, iova);
}

/**
 * put_iova_domain - destroys the iova domain
 * @iovad: iova domain in question.
 * All the iovas in the domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
        struct rb_node *node;
        unsigned long flags;

        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        node = rb_first(&iovad->rbroot);
        while (node) {
                struct iova *iova = container_of(node, struct iova, node);
                rb_erase(node, &iovad->rbroot);
                free_iova_mem(iova);
                node = rb_first(&iovad->rbroot);
        }
        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}

static int
__is_range_overlap(struct rb_node *node,
        unsigned long pfn_lo, unsigned long pfn_hi)
{
        struct iova *iova = container_of(node, struct iova, node);

        if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
                return 1;
        return 0;
}

static struct iova *
__insert_new_range(struct iova_domain *iovad,
        unsigned long pfn_lo, unsigned long pfn_hi)
{
        struct iova *iova;

        iova = alloc_iova_mem();
        if (!iova)
                return iova;

        iova->pfn_hi = pfn_hi;
        iova->pfn_lo = pfn_lo;
        iova_insert_rbtree(&iovad->rbroot, iova);
        return iova;
}

static void
__adjust_overlap_range(struct iova *iova,
        unsigned long *pfn_lo, unsigned long *pfn_hi)
{
        if (*pfn_lo < iova->pfn_lo)
                iova->pfn_lo = *pfn_lo;
        if (*pfn_hi > iova->pfn_hi)
                *pfn_lo = iova->pfn_hi + 1;
}
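
/*
 * reserve_iova() below scans the tree in ascending pfn order.  Each node
 * that overlaps the requested range is grown on the left via
 * __adjust_overlap_range(), and pfn_lo is advanced past that node; once
 * the requested range is fully covered by an existing node that node is
 * returned, otherwise the remaining (non-overlapping) tail is inserted as
 * a new reserved range.
 */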

/**
 * reserve_iova - reserves an iova in the given range
 * @iovad: iova domain pointer
 * @pfn_lo: lower page frame address
 * @pfn_hi: higher pfn address
 * This function reserves the address range from pfn_lo to pfn_hi so
 * that this range is not handed out as part of alloc_iova.
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
        unsigned long pfn_lo, unsigned long pfn_hi)
{
        struct rb_node *node;
        unsigned long flags;
        struct iova *iova;
        unsigned int overlap = 0;

        spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
        spin_lock(&iovad->iova_rbtree_lock);
        for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
                if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
                        iova = container_of(node, struct iova, node);
                        __adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
                        if ((pfn_lo >= iova->pfn_lo) &&
                                (pfn_hi <= iova->pfn_hi))
                                goto finish;
                        overlap = 1;
                } else if (overlap)
                        break;
        }

        /* We are here either because nothing in the tree overlapped the
         * requested range, or because the remaining non-overlapping part
         * of the range still needs to be inserted.
         */
        iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:

        spin_unlock(&iovad->iova_rbtree_lock);
        spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
        return iova;
}
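
/*
 * Illustrative usage sketch (not part of the original file): carve a fixed
 * MMIO window out of a domain at init time so alloc_iova() never hands out
 * addresses inside it.  The pfn constants and the IOVA_PFN() helper are
 * assumptions about the caller's environment.
 *
 *        if (!reserve_iova(&domain->iovad, IOVA_PFN(0xfee00000ULL),
 *                          IOVA_PFN(0xfeefffffULL)))
 *                printk(KERN_ERR "Failed to reserve the MSI window\n");
 */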

/**
 * copy_reserved_iova - copies the reserved iova ranges between domains
 * @from: source domain from where to copy
 * @to: destination domain where to copy
 * This function copies reserved iovas from one domain to
 * another.
 */
void
copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
{
        unsigned long flags;
        struct rb_node *node;

        spin_lock_irqsave(&from->iova_alloc_lock, flags);
        spin_lock(&from->iova_rbtree_lock);
        for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
                struct iova *iova = container_of(node, struct iova, node);
                struct iova *new_iova;

                new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
                if (!new_iova)
                        printk(KERN_ERR "Reserve iova range %lx-%lx failed\n",
                                iova->pfn_lo, iova->pfn_hi);
        }
        spin_unlock(&from->iova_rbtree_lock);
        spin_unlock_irqrestore(&from->iova_alloc_lock, flags);
}