iova.c

/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This file is released under the GPLv2.
 *
 * Copyright (C) 2006 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#include "iova.h"

void
init_iova_domain(struct iova_domain *iovad)
{
        spin_lock_init(&iovad->iova_alloc_lock);
        spin_lock_init(&iovad->iova_rbtree_lock);
        iovad->rbroot = RB_ROOT;
        iovad->cached32_node = NULL;
}
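
/*
 * cached32_node remembers the node most recently allocated with a
 * DMA_32BIT_PFN limit, so that 32-bit-limited allocations can resume
 * their top-down search just below it instead of walking down from the
 * top of the tree every time.  For any other limit, or when nothing is
 * cached yet, the search starts at the right-most node.
 */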
static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
{
        if ((*limit_pfn != DMA_32BIT_PFN) ||
                (iovad->cached32_node == NULL))
                return rb_last(&iovad->rbroot);
        else {
                struct rb_node *prev_node = rb_prev(iovad->cached32_node);
                struct iova *curr_iova =
                        container_of(iovad->cached32_node, struct iova, node);
                *limit_pfn = curr_iova->pfn_lo - 1;
                return prev_node;
        }
}

static void
__cached_rbnode_insert_update(struct iova_domain *iovad,
        unsigned long limit_pfn, struct iova *new)
{
        if (limit_pfn != DMA_32BIT_PFN)
                return;
        iovad->cached32_node = &new->node;
}

static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
        struct iova *cached_iova;
        struct rb_node *curr;

        if (!iovad->cached32_node)
                return;
        curr = iovad->cached32_node;
        cached_iova = container_of(curr, struct iova, node);

        if (free->pfn_lo >= cached_iova->pfn_lo)
                iovad->cached32_node = rb_next(&free->node);
}
/* Computes the padding size required, to make the
 * start address naturally aligned on its size
 */
static int
iova_get_pad_size(int size, unsigned int limit_pfn)
{
        unsigned int pad_size = 0;
        unsigned int order = ilog2(size);

        if (order)
                pad_size = (limit_pfn + 1) % (1 << order);

        return pad_size;
}
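
/*
 * Illustrative example (not from the original source): with size = 8
 * pages (order 3) and limit_pfn = 0x1005, pad_size = (0x1005 + 1) % 8 = 6.
 * __alloc_iova_range() below then picks pfn_lo = 0x1005 - (8 + 6) + 1 =
 * 0xff8, which is a multiple of 8, and pfn_hi = 0xfff.
 */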
static int __alloc_iova_range(struct iova_domain *iovad, unsigned long size,
                unsigned long limit_pfn, struct iova *new, bool size_aligned)
{
        struct rb_node *curr = NULL;
        unsigned long flags;
        unsigned long saved_pfn;
        unsigned int pad_size = 0;

        /* Walk the tree backwards */
        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        saved_pfn = limit_pfn;
        curr = __get_cached_rbnode(iovad, &limit_pfn);
        while (curr) {
                struct iova *curr_iova = container_of(curr, struct iova, node);
                if (limit_pfn < curr_iova->pfn_lo)
                        goto move_left;
                else if (limit_pfn < curr_iova->pfn_hi)
                        goto adjust_limit_pfn;
                else {
                        if (size_aligned)
                                pad_size = iova_get_pad_size(size, limit_pfn);
                        if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn)
                                break;  /* found a free slot */
                }
adjust_limit_pfn:
                limit_pfn = curr_iova->pfn_lo - 1;
move_left:
                curr = rb_prev(curr);
        }

        if (!curr) {
                if (size_aligned)
                        pad_size = iova_get_pad_size(size, limit_pfn);
                if ((IOVA_START_PFN + size + pad_size) > limit_pfn) {
                        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
                        return -ENOMEM;
                }
        }

        /* pfn_lo will point to size aligned address if size_aligned is set */
        new->pfn_lo = limit_pfn - (size + pad_size) + 1;
        new->pfn_hi = new->pfn_lo + size - 1;

        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
        return 0;
}
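
/*
 * Illustrative trace (not from the original source): suppose the tree
 * holds [0x100, 0x1ff] and [0x400, 0x4ff], and __alloc_iova_range() is
 * called with size = 0x80, limit_pfn = 0x500 and size_aligned false
 * (so pad_size stays 0).  The walk starts at [0x400, 0x4ff]; since
 * 0x4ff + 0x80 > 0x500 there is no room above it, limit_pfn becomes
 * 0x3ff and the walk moves left to [0x100, 0x1ff].  There
 * 0x1ff + 0x80 <= 0x3ff, so the loop breaks and the new range becomes
 * [0x380, 0x3ff].
 */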
static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova)
{
        struct rb_node **new = &(root->rb_node), *parent = NULL;
        /* Figure out where to put new node */
        while (*new) {
                struct iova *this = container_of(*new, struct iova, node);
                parent = *new;

                if (iova->pfn_lo < this->pfn_lo)
                        new = &((*new)->rb_left);
                else if (iova->pfn_lo > this->pfn_lo)
                        new = &((*new)->rb_right);
                else
                        BUG(); /* this should not happen */
        }
        /* Add new node and rebalance tree. */
        rb_link_node(&iova->node, parent, new);
        rb_insert_color(&iova->node, root);
}
/**
 * alloc_iova - allocates an iova
 * @iovad: iova domain in question
 * @size: size of page frames to allocate
 * @limit_pfn: max limit address
 * @size_aligned: set if a size-aligned address range is required
 * This function allocates an iova in the range IOVA_START_PFN to limit_pfn,
 * searching down from limit_pfn rather than up from IOVA_START_PFN. If the
 * size_aligned flag is set then the allocated address iova->pfn_lo will be
 * naturally aligned on roundup_power_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
        unsigned long limit_pfn,
        bool size_aligned)
{
        unsigned long flags;
        struct iova *new_iova;
        int ret;

        new_iova = alloc_iova_mem();
        if (!new_iova)
                return NULL;

        /* If size_aligned is set then round the size to
         * the next power of two.
         */
        if (size_aligned)
                size = __roundup_pow_of_two(size);

        spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
        ret = __alloc_iova_range(iovad, size, limit_pfn, new_iova,
                        size_aligned);

        if (ret) {
                spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
                free_iova_mem(new_iova);
                return NULL;
        }

        /* Insert the new_iova into domain rbtree by holding writer lock */
        spin_lock(&iovad->iova_rbtree_lock);
        iova_insert_rbtree(&iovad->rbroot, new_iova);
        __cached_rbnode_insert_update(iovad, limit_pfn, new_iova);
        spin_unlock(&iovad->iova_rbtree_lock);

        spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
        return new_iova;
}
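
/*
 * Illustrative usage sketch (not part of this file; the real callers
 * live in the IOMMU driver).  A caller needing nrpages of IOVA space
 * below the 32-bit boundary might do:
 *
 *        struct iova *iova;
 *
 *        iova = alloc_iova(iovad, nrpages, DMA_32BIT_PFN, 1);
 *        if (!iova)
 *                return NULL;
 *        ... map the device pages at [iova->pfn_lo, iova->pfn_hi] ...
 *        __free_iova(iovad, iova);
 */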
/**
 * find_iova - finds an iova for a given pfn
 * @iovad: iova domain in question.
 * @pfn: page frame number
 * This function finds and returns an iova belonging to the
 * given domain which matches the given pfn.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
        unsigned long flags;
        struct rb_node *node;

        /* Take the lock so that no other thread is manipulating the rbtree */
        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        node = iovad->rbroot.rb_node;
        while (node) {
                struct iova *iova = container_of(node, struct iova, node);

                /* If pfn falls within iova's range, return iova */
                if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
                        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
                        /* We are not holding the lock while this iova
                         * is referenced by the caller as the same thread
                         * which called this function also calls __free_iova()
                         * and it is by design that only one thread can possibly
                         * reference a particular iova and hence no conflict.
                         */
                        return iova;
                }

                if (pfn < iova->pfn_lo)
                        node = node->rb_left;
                else if (pfn > iova->pfn_lo)
                        node = node->rb_right;
        }

        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
        return NULL;
}
/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 * Frees the given iova belonging to the given domain
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
        unsigned long flags;

        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        __cached_rbnode_delete_update(iovad, iova);
        rb_erase(&iova->node, &iovad->rbroot);
        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
        free_iova_mem(iova);
}
/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: iova domain in question.
 * @pfn: pfn that was allocated previously
 * This function finds an iova for a given pfn and then
 * frees the iova from that domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
        struct iova *iova = find_iova(iovad, pfn);

        if (iova)
                __free_iova(iovad, iova);
}
/**
 * put_iova_domain - destroys the iova domain
 * @iovad: iova domain in question.
 * All the iovas in that domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
        struct rb_node *node;
        unsigned long flags;

        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        node = rb_first(&iovad->rbroot);
        while (node) {
                struct iova *iova = container_of(node, struct iova, node);
                rb_erase(node, &iovad->rbroot);
                free_iova_mem(iova);
                node = rb_first(&iovad->rbroot);
        }
        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}
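
/*
 * Illustrative lifecycle sketch (not part of this file): a domain is
 * set up once, used for any number of allocations, and torn down when
 * the domain goes away.  The field name domain->iovad is a placeholder
 * for illustration only:
 *
 *        init_iova_domain(&domain->iovad);
 *        ... alloc_iova() / free_iova() / reserve_iova() as needed ...
 *        put_iova_domain(&domain->iovad);
 */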
static int
__is_range_overlap(struct rb_node *node,
        unsigned long pfn_lo, unsigned long pfn_hi)
{
        struct iova *iova = container_of(node, struct iova, node);

        if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
                return 1;
        return 0;
}

static struct iova *
__insert_new_range(struct iova_domain *iovad,
        unsigned long pfn_lo, unsigned long pfn_hi)
{
        struct iova *iova;

        iova = alloc_iova_mem();
        if (!iova)
                return iova;

        iova->pfn_hi = pfn_hi;
        iova->pfn_lo = pfn_lo;
        iova_insert_rbtree(&iovad->rbroot, iova);
        return iova;
}

static void
__adjust_overlap_range(struct iova *iova,
        unsigned long *pfn_lo, unsigned long *pfn_hi)
{
        if (*pfn_lo < iova->pfn_lo)
                iova->pfn_lo = *pfn_lo;
        if (*pfn_hi > iova->pfn_hi)
                *pfn_lo = iova->pfn_hi + 1;
}
/**
 * reserve_iova - reserves an iova in the given range
 * @iovad: iova domain pointer
 * @pfn_lo: lower page frame address
 * @pfn_hi: higher pfn address
 * This function reserves the address range from pfn_lo to pfn_hi so
 * that this range is not dished out as part of alloc_iova.
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
        unsigned long pfn_lo, unsigned long pfn_hi)
{
        struct rb_node *node;
        unsigned long flags;
        struct iova *iova;
        unsigned int overlap = 0;

        spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
        spin_lock(&iovad->iova_rbtree_lock);
        for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
                if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
                        iova = container_of(node, struct iova, node);
                        __adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
                        if ((pfn_lo >= iova->pfn_lo) &&
                                (pfn_hi <= iova->pfn_hi))
                                goto finish;
                        overlap = 1;
                } else if (overlap)
                        break;
        }

        /* We are here either because this is the first reserved node
         * or because we need to insert the remaining non-overlapping
         * addr range
         */
        iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:

        spin_unlock(&iovad->iova_rbtree_lock);
        spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
        return iova;
}
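
/*
 * Illustrative usage sketch (not part of this file): carve a hardware
 * range, e.g. an interrupt or MSI window, out of the allocatable space
 * so alloc_iova() can never return addresses inside it.  The start_pfn
 * and end_pfn names are placeholders:
 *
 *        if (!reserve_iova(iovad, start_pfn, end_pfn))
 *                printk(KERN_ERR "IOVA: failed to reserve pfn range\n");
 */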
/**
 * copy_reserved_iova - copies the reserved iovas between domains
 * @from: source domain from where to copy
 * @to: destination domain where to copy
 * This function copies reserved iovas from one domain to
 * the other.
 */
void
copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
{
        unsigned long flags;
        struct rb_node *node;

        spin_lock_irqsave(&from->iova_alloc_lock, flags);
        spin_lock(&from->iova_rbtree_lock);
        for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
                struct iova *iova = container_of(node, struct iova, node);
                struct iova *new_iova;
                new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
                if (!new_iova)
                        printk(KERN_ERR "Reserve iova range %lx-%lx failed\n",
                                iova->pfn_lo, iova->pfn_hi);
        }
        spin_unlock(&from->iova_rbtree_lock);
        spin_unlock_irqrestore(&from->iova_alloc_lock, flags);
}
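
/*
 * Illustrative note (not part of this file): a caller would typically
 * use this when creating a new domain so that globally reserved ranges
 * carry over, e.g. copy_reserved_iova(&reserved_ranges, &new_domain_iovad),
 * where both names are placeholders.
 */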