iova.c

/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This file is released under the GPLv2.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#include "iova.h"

void
init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
{
        spin_lock_init(&iovad->iova_alloc_lock);
        spin_lock_init(&iovad->iova_rbtree_lock);
        iovad->rbroot = RB_ROOT;
        iovad->cached32_node = NULL;
        iovad->dma_32bit_pfn = pfn_32bit;
}

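/*
 * cached32_node remembers the most recently allocated iova at the 32-bit
 * DMA limit.  When a later allocation uses the same limit
 * (limit_pfn == dma_32bit_pfn), __get_cached_rbnode() lets the backwards
 * rb-tree walk resume from that point instead of starting at rb_last().
 * The two __cached_rbnode_*_update() helpers below keep this hint valid
 * across allocations and frees.
 */
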
static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
{
        if ((*limit_pfn != iovad->dma_32bit_pfn) ||
                (iovad->cached32_node == NULL))
                return rb_last(&iovad->rbroot);
        else {
                struct rb_node *prev_node = rb_prev(iovad->cached32_node);
                struct iova *curr_iova =
                        container_of(iovad->cached32_node, struct iova, node);
                *limit_pfn = curr_iova->pfn_lo - 1;
                return prev_node;
        }
}

static void
__cached_rbnode_insert_update(struct iova_domain *iovad,
        unsigned long limit_pfn, struct iova *new)
{
        if (limit_pfn != iovad->dma_32bit_pfn)
                return;
        iovad->cached32_node = &new->node;
}

static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
        struct iova *cached_iova;
        struct rb_node *curr;

        if (!iovad->cached32_node)
                return;
        curr = iovad->cached32_node;
        cached_iova = container_of(curr, struct iova, node);

        if (free->pfn_lo >= cached_iova->pfn_lo)
                iovad->cached32_node = rb_next(&free->node);
}

/* Computes the padding size required to make the
 * start address naturally aligned on its size
 */
static int
iova_get_pad_size(int size, unsigned int limit_pfn)
{
        unsigned int pad_size = 0;
        unsigned int order = ilog2(size);

        if (order)
                pad_size = (limit_pfn + 1) % (1 << order);

        return pad_size;
}

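/*
 * The allocator walks the rb-tree of existing iovas from the highest
 * allocated pfn downwards, looking for a gap between neighbouring nodes
 * that can hold 'size' page frames (plus any alignment padding) and that
 * still fits below limit_pfn.  On success the new range is placed as high
 * as possible within that gap.
 */
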
static int __alloc_iova_range(struct iova_domain *iovad, unsigned long size,
                unsigned long limit_pfn, struct iova *new, bool size_aligned)
{
        struct rb_node *curr = NULL;
        unsigned long flags;
        unsigned long saved_pfn;
        unsigned int pad_size = 0;

        /* Walk the tree backwards */
        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        saved_pfn = limit_pfn;
        curr = __get_cached_rbnode(iovad, &limit_pfn);
        while (curr) {
                struct iova *curr_iova = container_of(curr, struct iova, node);

                if (limit_pfn < curr_iova->pfn_lo)
                        goto move_left;
                else if (limit_pfn < curr_iova->pfn_hi)
                        goto adjust_limit_pfn;
                else {
                        if (size_aligned)
                                pad_size = iova_get_pad_size(size, limit_pfn);
                        if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn)
                                break;  /* found a free slot */
                }
adjust_limit_pfn:
                limit_pfn = curr_iova->pfn_lo - 1;
move_left:
                curr = rb_prev(curr);
        }

        if (!curr) {
                if (size_aligned)
                        pad_size = iova_get_pad_size(size, limit_pfn);
                if ((IOVA_START_PFN + size + pad_size) > limit_pfn) {
                        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
                        return -ENOMEM;
                }
        }

        /* pfn_lo will point to size aligned address if size_aligned is set */
        new->pfn_lo = limit_pfn - (size + pad_size) + 1;
        new->pfn_hi = new->pfn_lo + size - 1;

        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
        return 0;
}

static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova)
{
        struct rb_node **new = &(root->rb_node), *parent = NULL;

        /* Figure out where to put new node */
        while (*new) {
                struct iova *this = container_of(*new, struct iova, node);
                parent = *new;

                if (iova->pfn_lo < this->pfn_lo)
                        new = &((*new)->rb_left);
                else if (iova->pfn_lo > this->pfn_lo)
                        new = &((*new)->rb_right);
                else
                        BUG(); /* this should not happen */
        }
        /* Add new node and rebalance tree. */
        rb_link_node(&iova->node, parent, new);
        rb_insert_color(&iova->node, root);
}

/**
 * alloc_iova - allocates an iova
 * @iovad: iova domain in question
 * @size: size of the allocation, in page frames
 * @limit_pfn: highest pfn that may be allocated
 * @size_aligned: set if a size-aligned address range is required
 * This function allocates an iova in the range IOVA_START_PFN to limit_pfn,
 * searching downwards from limit_pfn rather than upwards from IOVA_START_PFN.
 * If the size_aligned flag is set then the allocated address iova->pfn_lo
 * will be naturally aligned on roundup_power_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
        unsigned long limit_pfn,
        bool size_aligned)
{
        unsigned long flags;
        struct iova *new_iova;
        int ret;

        new_iova = alloc_iova_mem();
        if (!new_iova)
                return NULL;

        /* If size_aligned is set then round the size up to
         * the next power of two.
         */
        if (size_aligned)
                size = __roundup_pow_of_two(size);

        spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
        ret = __alloc_iova_range(iovad, size, limit_pfn, new_iova,
                        size_aligned);

        if (ret) {
                spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
                free_iova_mem(new_iova);
                return NULL;
        }

        /* Insert the new_iova into domain rbtree by holding writer lock */
        spin_lock(&iovad->iova_rbtree_lock);
        iova_insert_rbtree(&iovad->rbroot, new_iova);
        __cached_rbnode_insert_update(iovad, limit_pfn, new_iova);
        spin_unlock(&iovad->iova_rbtree_lock);

        spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);

        return new_iova;
}

/**
 * find_iova - finds an iova for a given pfn
 * @iovad: iova domain in question.
 * @pfn: page frame number
 * This function finds and returns an iova belonging to the
 * given domain which matches the given pfn.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
        unsigned long flags;
        struct rb_node *node;

        /* Take the lock so that no other thread is manipulating the rbtree */
        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        node = iovad->rbroot.rb_node;
        while (node) {
                struct iova *iova = container_of(node, struct iova, node);

                /* If pfn falls within iova's range, return iova */
                if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
                        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
                        /* We are not holding the lock while this iova
                         * is referenced by the caller as the same thread
                         * which called this function also calls __free_iova()
                         * and it is by design that only one thread can possibly
                         * reference a particular iova and hence no conflict.
                         */
                        return iova;
                }

                if (pfn < iova->pfn_lo)
                        node = node->rb_left;
                else if (pfn > iova->pfn_lo)
                        node = node->rb_right;
        }

        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
        return NULL;
}

/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 * Frees the given iova belonging to the given domain
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
        unsigned long flags;

        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        __cached_rbnode_delete_update(iovad, iova);
        rb_erase(&iova->node, &iovad->rbroot);
        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
        free_iova_mem(iova);
}

/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: iova domain in question.
 * @pfn: pfn that was allocated previously
 * This function finds an iova for a given pfn and then
 * frees the iova from that domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
        struct iova *iova = find_iova(iovad, pfn);

        if (iova)
                __free_iova(iovad, iova);
}

/**
 * put_iova_domain - destroys the iova domain
 * @iovad: iova domain in question.
 * All the iovas in that domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
        struct rb_node *node;
        unsigned long flags;

        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        node = rb_first(&iovad->rbroot);
        while (node) {
                struct iova *iova = container_of(node, struct iova, node);
                rb_erase(node, &iovad->rbroot);
                free_iova_mem(iova);
                node = rb_first(&iovad->rbroot);
        }
        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}

static int
__is_range_overlap(struct rb_node *node,
        unsigned long pfn_lo, unsigned long pfn_hi)
{
        struct iova *iova = container_of(node, struct iova, node);

        if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
                return 1;
        return 0;
}

static struct iova *
__insert_new_range(struct iova_domain *iovad,
        unsigned long pfn_lo, unsigned long pfn_hi)
{
        struct iova *iova;

        iova = alloc_iova_mem();
        if (!iova)
                return iova;

        iova->pfn_hi = pfn_hi;
        iova->pfn_lo = pfn_lo;
        iova_insert_rbtree(&iovad->rbroot, iova);
        return iova;
}

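/*
 * When a requested reservation overlaps an existing one, the existing iova
 * is grown downwards to cover the lower end of the request, and *pfn_lo is
 * advanced past the existing range so that reserve_iova() only has to
 * insert whatever remains above it.
 */
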
static void
__adjust_overlap_range(struct iova *iova,
        unsigned long *pfn_lo, unsigned long *pfn_hi)
{
        if (*pfn_lo < iova->pfn_lo)
                iova->pfn_lo = *pfn_lo;
        if (*pfn_hi > iova->pfn_hi)
                *pfn_lo = iova->pfn_hi + 1;
}

/**
 * reserve_iova - reserves an iova in the given range
 * @iovad: iova domain pointer
 * @pfn_lo: lower page frame address
 * @pfn_hi: higher pfn address
 * This function reserves the address range from pfn_lo to pfn_hi so
 * that it is never handed out by alloc_iova.
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
        unsigned long pfn_lo, unsigned long pfn_hi)
{
        struct rb_node *node;
        unsigned long flags;
        struct iova *iova;
        unsigned int overlap = 0;

        spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
        spin_lock(&iovad->iova_rbtree_lock);
        for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
                if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
                        iova = container_of(node, struct iova, node);
                        __adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
                        if ((pfn_lo >= iova->pfn_lo) &&
                                (pfn_hi <= iova->pfn_hi))
                                goto finish;
                        overlap = 1;
                } else if (overlap)
                        break;
        }

        /* We are here either because this is the first reserved range
         * or because the remaining, non-overlapping part of the range
         * still needs to be inserted.
         */
        iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:

        spin_unlock(&iovad->iova_rbtree_lock);
        spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
        return iova;
}

/**
 * copy_reserved_iova - copies the reserved ranges between domains
 * @from: source domain from which to copy
 * @to: destination domain to which to copy
 * This function copies reserved iovas from one domain to
 * the other.
 */
void
copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
{
        unsigned long flags;
        struct rb_node *node;

        spin_lock_irqsave(&from->iova_alloc_lock, flags);
        spin_lock(&from->iova_rbtree_lock);
        for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
                struct iova *iova = container_of(node, struct iova, node);
                struct iova *new_iova;

                new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
                if (!new_iova)
                        printk(KERN_ERR "Reserve iova range %lx-%lx failed\n",
                                iova->pfn_lo, iova->pfn_hi);
        }
        spin_unlock(&from->iova_rbtree_lock);
        spin_unlock_irqrestore(&from->iova_alloc_lock, flags);
}
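
/*
 * Usage sketch (illustrative only, not part of the allocator itself): a
 * caller such as an IOMMU driver typically initialises a domain, reserves
 * ranges that must never be handed out, and then allocates and frees iovas.
 * The names dma_32bit_pfn, reserved_pfn_lo, reserved_pfn_hi and nr_pages
 * below are hypothetical placeholders; a real caller derives them from its
 * DMA mask and platform reserved regions.
 *
 *	struct iova_domain iovad;
 *	struct iova *iova;
 *
 *	init_iova_domain(&iovad, dma_32bit_pfn);
 *	reserve_iova(&iovad, reserved_pfn_lo, reserved_pfn_hi);
 *
 *	iova = alloc_iova(&iovad, nr_pages, dma_32bit_pfn, true);
 *	if (iova) {
 *		... map iova->pfn_lo .. iova->pfn_hi and perform the DMA ...
 *		__free_iova(&iovad, iova);
 *	}
 *
 *	put_iova_domain(&iovad);
 */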