/*
 * Copyright © 2006-2009, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#include <linux/iova.h>

void
init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
{
        spin_lock_init(&iovad->iova_rbtree_lock);
        iovad->rbroot = RB_ROOT;
        iovad->cached32_node = NULL;
        iovad->dma_32bit_pfn = pfn_32bit;
}

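/*
 * Illustrative usage sketch (not part of this file): an IOMMU driver would
 * typically embed a domain in its own state and initialize it once at
 * startup. DMA_32BIT_PFN below is a hypothetical constant standing in for
 * the pfn of the highest 32-bit-addressable page, derived from the DMA mask:
 *
 *	static struct iova_domain iovad;
 *
 *	init_iova_domain(&iovad, DMA_32BIT_PFN);
 */
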
static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
{
        if ((*limit_pfn != iovad->dma_32bit_pfn) ||
                (iovad->cached32_node == NULL))
                return rb_last(&iovad->rbroot);
        else {
                struct rb_node *prev_node = rb_prev(iovad->cached32_node);
                struct iova *curr_iova =
                        container_of(iovad->cached32_node, struct iova, node);
                *limit_pfn = curr_iova->pfn_lo - 1;
                return prev_node;
        }
}

static void
__cached_rbnode_insert_update(struct iova_domain *iovad,
        unsigned long limit_pfn, struct iova *new)
{
        if (limit_pfn != iovad->dma_32bit_pfn)
                return;
        iovad->cached32_node = &new->node;
}

static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
        struct iova *cached_iova;
        struct rb_node *curr;

        if (!iovad->cached32_node)
                return;
        curr = iovad->cached32_node;
        cached_iova = container_of(curr, struct iova, node);

        if (free->pfn_lo >= cached_iova->pfn_lo)
                iovad->cached32_node = rb_next(&free->node);
}

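/*
 * Note on the two cache helpers above: allocations bounded by dma_32bit_pfn
 * are the common case, so the domain remembers the most recently inserted
 * 32-bit node in cached32_node. __get_cached_rbnode() then resumes the
 * backward search just below that node instead of from rb_last(), and
 * __cached_rbnode_delete_update() keeps the hint valid: when a node at or
 * above the cached position is freed, the hint moves to the node just after
 * the freed one.
 */
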
/* Computes the padding size required, to make the
 * start address naturally aligned on its size
 */
static int
iova_get_pad_size(int size, unsigned int limit_pfn)
{
        unsigned int pad_size = 0;
        unsigned int order = ilog2(size);

        if (order)
                pad_size = (limit_pfn + 1) % (1 << order);

        return pad_size;
}

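/*
 * Worked example (illustrative numbers): for size = 4 pages (order 2) and
 * limit_pfn = 0x4ad, pad_size = (0x4ae % 4) = 2. The allocator below then
 * places pfn_lo = 0x4ad - (4 + 2) + 1 = 0x4a8, which is 4-page aligned.
 */
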
static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
                unsigned long size, unsigned long limit_pfn,
                struct iova *new, bool size_aligned)
{
        struct rb_node *prev, *curr = NULL;
        unsigned long flags;
        unsigned long saved_pfn;
        unsigned int pad_size = 0;

        /* Walk the tree backwards */
        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        saved_pfn = limit_pfn;
        curr = __get_cached_rbnode(iovad, &limit_pfn);
        prev = curr;
        while (curr) {
                struct iova *curr_iova = container_of(curr, struct iova, node);

                if (limit_pfn < curr_iova->pfn_lo)
                        goto move_left;
                else if (limit_pfn < curr_iova->pfn_hi)
                        goto adjust_limit_pfn;
                else {
                        if (size_aligned)
                                pad_size = iova_get_pad_size(size, limit_pfn);
                        if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn)
                                break;  /* found a free slot */
                }
adjust_limit_pfn:
                limit_pfn = curr_iova->pfn_lo - 1;
move_left:
                prev = curr;
                curr = rb_prev(curr);
        }

        if (!curr) {
                if (size_aligned)
                        pad_size = iova_get_pad_size(size, limit_pfn);
                if ((IOVA_START_PFN + size + pad_size) > limit_pfn) {
                        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
                        return -ENOMEM;
                }
        }

        /* pfn_lo will point to size aligned address if size_aligned is set */
        new->pfn_lo = limit_pfn - (size + pad_size) + 1;
        new->pfn_hi = new->pfn_lo + size - 1;

        /* Insert the new iova into the domain rbtree while holding the lock */
        {
                struct rb_node **entry, *parent = NULL;

                /* If we have 'prev', it's a valid place to start the
                 * insertion. Otherwise, start from the root.
                 */
                if (prev)
                        entry = &prev;
                else
                        entry = &iovad->rbroot.rb_node;

                /* Figure out where to put the new node */
                while (*entry) {
                        struct iova *this = container_of(*entry,
                                                        struct iova, node);
                        parent = *entry;

                        if (new->pfn_lo < this->pfn_lo)
                                entry = &((*entry)->rb_left);
                        else if (new->pfn_lo > this->pfn_lo)
                                entry = &((*entry)->rb_right);
                        else
                                BUG(); /* this should not happen */
                }

                /* Add the new node and rebalance the tree. */
                rb_link_node(&new->node, parent, entry);
                rb_insert_color(&new->node, &iovad->rbroot);
        }
        __cached_rbnode_insert_update(iovad, saved_pfn, new);

        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

        return 0;
}

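/*
 * In short, the walk above starts at the highest allocated node (or at the
 * cached 32-bit hint) and moves left through the tree, shrinking limit_pfn
 * past each allocated range, until it finds a gap of at least
 * size + pad_size pages. The highest-addressed gap below limit_pfn wins,
 * so allocations pack downward from the limit.
 */
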
static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova)
{
        struct rb_node **new = &(root->rb_node), *parent = NULL;

        /* Figure out where to put the new node */
        while (*new) {
                struct iova *this = container_of(*new, struct iova, node);
                parent = *new;

                if (iova->pfn_lo < this->pfn_lo)
                        new = &((*new)->rb_left);
                else if (iova->pfn_lo > this->pfn_lo)
                        new = &((*new)->rb_right);
                else
                        BUG(); /* this should not happen */
        }

        /* Add the new node and rebalance the tree. */
        rb_link_node(&iova->node, parent, new);
        rb_insert_color(&iova->node, root);
}

/**
 * alloc_iova - allocates an iova
 * @iovad: iova domain in question
 * @size: size of page frames to allocate
 * @limit_pfn: max limit address
 * @size_aligned: set if size aligned address range is required
 * This function allocates an iova in the range IOVA_START_PFN to limit_pfn,
 * searching downward from limit_pfn. If the size_aligned flag is set, the
 * allocated address iova->pfn_lo will be naturally aligned on
 * __roundup_pow_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
        unsigned long limit_pfn,
        bool size_aligned)
{
        struct iova *new_iova;
        int ret;

        new_iova = alloc_iova_mem();
        if (!new_iova)
                return NULL;

        /* If size aligned is set then round the size to
         * the next power of two.
         */
        if (size_aligned)
                size = __roundup_pow_of_two(size);

        ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
                        new_iova, size_aligned);

        if (ret) {
                free_iova_mem(new_iova);
                return NULL;
        }

        return new_iova;
}

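/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * allocate 16 pages of IOVA space below the device's DMA limit, size
 * aligned. dma_limit_pfn stands in for whatever pfn the caller derives
 * from its device's DMA mask:
 *
 *	struct iova *iova = alloc_iova(&iovad, 16, dma_limit_pfn, true);
 *	if (!iova)
 *		return -ENOMEM;
 *	... map iova->pfn_lo .. iova->pfn_hi, release later via __free_iova() ...
 */
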
/**
 * find_iova - finds an iova for a given pfn
 * @iovad: iova domain in question.
 * @pfn: page frame number
 * This function finds and returns an iova belonging to the
 * given domain which matches the given pfn.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
        unsigned long flags;
        struct rb_node *node;

        /* Take the lock so that no other thread is manipulating the rbtree */
        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        node = iovad->rbroot.rb_node;
        while (node) {
                struct iova *iova = container_of(node, struct iova, node);

                /* If pfn falls within iova's range, return iova */
                if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
                        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
                        /* We are not holding the lock while this iova
                         * is referenced by the caller as the same thread
                         * which called this function also calls __free_iova()
                         * and it is by design that only one thread can possibly
                         * reference a particular iova and hence no conflict.
                         */
                        return iova;
                }

                if (pfn < iova->pfn_lo)
                        node = node->rb_left;
                else if (pfn > iova->pfn_lo)
                        node = node->rb_right;
        }

        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
        return NULL;
}

/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 * Frees the given iova belonging to the given domain
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
        unsigned long flags;

        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        __cached_rbnode_delete_update(iovad, iova);
        rb_erase(&iova->node, &iovad->rbroot);
        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
        free_iova_mem(iova);
}

/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: iova domain in question.
 * @pfn: pfn that was allocated previously
 * This function finds an iova for a given pfn and then
 * frees the iova from that domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
        struct iova *iova = find_iova(iovad, pfn);

        if (iova)
                __free_iova(iovad, iova);
}

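/*
 * Illustrative sketch (hypothetical caller): when only the pfn is known at
 * unmap time, free_iova() above combines the lookup and the release:
 *
 *	unmap_range(dev, iova_pfn, npages);
 *	free_iova(&iovad, iova_pfn);
 *
 * unmap_range() is a made-up stand-in for the caller's own teardown, not an
 * API of this file.
 */
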
/**
 * put_iova_domain - destroys the iova domain
 * @iovad: iova domain in question.
 * All the iovas in that domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
        struct rb_node *node;
        unsigned long flags;

        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        node = rb_first(&iovad->rbroot);
        while (node) {
                struct iova *iova = container_of(node, struct iova, node);
                rb_erase(node, &iovad->rbroot);
                free_iova_mem(iova);
                node = rb_first(&iovad->rbroot);
        }
        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}

static int
__is_range_overlap(struct rb_node *node,
        unsigned long pfn_lo, unsigned long pfn_hi)
{
        struct iova *iova = container_of(node, struct iova, node);

        if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
                return 1;
        return 0;
}

static struct iova *
__insert_new_range(struct iova_domain *iovad,
        unsigned long pfn_lo, unsigned long pfn_hi)
{
        struct iova *iova;

        iova = alloc_iova_mem();
        if (!iova)
                return iova;

        iova->pfn_hi = pfn_hi;
        iova->pfn_lo = pfn_lo;
        iova_insert_rbtree(&iovad->rbroot, iova);
        return iova;
}

static void
__adjust_overlap_range(struct iova *iova,
        unsigned long *pfn_lo, unsigned long *pfn_hi)
{
        if (*pfn_lo < iova->pfn_lo)
                iova->pfn_lo = *pfn_lo;
        if (*pfn_hi > iova->pfn_hi)
                *pfn_lo = iova->pfn_hi + 1;
}

/**
 * reserve_iova - reserves an iova in the given range
 * @iovad: iova domain pointer
 * @pfn_lo: lower page frame address
 * @pfn_hi: higher pfn address
 * This function reserves the address range from pfn_lo to pfn_hi so
 * that this range is not dished out as part of alloc_iova.
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
        unsigned long pfn_lo, unsigned long pfn_hi)
{
        struct rb_node *node;
        unsigned long flags;
        struct iova *iova;
        unsigned int overlap = 0;

        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
                if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
                        iova = container_of(node, struct iova, node);
                        __adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
                        if ((pfn_lo >= iova->pfn_lo) &&
                                (pfn_hi <= iova->pfn_hi))
                                goto finish;
                        overlap = 1;
                } else if (overlap)
                        break;
        }

        /* We are here either because this is the first reserved node
         * or because we need to insert the remaining non-overlapping
         * address range.
         */
        iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:

        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
        return iova;
}

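/*
 * Illustrative sketch (hypothetical values): carve out a page that must
 * never be handed to a device, e.g. a platform interrupt range:
 *
 *	reserve_iova(&iovad, IOVA_PFN(0xfee00000), IOVA_PFN(0xfee00000));
 *
 * IOVA_PFN() is assumed here to convert a physical address to a pfn, as
 * the Intel IOMMU driver does for the ranges it reserves.
 */
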
/**
 * copy_reserved_iova - copies the reserved iovas between domains
 * @from: source domain from where to copy
 * @to: destination domain where to copy
 * This function copies reserved iovas from one domain to
 * the other.
 */
void
copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
{
        unsigned long flags;
        struct rb_node *node;

        spin_lock_irqsave(&from->iova_rbtree_lock, flags);
        for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
                struct iova *iova = container_of(node, struct iova, node);
                struct iova *new_iova;

                new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
                if (!new_iova)
                        printk(KERN_ERR "Reserve iova range %lx-%lx failed\n",
                                iova->pfn_lo, iova->pfn_hi);
        }
        spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
}