qib_keys.c

/*
 * Copyright (c) 2006, 2007, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "qib.h"

/**
 * qib_alloc_lkey - allocate an lkey
 * @rkt: lkey table in which to allocate the lkey
 * @mr: memory region that this lkey protects
 *
 * Returns 1 if successful, otherwise returns 0.
 */
int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr)
{
	unsigned long flags;
	u32 r;
	u32 n;
	int ret;

	spin_lock_irqsave(&rkt->lock, flags);

	/* Find the next available LKEY */
	r = rkt->next;
	n = r;
	for (;;) {
		if (rkt->table[r] == NULL)
			break;
		r = (r + 1) & (rkt->max - 1);
		if (r == n) {
			spin_unlock_irqrestore(&rkt->lock, flags);
			ret = 0;
			goto bail;
		}
	}
	rkt->next = (r + 1) & (rkt->max - 1);
	/*
	 * Make sure lkey is never zero which is reserved to indicate an
	 * unrestricted LKEY.
	 */
	rkt->gen++;
	mr->lkey = (r << (32 - ib_qib_lkey_table_size)) |
		((((1 << (24 - ib_qib_lkey_table_size)) - 1) & rkt->gen)
		 << 8);
	if (mr->lkey == 0) {
		mr->lkey |= 1 << 8;
		rkt->gen++;
	}
	rkt->table[r] = mr;
	spin_unlock_irqrestore(&rkt->lock, flags);

	ret = 1;

bail:
	return ret;
}
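
/*
 * Worked example of the lkey composition above (illustrative values,
 * assuming ib_qib_lkey_table_size == 16, table index r == 5, and
 * generation counter rkt->gen == 3):
 *
 *	index bits:  r << (32 - 16)                  = 5 << 16 = 0x00050000
 *	gen bits:    (((1 << (24 - 16)) - 1) & 3) << 8         = 0x00000300
 *	lkey:        0x00050000 | 0x00000300                   = 0x00050300
 *
 * The low 8 bits stay zero, the next (24 - table_size) bits carry the
 * generation count (so a stale, freed lkey is unlikely to revalidate),
 * and the top table_size bits index back into rkt->table[].
 */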

/**
 * qib_free_lkey - free an lkey
 * @dev: qib device holding the lkey table
 * @mr: memory region whose lkey is to be freed
 *
 * Returns 0 if the lkey was freed, or -EBUSY if the memory region is
 * still referenced.
 */
int qib_free_lkey(struct qib_ibdev *dev, struct qib_mregion *mr)
{
	unsigned long flags;
	u32 lkey = mr->lkey;
	u32 r;
	int ret;

	spin_lock_irqsave(&dev->lk_table.lock, flags);
	if (lkey == 0) {
		if (dev->dma_mr && dev->dma_mr == mr) {
			ret = atomic_read(&dev->dma_mr->refcount);
			if (!ret)
				dev->dma_mr = NULL;
		} else
			ret = 0;
	} else {
		r = lkey >> (32 - ib_qib_lkey_table_size);
		ret = atomic_read(&dev->lk_table.table[r]->refcount);
		if (!ret)
			dev->lk_table.table[r] = NULL;
	}
	spin_unlock_irqrestore(&dev->lk_table.lock, flags);

	if (ret)
		ret = -EBUSY;
	return ret;
}
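
/*
 * Illustrative caller sketch (assumed, not part of this file): a
 * deregistration path would check the return value and refuse to tear
 * the region down while a reference is still held, e.g.
 *
 *	ret = qib_free_lkey(dev, mr);
 *	if (ret)
 *		return ret;	(-EBUSY: a WQE still holds a reference)
 */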

/**
 * qib_lkey_ok - check IB SGE for validity and initialize
 * @rkt: table containing the lkey to check the SGE against
 * @pd: protection domain the SGE must belong to
 * @isge: outgoing internal SGE
 * @sge: SGE to check
 * @acc: access flags
 *
 * Returns 1 if valid and successful, otherwise returns 0.
 *
 * Check the IB SGE for validity and initialize our internal version
 * of it.
 */
int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
		struct qib_sge *isge, struct ib_sge *sge, int acc)
{
	struct qib_mregion *mr;
	unsigned n, m;
	size_t off;
	unsigned long flags;

	/*
	 * We use LKEY == zero for kernel virtual addresses
	 * (see qib_get_dma_mr and qib_dma.c).
	 */
	spin_lock_irqsave(&rkt->lock, flags);
	if (sge->lkey == 0) {
		struct qib_ibdev *dev = to_idev(pd->ibpd.device);

		if (pd->user)
			goto bail;
		if (!dev->dma_mr)
			goto bail;
		atomic_inc(&dev->dma_mr->refcount);
		spin_unlock_irqrestore(&rkt->lock, flags);

		isge->mr = dev->dma_mr;
		isge->vaddr = (void *) sge->addr;
		isge->length = sge->length;
		isge->sge_length = sge->length;
		isge->m = 0;
		isge->n = 0;
		goto ok;
	}
	mr = rkt->table[(sge->lkey >> (32 - ib_qib_lkey_table_size))];
	if (unlikely(mr == NULL || mr->lkey != sge->lkey ||
		     mr->pd != &pd->ibpd))
		goto bail;

	off = sge->addr - mr->user_base;
	if (unlikely(sge->addr < mr->user_base ||
		     off + sge->length > mr->length ||
		     (mr->access_flags & acc) != acc))
		goto bail;
	atomic_inc(&mr->refcount);
	spin_unlock_irqrestore(&rkt->lock, flags);

	off += mr->offset;
	if (mr->page_shift) {
		/*
		 * Page sizes are a uniform power of 2, so no loop is
		 * necessary.  entries_spanned_by_off is the number of
		 * times the loop below would have executed.
		 */
		size_t entries_spanned_by_off;

		entries_spanned_by_off = off >> mr->page_shift;
		off -= (entries_spanned_by_off << mr->page_shift);
		m = entries_spanned_by_off / QIB_SEGSZ;
		n = entries_spanned_by_off % QIB_SEGSZ;
	} else {
		m = 0;
		n = 0;
		while (off >= mr->map[m]->segs[n].length) {
			off -= mr->map[m]->segs[n].length;
			n++;
			if (n >= QIB_SEGSZ) {
				m++;
				n = 0;
			}
		}
	}
	isge->mr = mr;
	isge->vaddr = mr->map[m]->segs[n].vaddr + off;
	isge->length = mr->map[m]->segs[n].length - off;
	isge->sge_length = sge->length;
	isge->m = m;
	isge->n = n;
ok:
	return 1;
bail:
	spin_unlock_irqrestore(&rkt->lock, flags);
	return 0;
}
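
/*
 * Worked example of the page_shift fast path above (illustrative
 * values, assuming mr->page_shift == 12, i.e. 4 KiB pages):
 *
 *	off = 0x5234				(adjusted offset into the MR)
 *	entries_spanned_by_off = 0x5234 >> 12	= 5 pages
 *	off -= 5 << 12				(off is now 0x234)
 *	m = 5 / QIB_SEGSZ			(map block holding the segment)
 *	n = 5 % QIB_SEGSZ			(index within that map block)
 *
 * This lands on the same (m, n, off) that the while loop in the else
 * branch would reach by walking five equal-length segments.
 */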

/**
 * qib_rkey_ok - check the IB virtual address, length, and RKEY
 * @qp: queue pair of the request
 * @sge: SGE to initialize
 * @len: length of data
 * @vaddr: virtual address to place data
 * @rkey: rkey to check
 * @acc: access flags
 *
 * Returns 1 if successful, otherwise 0.
 */
int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
		u32 len, u64 vaddr, u32 rkey, int acc)
{
	struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
	struct qib_mregion *mr;
	unsigned n, m;
	size_t off;
	unsigned long flags;

	/*
	 * We use RKEY == zero for kernel virtual addresses
	 * (see qib_get_dma_mr and qib_dma.c).
	 */
	spin_lock_irqsave(&rkt->lock, flags);
	if (rkey == 0) {
		struct qib_pd *pd = to_ipd(qp->ibqp.pd);
		struct qib_ibdev *dev = to_idev(pd->ibpd.device);

		if (pd->user)
			goto bail;
		if (!dev->dma_mr)
			goto bail;
		atomic_inc(&dev->dma_mr->refcount);
		spin_unlock_irqrestore(&rkt->lock, flags);

		sge->mr = dev->dma_mr;
		sge->vaddr = (void *) vaddr;
		sge->length = len;
		sge->sge_length = len;
		sge->m = 0;
		sge->n = 0;
		goto ok;
	}
	mr = rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))];
	if (unlikely(mr == NULL || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
		goto bail;

	off = vaddr - mr->iova;
	if (unlikely(vaddr < mr->iova || off + len > mr->length ||
		     (mr->access_flags & acc) == 0))
		goto bail;
	atomic_inc(&mr->refcount);
	spin_unlock_irqrestore(&rkt->lock, flags);

	off += mr->offset;
	if (mr->page_shift) {
		/*
		 * Page sizes are a uniform power of 2, so no loop is
		 * necessary.  entries_spanned_by_off is the number of
		 * times the loop below would have executed.
		 */
		size_t entries_spanned_by_off;

		entries_spanned_by_off = off >> mr->page_shift;
		off -= (entries_spanned_by_off << mr->page_shift);
		m = entries_spanned_by_off / QIB_SEGSZ;
		n = entries_spanned_by_off % QIB_SEGSZ;
	} else {
		m = 0;
		n = 0;
		while (off >= mr->map[m]->segs[n].length) {
			off -= mr->map[m]->segs[n].length;
			n++;
			if (n >= QIB_SEGSZ) {
				m++;
				n = 0;
			}
		}
	}
	sge->mr = mr;
	sge->vaddr = mr->map[m]->segs[n].vaddr + off;
	sge->length = mr->map[m]->segs[n].length - off;
	sge->sge_length = len;
	sge->m = m;
	sge->n = n;
ok:
	return 1;
bail:
	spin_unlock_irqrestore(&rkt->lock, flags);
	return 0;
}
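
/*
 * Note on the two validation paths (observation, not from the original
 * comments): qib_lkey_ok checks the SGE address against mr->user_base
 * and requires every requested access bit to be present, failing on
 * (mr->access_flags & acc) != acc, while qib_rkey_ok checks the
 * remote vaddr against mr->iova and only requires some overlap,
 * failing on (mr->access_flags & acc) == 0.
 */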

/*
 * Initialize the memory region specified by the work request.
 */
int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr)
{
	struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
	struct qib_pd *pd = to_ipd(qp->ibqp.pd);
	struct qib_mregion *mr;
	u32 rkey = wr->wr.fast_reg.rkey;
	unsigned i, n, m;
	int ret = -EINVAL;
	unsigned long flags;
	u64 *page_list;
	size_t ps;

	spin_lock_irqsave(&rkt->lock, flags);
	if (pd->user || rkey == 0)
		goto bail;

	mr = rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))];
	if (unlikely(mr == NULL || qp->ibqp.pd != mr->pd))
		goto bail;

	if (wr->wr.fast_reg.page_list_len > mr->max_segs)
		goto bail;

	ps = 1UL << wr->wr.fast_reg.page_shift;
	if (wr->wr.fast_reg.length > ps * wr->wr.fast_reg.page_list_len)
		goto bail;

	mr->user_base = wr->wr.fast_reg.iova_start;
	mr->iova = wr->wr.fast_reg.iova_start;
	mr->lkey = rkey;
	mr->length = wr->wr.fast_reg.length;
	mr->access_flags = wr->wr.fast_reg.access_flags;
	page_list = wr->wr.fast_reg.page_list->page_list;
	m = 0;
	n = 0;
	for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
		mr->map[m]->segs[n].vaddr = (void *) page_list[i];
		mr->map[m]->segs[n].length = ps;
		if (++n == QIB_SEGSZ) {
			m++;
			n = 0;
		}
	}

	ret = 0;
bail:
	spin_unlock_irqrestore(&rkt->lock, flags);
	return ret;
}
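
/*
 * Illustrative caller sketch (assumed; it only uses the work-request
 * fields this function reads, and is not taken from the driver):
 *
 *	struct ib_send_wr wr;
 *
 *	memset(&wr, 0, sizeof(wr));
 *	wr.opcode = IB_WR_FAST_REG_MR;
 *	wr.wr.fast_reg.rkey = mr_rkey;		(must be nonzero)
 *	wr.wr.fast_reg.iova_start = iova;
 *	wr.wr.fast_reg.page_shift = PAGE_SHIFT;
 *	wr.wr.fast_reg.page_list_len = nr_pages;
 *	wr.wr.fast_reg.length = nr_pages << PAGE_SHIFT;
 *	wr.wr.fast_reg.page_list = page_list;	(u64 page addresses)
 *	wr.wr.fast_reg.access_flags = IB_ACCESS_REMOTE_WRITE;
 *
 * On such a request, qib_fast_reg_mr() rewrites the region's base,
 * iova, length, access flags, and per-page segment list, returning 0
 * on success or -EINVAL when the request does not fit the region
 * (too many pages, a length larger than the page list covers, a user
 * PD, or rkey == 0).
 */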