mthca_srq.c

/*
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_srq.c 3047 2005-08-10 03:59:35Z roland $
 */

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sched.h>

#include <asm/io.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"

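/*
 * SRQ buffers of up to MTHCA_MAX_DIRECT_SRQ_SIZE bytes are kept in a
 * single contiguous ("direct") allocation; larger queues are built
 * from a list of pages instead (see the srq->is_direct flag and
 * srq->queue.page_list, set up by mthca_buf_alloc()).
 */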
enum {
        MTHCA_MAX_DIRECT_SRQ_SIZE = 4 * PAGE_SIZE
};

struct mthca_tavor_srq_context {
        __be64 wqe_base_ds;     /* low 6 bits is descriptor size */
        __be32 state_pd;
        __be32 lkey;
        __be32 uar;
        __be16 limit_watermark;
        __be16 wqe_cnt;
        u32    reserved[2];
};

struct mthca_arbel_srq_context {
        __be32 state_logsize_srqn;
        __be32 lkey;
        __be32 db_index;
        __be32 logstride_usrpage;
        __be64 wqe_base;
        __be32 eq_pd;
        __be16 limit_watermark;
        __be16 wqe_cnt;
        u16    reserved1;
        __be16 wqe_counter;
        u32    reserved2[3];
};

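/*
 * Turn a WQE index into a buffer address: WQE n starts at byte offset
 * n << wqe_shift, either within the single direct buffer or within
 * whichever page of the page list that offset falls into.
 */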
static void *get_wqe(struct mthca_srq *srq, int n)
{
        if (srq->is_direct)
                return srq->queue.direct.buf + (n << srq->wqe_shift);
        else
                return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf +
                        ((n << srq->wqe_shift) & (PAGE_SIZE - 1));
}

/*
 * Return a pointer to the location within a WQE that we're using as a
 * link when the WQE is in the free list.  We use the imm field
 * because in the Tavor case, posting a WQE may overwrite the next
 * segment of the previous WQE, but a receive WQE will never touch the
 * imm field.  This avoids corrupting our free list if the previous
 * WQE has already completed and been put on the free list when we
 * post the next WQE.
 */
static inline int *wqe_to_link(void *wqe)
{
        return (int *) (wqe + offsetof(struct mthca_next_seg, imm));
}

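/*
 * The *_init_srq_context() functions below fill in the hardware SRQ
 * context that mthca_alloc_srq() hands to the firmware with the
 * SW2HW_SRQ command; Tavor and Arbel (memfree) use different layouts.
 */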
static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
                                         struct mthca_pd *pd,
                                         struct mthca_srq *srq,
                                         struct mthca_tavor_srq_context *context)
{
        memset(context, 0, sizeof *context);

        context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4));
        context->state_pd    = cpu_to_be32(pd->pd_num);
        context->lkey        = cpu_to_be32(srq->mr.ibmr.lkey);

        if (pd->ibpd.uobject)
                context->uar =
                        cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
        else
                context->uar = cpu_to_be32(dev->driver_uar.index);
}

static void mthca_arbel_init_srq_context(struct mthca_dev *dev,
                                         struct mthca_pd *pd,
                                         struct mthca_srq *srq,
                                         struct mthca_arbel_srq_context *context)
{
        int logsize, max;

        memset(context, 0, sizeof *context);

        /*
         * Put max in a temporary variable to work around gcc bug
         * triggered by ilog2() on sparc64.
         */
        max = srq->max;
        logsize = ilog2(max);

        context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn);
        context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
        context->db_index = cpu_to_be32(srq->db_index);
        context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29);
        if (pd->ibpd.uobject)
                context->logstride_usrpage |=
                        cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
        else
                context->logstride_usrpage |= cpu_to_be32(dev->driver_uar.index);
        context->eq_pd = cpu_to_be32(MTHCA_EQ_ASYNC << 24 | pd->pd_num);
}

static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq)
{
        mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue,
                       srq->is_direct, &srq->mr);
        kfree(srq->wrid);
}

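/*
 * Allocate the kernel SRQ buffer and thread every WQE onto the free
 * list.  Once mthca_alloc_srq() sets first_free = 0 and
 * last_free = max - 1, a four-entry SRQ looks like:
 *
 *   first_free = 0:  [0] -> [1] -> [2] -> [3] -> -1   (last_free = 3)
 *
 * mthca_*_post_srq_recv() consumes WQEs at first_free, and
 * mthca_free_srq_wqe() returns completed WQEs at last_free.
 */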
static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
                               struct mthca_srq *srq)
{
        struct mthca_data_seg *scatter;
        void *wqe;
        int err;
        int i;

        if (pd->ibpd.uobject)
                return 0;

        srq->wrid = kmalloc(srq->max * sizeof (u64), GFP_KERNEL);
        if (!srq->wrid)
                return -ENOMEM;

        err = mthca_buf_alloc(dev, srq->max << srq->wqe_shift,
                              MTHCA_MAX_DIRECT_SRQ_SIZE,
                              &srq->queue, &srq->is_direct, pd, 1, &srq->mr);
        if (err) {
                kfree(srq->wrid);
                return err;
        }

        /*
         * Now initialize the SRQ buffer so that all of the WQEs are
         * linked into the list of free WQEs.  In addition, set the
         * scatter list L_Keys to the sentry value of 0x100.
         */
        for (i = 0; i < srq->max; ++i) {
                wqe = get_wqe(srq, i);

                *wqe_to_link(wqe) = i < srq->max - 1 ? i + 1 : -1;

                for (scatter = wqe + sizeof (struct mthca_next_seg);
                     (void *) scatter < wqe + (1 << srq->wqe_shift);
                     ++scatter)
                        scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
        }

        srq->last = get_wqe(srq, srq->max - 1);

        return 0;
}

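/*
 * Create an SRQ: size the queue and allocate the SRQN plus (on memfree
 * hardware) its ICM entry and doorbell record, build the buffer and
 * free list, pass the context to firmware with SW2HW_SRQ, and publish
 * the SRQ in the srqn -> srq lookup table used by the event path.
 */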
int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
                    struct ib_srq_attr *attr, struct mthca_srq *srq)
{
        struct mthca_mailbox *mailbox;
        u8 status;
        int ds;
        int err;

        /* Sanity check SRQ size before proceeding */
        if (attr->max_wr  > dev->limits.max_srq_wqes ||
            attr->max_sge > dev->limits.max_srq_sge)
                return -EINVAL;

        srq->max     = attr->max_wr;
        srq->max_gs  = attr->max_sge;
        srq->counter = 0;

        if (mthca_is_memfree(dev))
                srq->max = roundup_pow_of_two(srq->max + 1);
        else
                srq->max = srq->max + 1;

        ds = max(64UL,
                 roundup_pow_of_two(sizeof (struct mthca_next_seg) +
                                    srq->max_gs * sizeof (struct mthca_data_seg)));

        if (!mthca_is_memfree(dev) && (ds > dev->limits.max_desc_sz))
                return -EINVAL;

        srq->wqe_shift = ilog2(ds);

        srq->srqn = mthca_alloc(&dev->srq_table.alloc);
        if (srq->srqn == -1)
                return -ENOMEM;

        if (mthca_is_memfree(dev)) {
                err = mthca_table_get(dev, dev->srq_table.table, srq->srqn);
                if (err)
                        goto err_out;

                if (!pd->ibpd.uobject) {
                        srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ,
                                                       srq->srqn, &srq->db);
                        if (srq->db_index < 0) {
                                err = -ENOMEM;
                                goto err_out_icm;
                        }
                }
        }

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox)) {
                err = PTR_ERR(mailbox);
                goto err_out_db;
        }

        err = mthca_alloc_srq_buf(dev, pd, srq);
        if (err)
                goto err_out_mailbox;

        spin_lock_init(&srq->lock);
        srq->refcount = 1;
        init_waitqueue_head(&srq->wait);
        mutex_init(&srq->mutex);

        if (mthca_is_memfree(dev))
                mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf);
        else
                mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf);

        err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn, &status);

        if (err) {
                mthca_warn(dev, "SW2HW_SRQ failed (%d)\n", err);
                goto err_out_free_buf;
        }
        if (status) {
                mthca_warn(dev, "SW2HW_SRQ returned status 0x%02x\n",
                           status);
                err = -EINVAL;
                goto err_out_free_buf;
        }

        spin_lock_irq(&dev->srq_table.lock);
        if (mthca_array_set(&dev->srq_table.srq,
                            srq->srqn & (dev->limits.num_srqs - 1),
                            srq)) {
                spin_unlock_irq(&dev->srq_table.lock);
                goto err_out_free_srq;
        }
        spin_unlock_irq(&dev->srq_table.lock);

        mthca_free_mailbox(dev, mailbox);

        srq->first_free = 0;
        srq->last_free  = srq->max - 1;

        attr->max_wr    = srq->max - 1;
        attr->max_sge   = srq->max_gs;

        return 0;

err_out_free_srq:
        err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
        if (err)
                mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
        else if (status)
                mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);

err_out_free_buf:
        if (!pd->ibpd.uobject)
                mthca_free_srq_buf(dev, srq);

err_out_mailbox:
        mthca_free_mailbox(dev, mailbox);

err_out_db:
        if (!pd->ibpd.uobject && mthca_is_memfree(dev))
                mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);

err_out_icm:
        mthca_table_put(dev, dev->srq_table.table, srq->srqn);

err_out:
        mthca_free(&dev->srq_table.alloc, srq->srqn);

        return err;
}

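/*
 * Snapshot the SRQ reference count under the table lock; used below to
 * wait until all event handlers have dropped their references.
 */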
static inline int get_srq_refcount(struct mthca_dev *dev, struct mthca_srq *srq)
{
        int c;

        spin_lock_irq(&dev->srq_table.lock);
        c = srq->refcount;
        spin_unlock_irq(&dev->srq_table.lock);

        return c;
}

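/*
 * Destroy an SRQ: return ownership to software with HW2SW_SRQ, remove
 * the SRQ from the lookup table so the event path can no longer find
 * it, wait for outstanding references to drain, then free the buffer,
 * doorbell record, ICM entry and SRQN.
 */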
void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
{
        struct mthca_mailbox *mailbox;
        int err;
        u8 status;

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox)) {
                mthca_warn(dev, "No memory for mailbox to free SRQ.\n");
                return;
        }

        err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
        if (err)
                mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
        else if (status)
                mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);

        spin_lock_irq(&dev->srq_table.lock);
        mthca_array_clear(&dev->srq_table.srq,
                          srq->srqn & (dev->limits.num_srqs - 1));
        --srq->refcount;
        spin_unlock_irq(&dev->srq_table.lock);

        wait_event(srq->wait, !get_srq_refcount(dev, srq));

        if (!srq->ibsrq.uobject) {
                mthca_free_srq_buf(dev, srq);
                if (mthca_is_memfree(dev))
                        mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
        }

        mthca_table_put(dev, dev->srq_table.table, srq->srqn);
        mthca_free(&dev->srq_table.alloc, srq->srqn);
        mthca_free_mailbox(dev, mailbox);
}

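/*
 * Only IB_SRQ_LIMIT is supported here: the limit is armed with the
 * ARM_SRQ firmware command.  Resizing (IB_SRQ_MAX_WR) is rejected.
 */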
int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                     enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
        struct mthca_dev *dev = to_mdev(ibsrq->device);
        struct mthca_srq *srq = to_msrq(ibsrq);
        int ret;
        u8 status;

        /* We don't support resizing SRQs (yet?) */
        if (attr_mask & IB_SRQ_MAX_WR)
                return -EINVAL;

        if (attr_mask & IB_SRQ_LIMIT) {
                u32 max_wr = mthca_is_memfree(dev) ? srq->max - 1 : srq->max;
                if (attr->srq_limit > max_wr)
                        return -EINVAL;

                mutex_lock(&srq->mutex);
                ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit, &status);
                mutex_unlock(&srq->mutex);

                if (ret)
                        return ret;
                if (status)
                        return -EINVAL;
        }

        return 0;
}

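/*
 * Read the current limit watermark back from the firmware with
 * QUERY_SRQ; max_wr and max_sge come from the driver's own state.
 */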
int mthca_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
        struct mthca_dev *dev = to_mdev(ibsrq->device);
        struct mthca_srq *srq = to_msrq(ibsrq);
        struct mthca_mailbox *mailbox;
        struct mthca_arbel_srq_context *arbel_ctx;
        struct mthca_tavor_srq_context *tavor_ctx;
        u8 status;
        int err;

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);

        err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox, &status);
        if (err)
                goto out;

        if (mthca_is_memfree(dev)) {
                arbel_ctx = mailbox->buf;
                srq_attr->srq_limit = be16_to_cpu(arbel_ctx->limit_watermark);
        } else {
                tavor_ctx = mailbox->buf;
                srq_attr->srq_limit = be16_to_cpu(tavor_ctx->limit_watermark);
        }

        srq_attr->max_wr  = srq->max - 1;
        srq_attr->max_sge = srq->max_gs;

out:
        mthca_free_mailbox(dev, mailbox);

        return err;
}

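/*
 * Dispatch an async event to the SRQ's consumer.  The reference count
 * is bumped while the handler runs so that mthca_free_srq() cannot
 * free the SRQ underneath us; whoever drops the last reference wakes
 * up the waiter.
 */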
void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
                     enum ib_event_type event_type)
{
        struct mthca_srq *srq;
        struct ib_event event;

        spin_lock(&dev->srq_table.lock);
        srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1));
        if (srq)
                ++srq->refcount;
        spin_unlock(&dev->srq_table.lock);

        if (!srq) {
                mthca_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
                return;
        }

        if (!srq->ibsrq.event_handler)
                goto out;

        event.device      = &dev->ib_dev;
        event.event       = event_type;
        event.element.srq = &srq->ibsrq;
        srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);

out:
        spin_lock(&dev->srq_table.lock);
        if (!--srq->refcount)
                wake_up(&srq->wait);
        spin_unlock(&dev->srq_table.lock);
}

/*
 * Return a completed WQE to the SRQ free list, linking it in at the
 * tail.  This function must be called with IRQs disabled.
 */
void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
{
        int ind;

        ind = wqe_addr >> srq->wqe_shift;

        spin_lock(&srq->lock);

        if (likely(srq->first_free >= 0))
                *wqe_to_link(get_wqe(srq, srq->last_free)) = ind;
        else
                srq->first_free = ind;

        *wqe_to_link(get_wqe(srq, ind)) = -1;
        srq->last_free = ind;

        spin_unlock(&srq->lock);
}

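/*
 * Post receive WRs on Tavor: each new WQE is chained in by patching
 * the previous WQE's nda_op/ee_nds fields, and the hardware is
 * notified through the receive doorbell in the UAR.  Doorbells are
 * coalesced, one ring per at most MTHCA_TAVOR_MAX_WQES_PER_RECV_DB
 * WQEs rather than one per WR.
 */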
int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
                              struct ib_recv_wr **bad_wr)
{
        struct mthca_dev *dev = to_mdev(ibsrq->device);
        struct mthca_srq *srq = to_msrq(ibsrq);
        unsigned long flags;
        int err = 0;
        int first_ind;
        int ind;
        int next_ind;
        int nreq;
        int i;
        void *wqe;
        void *prev_wqe;

        spin_lock_irqsave(&srq->lock, flags);

        first_ind = srq->first_free;

        for (nreq = 0; wr; wr = wr->next) {
                ind = srq->first_free;

                if (unlikely(ind < 0)) {
                        mthca_err(dev, "SRQ %06x full\n", srq->srqn);
                        err = -ENOMEM;
                        *bad_wr = wr;
                        break;
                }

                wqe      = get_wqe(srq, ind);
                next_ind = *wqe_to_link(wqe);

                if (unlikely(next_ind < 0)) {
                        mthca_err(dev, "SRQ %06x full\n", srq->srqn);
                        err = -ENOMEM;
                        *bad_wr = wr;
                        break;
                }

                prev_wqe  = srq->last;
                srq->last = wqe;

                ((struct mthca_next_seg *) wqe)->nda_op = 0;
                ((struct mthca_next_seg *) wqe)->ee_nds = 0;
                /* flags field will always remain 0 */

                wqe += sizeof (struct mthca_next_seg);

                if (unlikely(wr->num_sge > srq->max_gs)) {
                        err = -EINVAL;
                        *bad_wr = wr;
                        srq->last = prev_wqe;
                        break;
                }

                for (i = 0; i < wr->num_sge; ++i) {
                        mthca_set_data_seg(wqe, wr->sg_list + i);
                        wqe += sizeof (struct mthca_data_seg);
                }

                if (i < srq->max_gs)
                        mthca_set_data_seg_inval(wqe);

                ((struct mthca_next_seg *) prev_wqe)->nda_op =
                        cpu_to_be32((ind << srq->wqe_shift) | 1);
                wmb();
                ((struct mthca_next_seg *) prev_wqe)->ee_nds =
                        cpu_to_be32(MTHCA_NEXT_DBD);

                srq->wrid[ind]  = wr->wr_id;
                srq->first_free = next_ind;

                ++nreq;
                if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
                        nreq = 0;

                        /*
                         * Make sure that descriptors are written
                         * before doorbell is rung.
                         */
                        wmb();

                        mthca_write64(first_ind << srq->wqe_shift, srq->srqn << 8,
                                      dev->kar + MTHCA_RECEIVE_DOORBELL,
                                      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));

                        first_ind = srq->first_free;
                }
        }

        if (likely(nreq)) {
                /*
                 * Make sure that descriptors are written before
                 * doorbell is rung.
                 */
                wmb();

                mthca_write64(first_ind << srq->wqe_shift, (srq->srqn << 8) | nreq,
                              dev->kar + MTHCA_RECEIVE_DOORBELL,
                              MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
        }

        /*
         * Make sure doorbells don't leak out of SRQ spinlock and
         * reach the HCA out of order:
         */
        mmiowb();

        spin_unlock_irqrestore(&srq->lock, flags);
        return err;
}

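/*
 * Post receive WRs on Arbel (memfree): each WQE is written with its
 * nda_op link already pointing at the next free WQE, so no MMIO
 * doorbell is needed; the doorbell record (*srq->db) is simply
 * updated with the running count of posted WQEs.
 */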
int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
                              struct ib_recv_wr **bad_wr)
{
        struct mthca_dev *dev = to_mdev(ibsrq->device);
        struct mthca_srq *srq = to_msrq(ibsrq);
        unsigned long flags;
        int err = 0;
        int ind;
        int next_ind;
        int nreq;
        int i;
        void *wqe;

        spin_lock_irqsave(&srq->lock, flags);

        for (nreq = 0; wr; ++nreq, wr = wr->next) {
                ind = srq->first_free;

                if (unlikely(ind < 0)) {
                        mthca_err(dev, "SRQ %06x full\n", srq->srqn);
                        err = -ENOMEM;
                        *bad_wr = wr;
                        break;
                }

                wqe      = get_wqe(srq, ind);
                next_ind = *wqe_to_link(wqe);

                if (unlikely(next_ind < 0)) {
                        mthca_err(dev, "SRQ %06x full\n", srq->srqn);
                        err = -ENOMEM;
                        *bad_wr = wr;
                        break;
                }

                ((struct mthca_next_seg *) wqe)->nda_op =
                        cpu_to_be32((next_ind << srq->wqe_shift) | 1);
                ((struct mthca_next_seg *) wqe)->ee_nds = 0;
                /* flags field will always remain 0 */

                wqe += sizeof (struct mthca_next_seg);

                if (unlikely(wr->num_sge > srq->max_gs)) {
                        err = -EINVAL;
                        *bad_wr = wr;
                        break;
                }

                for (i = 0; i < wr->num_sge; ++i) {
                        mthca_set_data_seg(wqe, wr->sg_list + i);
                        wqe += sizeof (struct mthca_data_seg);
                }

                if (i < srq->max_gs)
                        mthca_set_data_seg_inval(wqe);

                srq->wrid[ind]  = wr->wr_id;
                srq->first_free = next_ind;
        }

        if (likely(nreq)) {
                srq->counter += nreq;

                /*
                 * Make sure that descriptors are written before
                 * we write doorbell record.
                 */
                wmb();
                *srq->db = cpu_to_be32(srq->counter);
        }

        spin_unlock_irqrestore(&srq->lock, flags);
        return err;
}

int mthca_max_srq_sge(struct mthca_dev *dev)
{
        if (mthca_is_memfree(dev))
                return dev->limits.max_sg;

        /*
         * SRQ allocations are based on powers of 2 for Tavor,
         * (although they only need to be multiples of 16 bytes).
         *
         * Therefore, we need to base the max number of sg entries on
         * the largest power of 2 descriptor size that is <= to the
         * actual max WQE descriptor size, rather than return the
         * max_sg value given by the firmware (which is based on WQE
         * sizes as multiples of 16, not powers of 2).
         *
         * If SRQ implementation is changed for Tavor to be based on
         * multiples of 16, the calculation below can be deleted and
         * the FW max_sg value returned.
         */
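        /*
         * Worked example (assuming the usual 16-byte next and data
         * segments from mthca_wqe.h): with max_desc_sz == 1008, the
         * largest power of two that fits is 512, giving
         * (512 - 16) / 16 = 31 scatter entries.
         */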
        return min_t(int, dev->limits.max_sg,
                     ((1 << (fls(dev->limits.max_desc_sz) - 1)) -
                      sizeof (struct mthca_next_seg)) /
                     sizeof (struct mthca_data_seg));
}

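/*
 * Set up the SRQN allocator and the srqn -> mthca_srq lookup array
 * used by the event path.
 */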
int mthca_init_srq_table(struct mthca_dev *dev)
{
        int err;

        if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
                return 0;

        spin_lock_init(&dev->srq_table.lock);

        err = mthca_alloc_init(&dev->srq_table.alloc,
                               dev->limits.num_srqs,
                               dev->limits.num_srqs - 1,
                               dev->limits.reserved_srqs);
        if (err)
                return err;

        err = mthca_array_init(&dev->srq_table.srq,
                               dev->limits.num_srqs);
        if (err)
                mthca_alloc_cleanup(&dev->srq_table.alloc);

        return err;
}

void mthca_cleanup_srq_table(struct mthca_dev *dev)
{
        if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
                return;

        mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs);
        mthca_alloc_cleanup(&dev->srq_table.alloc);
}