cxio_hal.c

/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <asm/delay.h>

#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <net/net_namespace.h>

#include "cxio_resource.h"
#include "cxio_hal.h"
#include "cxgb3_offload.h"
#include "sge_defs.h"

static LIST_HEAD(rdev_list);
static cxio_hal_ev_callback_func_t cxio_ev_cb = NULL;

static struct cxio_rdev *cxio_hal_find_rdev_by_name(char *dev_name)
{
	struct cxio_rdev *rdev;

	list_for_each_entry(rdev, &rdev_list, entry)
		if (!strcmp(rdev->dev_name, dev_name))
			return rdev;
	return NULL;
}

static struct cxio_rdev *cxio_hal_find_rdev_by_t3cdev(struct t3cdev *tdev)
{
	struct cxio_rdev *rdev;

	list_for_each_entry(rdev, &rdev_list, entry)
		if (rdev->t3cdev_p == tdev)
			return rdev;
	return NULL;
}

int cxio_hal_cq_op(struct cxio_rdev *rdev_p, struct t3_cq *cq,
		   enum t3_cq_opcode op, u32 credit)
{
	int ret;
	struct t3_cqe *cqe;
	u32 rptr;
	struct rdma_cq_op setup;

	setup.id = cq->cqid;
	setup.credits = (op == CQ_CREDIT_UPDATE) ? credit : 0;
	setup.op = op;
	ret = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_OP, &setup);

	if ((ret < 0) || (op == CQ_CREDIT_UPDATE))
		return ret;

	/*
	 * If the rearm returned an index other than our current index,
	 * then there might be CQE's in flight (being DMA'd).  We must wait
	 * here for them to complete or the consumer can miss a notification.
	 */
	if (Q_PTR2IDX((cq->rptr), cq->size_log2) != ret) {
		int i = 0;

		rptr = cq->rptr;

		/*
		 * Keep the generation correct by bumping rptr until it
		 * matches the index returned by the rearm - 1.
		 */
		while (Q_PTR2IDX((rptr+1), cq->size_log2) != ret)
			rptr++;

		/*
		 * Now rptr is the index for the (last) cqe that was
		 * in-flight at the time the HW rearmed the CQ.  We
		 * spin until that CQE is valid.
		 */
		cqe = cq->queue + Q_PTR2IDX(rptr, cq->size_log2);
		while (!CQ_VLD_ENTRY(rptr, cq->size_log2, cqe)) {
			udelay(1);
			if (i++ > 1000000) {
				printk(KERN_ERR "%s: stalled rnic\n",
				       rdev_p->dev_name);
				BUG_ON(1);
				return -EIO;
			}
		}

		return 1;
	}

	return 0;
}

static int cxio_hal_clear_cq_ctx(struct cxio_rdev *rdev_p, u32 cqid)
{
	struct rdma_cq_setup setup;

	setup.id = cqid;
	setup.base_addr = 0;	/* NULL address */
	setup.size = 0;		/* disable the CQ */
	setup.credits = 0;
	setup.credit_thres = 0;
	setup.ovfl_mode = 0;
	return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
}

static int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid)
{
	u64 sge_cmd;
	struct t3_modify_qp_wr *wqe;
	struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);

	if (!skb) {
		PDBG("%s alloc_skb failed\n", __FUNCTION__);
		return -ENOMEM;
	}
	wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof(*wqe));
	build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD, 3, 0, qpid, 7);
	wqe->flags = cpu_to_be32(MODQP_WRITE_EC);
	sge_cmd = qpid << 8 | 3;
	wqe->sge_cmd = cpu_to_be64(sge_cmd);
	skb->priority = CPL_PRIORITY_CONTROL;
	return (cxgb3_ofld_send(rdev_p->t3cdev_p, skb));
}

int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
{
	struct rdma_cq_setup setup;
	int size = (1UL << (cq->size_log2)) * sizeof(struct t3_cqe);

	cq->cqid = cxio_hal_get_cqid(rdev_p->rscp);
	if (!cq->cqid)
		return -ENOMEM;
	cq->sw_queue = kzalloc(size, GFP_KERNEL);
	if (!cq->sw_queue)
		return -ENOMEM;
	cq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev),
				       (1UL << (cq->size_log2)) *
				       sizeof(struct t3_cqe),
				       &(cq->dma_addr), GFP_KERNEL);
	if (!cq->queue) {
		kfree(cq->sw_queue);
		return -ENOMEM;
	}
	pci_unmap_addr_set(cq, mapping, cq->dma_addr);
	memset(cq->queue, 0, size);
	setup.id = cq->cqid;
	setup.base_addr = (u64) (cq->dma_addr);
	setup.size = 1UL << cq->size_log2;
	setup.credits = 65535;
	setup.credit_thres = 1;
	if (rdev_p->t3cdev_p->type == T3B)
		setup.ovfl_mode = 0;
	else
		setup.ovfl_mode = 1;
	return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
}

int cxio_resize_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
{
	struct rdma_cq_setup setup;

	setup.id = cq->cqid;
	setup.base_addr = (u64) (cq->dma_addr);
	setup.size = 1UL << cq->size_log2;
	setup.credits = setup.size;
	setup.credit_thres = setup.size;	/* TBD: overflow recovery */
	setup.ovfl_mode = 1;
	return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
}
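
/*
 * QPIDs are handed out in blocks: cxio_hal_get_qpid() returns the first id
 * of a block of (qpmask + 1) ids, and get_qpid() below caches the remaining
 * ids of that block on the per-ucontext free list so later QPs in the same
 * context can reuse them without going back to the resource pool.
 */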
static u32 get_qpid(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
{
	struct cxio_qpid_list *entry;
	u32 qpid;
	int i;

	mutex_lock(&uctx->lock);
	if (!list_empty(&uctx->qpids)) {
		entry = list_entry(uctx->qpids.next, struct cxio_qpid_list,
				   entry);
		list_del(&entry->entry);
		qpid = entry->qpid;
		kfree(entry);
	} else {
		qpid = cxio_hal_get_qpid(rdev_p->rscp);
		if (!qpid)
			goto out;
		for (i = qpid+1; i & rdev_p->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				break;
			entry->qpid = i;
			list_add_tail(&entry->entry, &uctx->qpids);
		}
	}
out:
	mutex_unlock(&uctx->lock);
	PDBG("%s qpid 0x%x\n", __FUNCTION__, qpid);
	return qpid;
}

static void put_qpid(struct cxio_rdev *rdev_p, u32 qpid,
		     struct cxio_ucontext *uctx)
{
	struct cxio_qpid_list *entry;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return;
	PDBG("%s qpid 0x%x\n", __FUNCTION__, qpid);
	entry->qpid = qpid;
	mutex_lock(&uctx->lock);
	list_add_tail(&entry->entry, &uctx->qpids);
	mutex_unlock(&uctx->lock);
}

void cxio_release_ucontext(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
{
	struct list_head *pos, *nxt;
	struct cxio_qpid_list *entry;

	mutex_lock(&uctx->lock);
	list_for_each_safe(pos, nxt, &uctx->qpids) {
		entry = list_entry(pos, struct cxio_qpid_list, entry);
		list_del_init(&entry->entry);
		if (!(entry->qpid & rdev_p->qpmask))
			cxio_hal_put_qpid(rdev_p->rscp, entry->qpid);
		kfree(entry);
	}
	mutex_unlock(&uctx->lock);
}

void cxio_init_ucontext(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
{
	INIT_LIST_HEAD(&uctx->qpids);
	mutex_init(&uctx->lock);
}
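
/*
 * A work queue is backed by four pieces of memory: a kernel shadow SQ
 * (wq->sq), an array of RQ wr_ids (wq->rq), an entry in the hardware RQT
 * (wq->rq_addr), and the DMA-coherent ring of union t3_wr that the HW
 * actually consumes (wq->queue).  Userspace QPs additionally get the
 * physical address of their doorbell page in wq->udb.
 */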
int cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain,
		   struct t3_wq *wq, struct cxio_ucontext *uctx)
{
	int depth = 1UL << wq->size_log2;
	int rqsize = 1UL << wq->rq_size_log2;

	wq->qpid = get_qpid(rdev_p, uctx);
	if (!wq->qpid)
		return -ENOMEM;

	wq->rq = kzalloc(depth * sizeof(u64), GFP_KERNEL);
	if (!wq->rq)
		goto err1;

	wq->rq_addr = cxio_hal_rqtpool_alloc(rdev_p, rqsize);
	if (!wq->rq_addr)
		goto err2;

	wq->sq = kzalloc(depth * sizeof(struct t3_swsq), GFP_KERNEL);
	if (!wq->sq)
		goto err3;

	wq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev),
				       depth * sizeof(union t3_wr),
				       &(wq->dma_addr), GFP_KERNEL);
	if (!wq->queue)
		goto err4;

	memset(wq->queue, 0, depth * sizeof(union t3_wr));
	pci_unmap_addr_set(wq, mapping, wq->dma_addr);
	wq->doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
	if (!kernel_domain)
		wq->udb = (u64)rdev_p->rnic_info.udbell_physbase +
					(wq->qpid << rdev_p->qpshift);
	PDBG("%s qpid 0x%x doorbell 0x%p udb 0x%llx\n", __FUNCTION__,
	     wq->qpid, wq->doorbell, (unsigned long long) wq->udb);
	return 0;
err4:
	kfree(wq->sq);
err3:
	cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, rqsize);
err2:
	kfree(wq->rq);
err1:
	put_qpid(rdev_p, wq->qpid, uctx);
	return -ENOMEM;
}

int cxio_destroy_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
{
	int err;

	err = cxio_hal_clear_cq_ctx(rdev_p, cq->cqid);
	kfree(cq->sw_queue);
	dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
			  (1UL << (cq->size_log2))
			  * sizeof(struct t3_cqe), cq->queue,
			  pci_unmap_addr(cq, mapping));
	cxio_hal_put_cqid(rdev_p->rscp, cq->cqid);
	return err;
}

int cxio_destroy_qp(struct cxio_rdev *rdev_p, struct t3_wq *wq,
		    struct cxio_ucontext *uctx)
{
	dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
			  (1UL << (wq->size_log2))
			  * sizeof(union t3_wr), wq->queue,
			  pci_unmap_addr(wq, mapping));
	kfree(wq->sq);
	cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, (1UL << wq->rq_size_log2));
	kfree(wq->rq);
	put_qpid(rdev_p, wq->qpid, uctx);
	return 0;
}
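
/*
 * When a QP is moved to error, any receive or send WRs still outstanding
 * are completed in software: the flush helpers below synthesize CQEs with
 * status TPT_ERR_SWFLUSH and the SWCQE bit set, and append them to the
 * software CQ (cq->sw_queue) for the poll path to hand back to the consumer.
 */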
static void insert_recv_cqe(struct t3_wq *wq, struct t3_cq *cq)
{
	struct t3_cqe cqe;

	PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __FUNCTION__,
	     wq, cq, cq->sw_rptr, cq->sw_wptr);
	memset(&cqe, 0, sizeof(cqe));
	cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |
				 V_CQE_OPCODE(T3_SEND) |
				 V_CQE_TYPE(0) |
				 V_CQE_SWCQE(1) |
				 V_CQE_QPID(wq->qpid) |
				 V_CQE_GENBIT(Q_GENBIT(cq->sw_wptr,
						       cq->size_log2)));
	*(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe;
	cq->sw_wptr++;
}

void cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count)
{
	u32 ptr;

	PDBG("%s wq %p cq %p\n", __FUNCTION__, wq, cq);

	/* flush RQ */
	PDBG("%s rq_rptr %u rq_wptr %u skip count %u\n", __FUNCTION__,
	     wq->rq_rptr, wq->rq_wptr, count);
	ptr = wq->rq_rptr + count;
	while (ptr++ != wq->rq_wptr)
		insert_recv_cqe(wq, cq);
}

static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq,
			  struct t3_swsq *sqp)
{
	struct t3_cqe cqe;

	PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __FUNCTION__,
	     wq, cq, cq->sw_rptr, cq->sw_wptr);
	memset(&cqe, 0, sizeof(cqe));
	cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |
				 V_CQE_OPCODE(sqp->opcode) |
				 V_CQE_TYPE(1) |
				 V_CQE_SWCQE(1) |
				 V_CQE_QPID(wq->qpid) |
				 V_CQE_GENBIT(Q_GENBIT(cq->sw_wptr,
						       cq->size_log2)));
	cqe.u.scqe.wrid_hi = sqp->sq_wptr;

	*(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe;
	cq->sw_wptr++;
}

void cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count)
{
	__u32 ptr;
	struct t3_swsq *sqp = wq->sq + Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2);

	ptr = wq->sq_rptr + count;
	sqp += count;
	while (ptr != wq->sq_wptr) {
		insert_sq_cqe(wq, cq, sqp);
		sqp++;
		ptr++;
	}
}

/*
 * Move all CQEs from the HWCQ into the SWCQ.
 */
void cxio_flush_hw_cq(struct t3_cq *cq)
{
	struct t3_cqe *cqe, *swcqe;

	PDBG("%s cq %p cqid 0x%x\n", __FUNCTION__, cq, cq->cqid);
	cqe = cxio_next_hw_cqe(cq);
	while (cqe) {
		PDBG("%s flushing hwcq rptr 0x%x to swcq wptr 0x%x\n",
		     __FUNCTION__, cq->rptr, cq->sw_wptr);
		swcqe = cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2);
		*swcqe = *cqe;
		swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
		cq->sw_wptr++;
		cq->rptr++;
		cqe = cxio_next_hw_cqe(cq);
	}
}

static int cqe_completes_wr(struct t3_cqe *cqe, struct t3_wq *wq)
{
	if (CQE_OPCODE(*cqe) == T3_TERMINATE)
		return 0;

	if ((CQE_OPCODE(*cqe) == T3_RDMA_WRITE) && RQ_TYPE(*cqe))
		return 0;

	if ((CQE_OPCODE(*cqe) == T3_READ_RESP) && SQ_TYPE(*cqe))
		return 0;

	if ((CQE_OPCODE(*cqe) == T3_SEND) && RQ_TYPE(*cqe) &&
	    Q_EMPTY(wq->rq_rptr, wq->rq_wptr))
		return 0;

	return 1;
}

void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
{
	struct t3_cqe *cqe;
	u32 ptr;

	*count = 0;
	ptr = cq->sw_rptr;
	while (!Q_EMPTY(ptr, cq->sw_wptr)) {
		cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2));
		if ((SQ_TYPE(*cqe) || (CQE_OPCODE(*cqe) == T3_READ_RESP)) &&
		    (CQE_QPID(*cqe) == wq->qpid))
			(*count)++;
		ptr++;
	}
	PDBG("%s cq %p count %d\n", __FUNCTION__, cq, *count);
}

void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
{
	struct t3_cqe *cqe;
	u32 ptr;

	*count = 0;
	PDBG("%s count zero %d\n", __FUNCTION__, *count);
	ptr = cq->sw_rptr;
	while (!Q_EMPTY(ptr, cq->sw_wptr)) {
		cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2));
		if (RQ_TYPE(*cqe) && (CQE_OPCODE(*cqe) != T3_READ_RESP) &&
		    (CQE_QPID(*cqe) == wq->qpid) && cqe_completes_wr(cqe, wq))
			(*count)++;
		ptr++;
	}
	PDBG("%s cq %p count %d\n", __FUNCTION__, cq, *count);
}

static int cxio_hal_init_ctrl_cq(struct cxio_rdev *rdev_p)
{
	struct rdma_cq_setup setup;

	setup.id = 0;
	setup.base_addr = 0;	/* NULL address */
	setup.size = 1;		/* enable the CQ */
	setup.credits = 0;
	/* force SGE to redirect to RspQ and interrupt */
	setup.credit_thres = 0;
	setup.ovfl_mode = 1;
	return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
}
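
/*
 * The control QP is a kernel-only work queue used to write adapter memory
 * (PBLs and TPT entries) via bypass/UTX work requests.  Setting it up means
 * allocating a DMA-coherent work queue and then programming the hardware
 * context for T3_CTL_QP_TID through a T3_WR_QP_MOD work request whose
 * ctx0/ctx1 words carry the queue size, base address, credits and the
 * valid bit.
 */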
static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
{
	int err;
	u64 sge_cmd, ctx0, ctx1;
	u64 base_addr;
	struct t3_modify_qp_wr *wqe;
	struct sk_buff *skb;

	skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
	if (!skb) {
		PDBG("%s alloc_skb failed\n", __FUNCTION__);
		return -ENOMEM;
	}
	err = cxio_hal_init_ctrl_cq(rdev_p);
	if (err) {
		PDBG("%s err %d initializing ctrl_cq\n", __FUNCTION__, err);
		goto err;
	}
	rdev_p->ctrl_qp.workq = dma_alloc_coherent(
					&(rdev_p->rnic_info.pdev->dev),
					(1 << T3_CTRL_QP_SIZE_LOG2) *
					sizeof(union t3_wr),
					&(rdev_p->ctrl_qp.dma_addr),
					GFP_KERNEL);
	if (!rdev_p->ctrl_qp.workq) {
		PDBG("%s dma_alloc_coherent failed\n", __FUNCTION__);
		err = -ENOMEM;
		goto err;
	}
	pci_unmap_addr_set(&rdev_p->ctrl_qp, mapping,
			   rdev_p->ctrl_qp.dma_addr);
	rdev_p->ctrl_qp.doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
	memset(rdev_p->ctrl_qp.workq, 0,
	       (1 << T3_CTRL_QP_SIZE_LOG2) * sizeof(union t3_wr));

	mutex_init(&rdev_p->ctrl_qp.lock);
	init_waitqueue_head(&rdev_p->ctrl_qp.waitq);

	/* update HW Ctrl QP context */
	base_addr = rdev_p->ctrl_qp.dma_addr;
	base_addr >>= 12;
	ctx0 = (V_EC_SIZE((1 << T3_CTRL_QP_SIZE_LOG2)) |
		V_EC_BASE_LO((u32) base_addr & 0xffff));
	ctx0 <<= 32;
	ctx0 |= V_EC_CREDITS(FW_WR_NUM);
	base_addr >>= 16;
	ctx1 = (u32) base_addr;
	base_addr >>= 32;
	ctx1 |= ((u64) (V_EC_BASE_HI((u32) base_addr & 0xf) | V_EC_RESPQ(0) |
			V_EC_TYPE(0) | V_EC_GEN(1) |
			V_EC_UP_TOKEN(T3_CTL_QP_TID) | F_EC_VALID)) << 32;
	wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof(*wqe));
	build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD, 0, 0,
		       T3_CTL_QP_TID, 7);
	wqe->flags = cpu_to_be32(MODQP_WRITE_EC);
	sge_cmd = (3ULL << 56) | FW_RI_SGEEC_START << 8 | 3;
	wqe->sge_cmd = cpu_to_be64(sge_cmd);
	wqe->ctx1 = cpu_to_be64(ctx1);
	wqe->ctx0 = cpu_to_be64(ctx0);
	PDBG("CtrlQP dma_addr 0x%llx workq %p size %d\n",
	     (unsigned long long) rdev_p->ctrl_qp.dma_addr,
	     rdev_p->ctrl_qp.workq, 1 << T3_CTRL_QP_SIZE_LOG2);
	skb->priority = CPL_PRIORITY_CONTROL;
	return (cxgb3_ofld_send(rdev_p->t3cdev_p, skb));
err:
	kfree_skb(skb);
	return err;
}

static int cxio_hal_destroy_ctrl_qp(struct cxio_rdev *rdev_p)
{
	dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
			  (1UL << T3_CTRL_QP_SIZE_LOG2)
			  * sizeof(union t3_wr), rdev_p->ctrl_qp.workq,
			  pci_unmap_addr(&rdev_p->ctrl_qp, mapping));
	return cxio_hal_clear_qp_ctx(rdev_p, T3_CTRL_QP_ID);
}

/* write len bytes of data into addr (32B aligned address)
 * If data is NULL, clear len bytes of memory to zero.
 * The caller acquires the ctrl_qp lock before the call.
 */
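
/*
 * Each work request posted below is laid out as a t3_bypass_wr header,
 * followed by an 8-byte UTX memory-write command (destination address plus
 * a length expressed in 32B units), followed by up to 96 bytes of inline
 * payload padded to a 32B boundary.  The fw_riwrh header is written last so
 * the generation bit only becomes valid once the rest of the WR is in place.
 */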
static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr,
				      u32 len, void *data, int completion)
{
	u32 i, nr_wqe, copy_len;
	u8 *copy_data;
	u8 wr_len, utx_len;	/* length in 8-byte flits */
	enum t3_wr_flags flag;
	__be64 *wqe;
	u64 utx_cmd;

	addr &= 0x7FFFFFF;
	nr_wqe = len % 96 ? len / 96 + 1 : len / 96;	/* 96B max per WQE */
	PDBG("%s wptr 0x%x rptr 0x%x len %d, nr_wqe %d data %p addr 0x%0x\n",
	     __FUNCTION__, rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, len,
	     nr_wqe, data, addr);
	utx_len = 3;	/* in 32B unit */
	for (i = 0; i < nr_wqe; i++) {
		if (Q_FULL(rdev_p->ctrl_qp.rptr, rdev_p->ctrl_qp.wptr,
			   T3_CTRL_QP_SIZE_LOG2)) {
			PDBG("%s ctrl_qp full wptr 0x%0x rptr 0x%0x, "
			     "wait for more space i %d\n", __FUNCTION__,
			     rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, i);
			if (wait_event_interruptible(rdev_p->ctrl_qp.waitq,
					     !Q_FULL(rdev_p->ctrl_qp.rptr,
						     rdev_p->ctrl_qp.wptr,
						     T3_CTRL_QP_SIZE_LOG2))) {
				PDBG("%s ctrl_qp workq interrupted\n",
				     __FUNCTION__);
				return -ERESTARTSYS;
			}
			PDBG("%s ctrl_qp wakeup, continue posting work request "
			     "i %d\n", __FUNCTION__, i);
		}
		wqe = (__be64 *)(rdev_p->ctrl_qp.workq + (rdev_p->ctrl_qp.wptr %
						(1 << T3_CTRL_QP_SIZE_LOG2)));
		flag = 0;
		if (i == (nr_wqe - 1)) {
			/* last WQE */
			flag = completion ? T3_COMPLETION_FLAG : 0;
			if (len % 32)
				utx_len = len / 32 + 1;
			else
				utx_len = len / 32;
		}

		/*
		 * Force a CQE to return the credit to the workq in case
		 * we posted more than half the max QP size of WRs
		 */
		if ((i != 0) &&
		    (i % (((1 << T3_CTRL_QP_SIZE_LOG2)) >> 1) == 0)) {
			flag = T3_COMPLETION_FLAG;
			PDBG("%s force completion at i %d\n", __FUNCTION__, i);
		}

		/* build the utx mem command */
		wqe += (sizeof(struct t3_bypass_wr) >> 3);
		utx_cmd = (T3_UTX_MEM_WRITE << 28) | (addr + i * 3);
		utx_cmd <<= 32;
		utx_cmd |= (utx_len << 28) | ((utx_len << 2) + 1);
		*wqe = cpu_to_be64(utx_cmd);
		wqe++;
		copy_data = (u8 *) data + i * 96;
		copy_len = len > 96 ? 96 : len;

		/* clear memory content if data is NULL */
		if (data)
			memcpy(wqe, copy_data, copy_len);
		else
			memset(wqe, 0, copy_len);
		if (copy_len % 32)
			memset(((u8 *) wqe) + copy_len, 0,
			       32 - (copy_len % 32));
		wr_len = ((sizeof(struct t3_bypass_wr)) >> 3) + 1 +
			 (utx_len << 2);
		wqe = (__be64 *)(rdev_p->ctrl_qp.workq + (rdev_p->ctrl_qp.wptr %
						(1 << T3_CTRL_QP_SIZE_LOG2)));

		/* wptr in the WRID[31:0] */
		((union t3_wrid *)(wqe+1))->id0.low = rdev_p->ctrl_qp.wptr;

		/*
		 * This must be the last write with a memory barrier
		 * for the genbit
		 */
		build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_BP, flag,
			       Q_GENBIT(rdev_p->ctrl_qp.wptr,
					T3_CTRL_QP_SIZE_LOG2), T3_CTRL_QP_ID,
			       wr_len);
		if (flag == T3_COMPLETION_FLAG)
			ring_doorbell(rdev_p->ctrl_qp.doorbell, T3_CTRL_QP_ID);
		len -= 96;
		rdev_p->ctrl_qp.wptr++;
	}
	return 0;
}

/* IN: stag key, pdid, perm, zbva, to, len, page_size, pbl, and pbl_size
 * OUT: stag index, actual pbl_size, pbl_addr allocated.
 * TBD: shared memory region support
 */
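
/*
 * An STag is the TPT index in its upper 24 bits with an 8-bit key in the
 * low byte (stag = stag_idx << 8 | key).  For a new registration this helper
 * allocates the index and, if a PBL is supplied, writes the PBL and then the
 * TPT entry into adapter memory through the control QP; for a
 * de-registration it frees the PBL space, zeroes the TPT entry, and returns
 * the index to the free pool.
 */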
static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
			 u32 *stag, u8 stag_state, u32 pdid,
			 enum tpt_mem_type type, enum tpt_mem_perm perm,
			 u32 zbva, u64 to, u32 len, u8 page_size, __be64 *pbl,
			 u32 *pbl_size, u32 *pbl_addr)
{
	int err;
	struct tpt_entry tpt;
	u32 stag_idx;
	u32 wptr;
	int rereg = (*stag != T3_STAG_UNSET);

	stag_state = stag_state > 0;
	stag_idx = (*stag) >> 8;

	if ((!reset_tpt_entry) && !(*stag != T3_STAG_UNSET)) {
		stag_idx = cxio_hal_get_stag(rdev_p->rscp);
		if (!stag_idx)
			return -ENOMEM;
		*stag = (stag_idx << 8) | ((*stag) & 0xFF);
	}
	PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
	     __FUNCTION__, stag_state, type, pdid, stag_idx);

	if (reset_tpt_entry)
		cxio_hal_pblpool_free(rdev_p, *pbl_addr, *pbl_size << 3);
	else if (!rereg) {
		*pbl_addr = cxio_hal_pblpool_alloc(rdev_p, *pbl_size << 3);
		if (!*pbl_addr) {
			return -ENOMEM;
		}
	}

	mutex_lock(&rdev_p->ctrl_qp.lock);

	/* write PBL first if any - update pbl only if pbl list exists */
	if (pbl) {
		PDBG("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
		     __FUNCTION__, *pbl_addr, rdev_p->rnic_info.pbl_base,
		     *pbl_size);
		err = cxio_hal_ctrl_qp_write_mem(rdev_p,
						 (*pbl_addr >> 5),
						 (*pbl_size << 3), pbl, 0);
		if (err)
			goto ret;
	}

	/* write TPT entry */
	if (reset_tpt_entry)
		memset(&tpt, 0, sizeof(tpt));
	else {
		tpt.valid_stag_pdid = cpu_to_be32(F_TPT_VALID |
				V_TPT_STAG_KEY((*stag) & M_TPT_STAG_KEY) |
				V_TPT_STAG_STATE(stag_state) |
				V_TPT_STAG_TYPE(type) | V_TPT_PDID(pdid));
		BUG_ON(page_size >= 28);
		tpt.flags_pagesize_qpid = cpu_to_be32(V_TPT_PERM(perm) |
				F_TPT_MW_BIND_ENABLE |
				V_TPT_ADDR_TYPE((zbva ? TPT_ZBTO : TPT_VATO)) |
				V_TPT_PAGE_SIZE(page_size));
		tpt.rsvd_pbl_addr = reset_tpt_entry ? 0 :
			cpu_to_be32(V_TPT_PBL_ADDR(PBL_OFF(rdev_p, *pbl_addr)>>3));
		tpt.len = cpu_to_be32(len);
		tpt.va_hi = cpu_to_be32((u32) (to >> 32));
		tpt.va_low_or_fbo = cpu_to_be32((u32) (to & 0xFFFFFFFFULL));
		tpt.rsvd_bind_cnt_or_pstag = 0;
		tpt.rsvd_pbl_size = reset_tpt_entry ? 0 :
			cpu_to_be32(V_TPT_PBL_SIZE((*pbl_size) >> 2));
	}
	err = cxio_hal_ctrl_qp_write_mem(rdev_p,
					 stag_idx +
					 (rdev_p->rnic_info.tpt_base >> 5),
					 sizeof(tpt), &tpt, 1);

	/* release the stag index to free pool */
	if (reset_tpt_entry)
		cxio_hal_put_stag(rdev_p->rscp, stag_idx);
ret:
	wptr = rdev_p->ctrl_qp.wptr;
	mutex_unlock(&rdev_p->ctrl_qp.lock);
	if (!err)
		if (wait_event_interruptible(rdev_p->ctrl_qp.waitq,
					     SEQ32_GE(rdev_p->ctrl_qp.rptr,
						      wptr)))
			return -ERESTARTSYS;
	return err;
}

int cxio_register_phys_mem(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid,
			   enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
			   u8 page_size, __be64 *pbl, u32 *pbl_size,
			   u32 *pbl_addr)
{
	*stag = T3_STAG_UNSET;
	return __cxio_tpt_op(rdev_p, 0, stag, 1, pdid, TPT_NON_SHARED_MR, perm,
			     zbva, to, len, page_size, pbl, pbl_size, pbl_addr);
}

int cxio_reregister_phys_mem(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid,
			     enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
			     u8 page_size, __be64 *pbl, u32 *pbl_size,
			     u32 *pbl_addr)
{
	return __cxio_tpt_op(rdev_p, 0, stag, 1, pdid, TPT_NON_SHARED_MR, perm,
			     zbva, to, len, page_size, pbl, pbl_size, pbl_addr);
}

int cxio_dereg_mem(struct cxio_rdev *rdev_p, u32 stag, u32 pbl_size,
		   u32 pbl_addr)
{
	return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0, NULL,
			     &pbl_size, &pbl_addr);
}

int cxio_allocate_window(struct cxio_rdev *rdev_p, u32 * stag, u32 pdid)
{
	u32 pbl_size = 0;
	*stag = T3_STAG_UNSET;
	return __cxio_tpt_op(rdev_p, 0, stag, 0, pdid, TPT_MW, 0, 0, 0ULL, 0, 0,
			     NULL, &pbl_size, NULL);
}

int cxio_deallocate_window(struct cxio_rdev *rdev_p, u32 stag)
{
	return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0, NULL,
			     NULL, NULL);
}
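
/*
 * cxio_rdma_init posts a T3_WR_INIT work request that gives the firmware
 * what it needs to bring a QP into RDMA mode: the QP/PD/CQ ids, the RQ
 * location within the RQT, MPA/QP capabilities, ORD/IRD limits, and the DMA
 * address and size of the work queue.  It is sent on the TOE queue
 * (skb->priority = 0) rather than the control queue.
 */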
int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr)
{
	struct t3_rdma_init_wr *wqe;
	struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;
	PDBG("%s rdev_p %p\n", __FUNCTION__, rdev_p);
	wqe = (struct t3_rdma_init_wr *) __skb_put(skb, sizeof(*wqe));
	wqe->wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_INIT));
	wqe->wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(attr->tid) |
					   V_FW_RIWR_LEN(sizeof(*wqe) >> 3));
	wqe->wrid.id1 = 0;
	wqe->qpid = cpu_to_be32(attr->qpid);
	wqe->pdid = cpu_to_be32(attr->pdid);
	wqe->scqid = cpu_to_be32(attr->scqid);
	wqe->rcqid = cpu_to_be32(attr->rcqid);
	wqe->rq_addr = cpu_to_be32(attr->rq_addr - rdev_p->rnic_info.rqt_base);
	wqe->rq_size = cpu_to_be32(attr->rq_size);
	wqe->mpaattrs = attr->mpaattrs;
	wqe->qpcaps = attr->qpcaps;
	wqe->ulpdu_size = cpu_to_be16(attr->tcp_emss);
	wqe->flags = cpu_to_be32(attr->flags);
	wqe->ord = cpu_to_be32(attr->ord);
	wqe->ird = cpu_to_be32(attr->ird);
	wqe->qp_dma_addr = cpu_to_be64(attr->qp_dma_addr);
	wqe->qp_dma_size = cpu_to_be32(attr->qp_dma_size);
	wqe->irs = cpu_to_be32(attr->irs);
	skb->priority = 0;	/* 0=>ToeQ; 1=>CtrlQ */
	return (cxgb3_ofld_send(rdev_p->t3cdev_p, skb));
}

void cxio_register_ev_cb(cxio_hal_ev_callback_func_t ev_cb)
{
	cxio_ev_cb = ev_cb;
}

void cxio_unregister_ev_cb(cxio_hal_ev_callback_func_t ev_cb)
{
	cxio_ev_cb = NULL;
}
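
/*
 * CPL_ASYNC_NOTIF messages from the adapter land here.  Completions for the
 * control QP advance its rptr and wake any thread waiting in
 * cxio_hal_ctrl_qp_write_mem(); everything else is either dropped or handed
 * to the upper-layer callback registered via cxio_register_ev_cb().
 */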
static int cxio_hal_ev_handler(struct t3cdev *t3cdev_p, struct sk_buff *skb)
{
	static int cnt;
	struct cxio_rdev *rdev_p = NULL;
	struct respQ_msg_t *rsp_msg = (struct respQ_msg_t *) skb->data;
	PDBG("%d: %s cq_id 0x%x cq_ptr 0x%x genbit %0x overflow %0x an %0x"
	     " se %0x notify %0x cqbranch %0x creditth %0x\n",
	     cnt, __FUNCTION__, RSPQ_CQID(rsp_msg), RSPQ_CQPTR(rsp_msg),
	     RSPQ_GENBIT(rsp_msg), RSPQ_OVERFLOW(rsp_msg), RSPQ_AN(rsp_msg),
	     RSPQ_SE(rsp_msg), RSPQ_NOTIFY(rsp_msg), RSPQ_CQBRANCH(rsp_msg),
	     RSPQ_CREDIT_THRESH(rsp_msg));
	PDBG("CQE: QPID 0x%0x genbit %0x type 0x%0x status 0x%0x opcode %d "
	     "len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
	     CQE_QPID(rsp_msg->cqe), CQE_GENBIT(rsp_msg->cqe),
	     CQE_TYPE(rsp_msg->cqe), CQE_STATUS(rsp_msg->cqe),
	     CQE_OPCODE(rsp_msg->cqe), CQE_LEN(rsp_msg->cqe),
	     CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
	rdev_p = (struct cxio_rdev *)t3cdev_p->ulp;
	if (!rdev_p) {
		PDBG("%s called by t3cdev %p with null ulp\n", __FUNCTION__,
		     t3cdev_p);
		return 0;
	}
	if (CQE_QPID(rsp_msg->cqe) == T3_CTRL_QP_ID) {
		rdev_p->ctrl_qp.rptr = CQE_WRID_LOW(rsp_msg->cqe) + 1;
		wake_up_interruptible(&rdev_p->ctrl_qp.waitq);
		dev_kfree_skb_irq(skb);
	} else if (CQE_QPID(rsp_msg->cqe) == 0xfff8)
		dev_kfree_skb_irq(skb);
	else if (cxio_ev_cb)
		(*cxio_ev_cb) (rdev_p, skb);
	else
		dev_kfree_skb_irq(skb);
	cnt++;
	return 0;
}

/* Caller takes care of locking if needed */
int cxio_rdev_open(struct cxio_rdev *rdev_p)
{
	struct net_device *netdev_p = NULL;
	int err = 0;

	if (strlen(rdev_p->dev_name)) {
		if (cxio_hal_find_rdev_by_name(rdev_p->dev_name)) {
			return -EBUSY;
		}
		netdev_p = dev_get_by_name(&init_net, rdev_p->dev_name);
		if (!netdev_p) {
			return -EINVAL;
		}
		dev_put(netdev_p);
	} else if (rdev_p->t3cdev_p) {
		if (cxio_hal_find_rdev_by_t3cdev(rdev_p->t3cdev_p)) {
			return -EBUSY;
		}
		netdev_p = rdev_p->t3cdev_p->lldev;
		strncpy(rdev_p->dev_name, rdev_p->t3cdev_p->name,
			T3_MAX_DEV_NAME_LEN);
	} else {
		PDBG("%s t3cdev_p or dev_name must be set\n", __FUNCTION__);
		return -EINVAL;
	}

	list_add_tail(&rdev_p->entry, &rdev_list);

	PDBG("%s opening rnic dev %s\n", __FUNCTION__, rdev_p->dev_name);
	memset(&rdev_p->ctrl_qp, 0, sizeof(rdev_p->ctrl_qp));
	if (!rdev_p->t3cdev_p)
		rdev_p->t3cdev_p = dev2t3cdev(netdev_p);
	rdev_p->t3cdev_p->ulp = (void *) rdev_p;
	err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_GET_PARAMS,
				    &(rdev_p->rnic_info));
	if (err) {
		printk(KERN_ERR "%s t3cdev_p(%p)->ctl returned error %d.\n",
		       __FUNCTION__, rdev_p->t3cdev_p, err);
		goto err1;
	}
	err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, GET_PORTS,
				    &(rdev_p->port_info));
	if (err) {
		printk(KERN_ERR "%s t3cdev_p(%p)->ctl returned error %d.\n",
		       __FUNCTION__, rdev_p->t3cdev_p, err);
		goto err1;
	}

	/*
	 * qpshift is the number of bits to shift the qpid left in order
	 * to get the correct address of the doorbell for that qp.
	 */
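	/*
	 * For illustration (hypothetical numbers): with 4KB pages and a 16MB
	 * user doorbell region, qpnr = 16MB >> 12 = 4096 pages, qpmask =
	 * (65536 >> 12) - 1 = 15, and qpshift = 12 - ilog2(65536 >> 12) = 8,
	 * so each page holds 16 doorbells spaced 256 bytes apart and QPIDs
	 * are handed out in blocks of 16.
	 */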
	cxio_init_ucontext(rdev_p, &rdev_p->uctx);
	rdev_p->qpshift = PAGE_SHIFT -
			  ilog2(65536 >>
				    ilog2(rdev_p->rnic_info.udbell_len >>
					      PAGE_SHIFT));
	rdev_p->qpnr = rdev_p->rnic_info.udbell_len >> PAGE_SHIFT;
	rdev_p->qpmask = (65536 >> ilog2(rdev_p->qpnr)) - 1;
	PDBG("%s rnic %s info: tpt_base 0x%0x tpt_top 0x%0x num stags %d "
	     "pbl_base 0x%0x pbl_top 0x%0x rqt_base 0x%0x, rqt_top 0x%0x\n",
	     __FUNCTION__, rdev_p->dev_name, rdev_p->rnic_info.tpt_base,
	     rdev_p->rnic_info.tpt_top, cxio_num_stags(rdev_p),
	     rdev_p->rnic_info.pbl_base,
	     rdev_p->rnic_info.pbl_top, rdev_p->rnic_info.rqt_base,
	     rdev_p->rnic_info.rqt_top);
	PDBG("udbell_len 0x%0x udbell_physbase 0x%lx kdb_addr %p qpshift %lu "
	     "qpnr %d qpmask 0x%x\n",
	     rdev_p->rnic_info.udbell_len,
	     rdev_p->rnic_info.udbell_physbase, rdev_p->rnic_info.kdb_addr,
	     rdev_p->qpshift, rdev_p->qpnr, rdev_p->qpmask);

	err = cxio_hal_init_ctrl_qp(rdev_p);
	if (err) {
		printk(KERN_ERR "%s error %d initializing ctrl_qp.\n",
		       __FUNCTION__, err);
		goto err1;
	}
	err = cxio_hal_init_resource(rdev_p, cxio_num_stags(rdev_p), 0,
				     0, T3_MAX_NUM_QP, T3_MAX_NUM_CQ,
				     T3_MAX_NUM_PD);
	if (err) {
		printk(KERN_ERR "%s error %d initializing hal resources.\n",
		       __FUNCTION__, err);
		goto err2;
	}
	err = cxio_hal_pblpool_create(rdev_p);
	if (err) {
		printk(KERN_ERR "%s error %d initializing pbl mem pool.\n",
		       __FUNCTION__, err);
		goto err3;
	}
	err = cxio_hal_rqtpool_create(rdev_p);
	if (err) {
		printk(KERN_ERR "%s error %d initializing rqt mem pool.\n",
		       __FUNCTION__, err);
		goto err4;
	}
	return 0;
err4:
	cxio_hal_pblpool_destroy(rdev_p);
err3:
	cxio_hal_destroy_resource(rdev_p->rscp);
err2:
	cxio_hal_destroy_ctrl_qp(rdev_p);
err1:
	list_del(&rdev_p->entry);
	return err;
}

void cxio_rdev_close(struct cxio_rdev *rdev_p)
{
	if (rdev_p) {
		cxio_hal_pblpool_destroy(rdev_p);
		cxio_hal_rqtpool_destroy(rdev_p);
		list_del(&rdev_p->entry);
		rdev_p->t3cdev_p->ulp = NULL;
		cxio_hal_destroy_ctrl_qp(rdev_p);
		cxio_hal_destroy_resource(rdev_p->rscp);
	}
}

int __init cxio_hal_init(void)
{
	if (cxio_hal_init_rhdl_resource(T3_MAX_NUM_RI))
		return -ENOMEM;
	t3_register_cpl_handler(CPL_ASYNC_NOTIF, cxio_hal_ev_handler);
	return 0;
}

void __exit cxio_hal_exit(void)
{
	struct cxio_rdev *rdev, *tmp;

	t3_register_cpl_handler(CPL_ASYNC_NOTIF, NULL);
	list_for_each_entry_safe(rdev, tmp, &rdev_list, entry)
		cxio_rdev_close(rdev);
	cxio_hal_destroy_rhdl_resource();
}
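
/*
 * Walk the SW SQ starting at sq_rptr and move CQEs for WRs that have
 * already completed out of order into the SW CQ.  Unsignaled WRs are
 * skipped; the walk stops at the first signaled WR that has not completed
 * yet, or after moving one completed signaled WR.
 */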
static void flush_completed_wrs(struct t3_wq *wq, struct t3_cq *cq)
{
	struct t3_swsq *sqp;
	__u32 ptr = wq->sq_rptr;
	int count = Q_COUNT(wq->sq_rptr, wq->sq_wptr);

	sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
	while (count--)
		if (!sqp->signaled) {
			ptr++;
			sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
		} else if (sqp->complete) {

			/*
			 * Insert this completed cqe into the swcq.
			 */
			PDBG("%s moving cqe into swcq sq idx %ld cq idx %ld\n",
			     __FUNCTION__, Q_PTR2IDX(ptr, wq->sq_size_log2),
			     Q_PTR2IDX(cq->sw_wptr, cq->size_log2));
			sqp->cqe.header |= htonl(V_CQE_SWCQE(1));
			*(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2))
				= sqp->cqe;
			cq->sw_wptr++;
			sqp->signaled = 0;
			break;
		} else
			break;
}

static void create_read_req_cqe(struct t3_wq *wq, struct t3_cqe *hw_cqe,
				struct t3_cqe *read_cqe)
{
	read_cqe->u.scqe.wrid_hi = wq->oldest_read->sq_wptr;
	read_cqe->len = wq->oldest_read->read_len;
	read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(*hw_cqe)) |
				 V_CQE_SWCQE(SW_CQE(*hw_cqe)) |
				 V_CQE_OPCODE(T3_READ_REQ) |
				 V_CQE_TYPE(1));
}

/*
 * Return a ptr to the next read wr in the SWSQ or NULL.
 */
static void advance_oldest_read(struct t3_wq *wq)
{
	u32 rptr = wq->oldest_read - wq->sq + 1;
	u32 wptr = Q_PTR2IDX(wq->sq_wptr, wq->sq_size_log2);

	while (Q_PTR2IDX(rptr, wq->sq_size_log2) != wptr) {
		wq->oldest_read = wq->sq + Q_PTR2IDX(rptr, wq->sq_size_log2);

		if (wq->oldest_read->opcode == T3_READ_REQ)
			return;
		rptr++;
	}
	wq->oldest_read = NULL;
}

/*
 * cxio_poll_cq
 *
 * Caller must:
 *     check the validity of the first CQE,
 *     supply the wq associated with the qpid.
 *
 * credit: cq credit to return to sge.
 * cqe_flushed: 1 iff the CQE is flushed.
 * cqe: copy of the polled CQE.
 *
 * return value:
 *     0       CQE returned,
 *    -1       CQE skipped, try again.
 */
int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
		 u8 *cqe_flushed, u64 *cookie, u32 *credit)
{
	int ret = 0;
	struct t3_cqe *hw_cqe, read_cqe;

	*cqe_flushed = 0;
	*credit = 0;
	hw_cqe = cxio_next_cqe(cq);

	PDBG("%s CQE OOO %d qpid 0x%0x genbit %d type %d status 0x%0x"
	     " opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
	     __FUNCTION__, CQE_OOO(*hw_cqe), CQE_QPID(*hw_cqe),
	     CQE_GENBIT(*hw_cqe), CQE_TYPE(*hw_cqe), CQE_STATUS(*hw_cqe),
	     CQE_OPCODE(*hw_cqe), CQE_LEN(*hw_cqe), CQE_WRID_HI(*hw_cqe),
	     CQE_WRID_LOW(*hw_cqe));

	/*
	 * skip cqe's not affiliated with a QP.
	 */
	if (wq == NULL) {
		ret = -1;
		goto skip_cqe;
	}

	/*
	 * Gotta tweak READ completions:
	 *	1) the cqe doesn't contain the sq_wptr from the wr.
	 *	2) opcode not reflected from the wr.
	 *	3) read_len not reflected from the wr.
	 *	4) cq_type is RQ_TYPE not SQ_TYPE.
	 */
	if (RQ_TYPE(*hw_cqe) && (CQE_OPCODE(*hw_cqe) == T3_READ_RESP)) {

		/*
		 * Don't write to the HWCQ, so create a new read req CQE
		 * in local memory.
		 */
		create_read_req_cqe(wq, hw_cqe, &read_cqe);
		hw_cqe = &read_cqe;
		advance_oldest_read(wq);
	}

	/*
	 * T3A: Discard TERMINATE CQEs.
	 */
	if (CQE_OPCODE(*hw_cqe) == T3_TERMINATE) {
		ret = -1;
		wq->error = 1;
		goto skip_cqe;
	}

	if (CQE_STATUS(*hw_cqe) || wq->error) {
		*cqe_flushed = wq->error;
		wq->error = 1;

		/*
		 * T3A inserts errors into the CQE.  We cannot return
		 * these as work completions.
		 */
		/* incoming write failures */
		if ((CQE_OPCODE(*hw_cqe) == T3_RDMA_WRITE)
		     && RQ_TYPE(*hw_cqe)) {
			ret = -1;
			goto skip_cqe;
		}
		/* incoming read request failures */
		if ((CQE_OPCODE(*hw_cqe) == T3_READ_RESP) && SQ_TYPE(*hw_cqe)) {
			ret = -1;
			goto skip_cqe;
		}

		/* incoming SEND with no receive posted failures */
		if ((CQE_OPCODE(*hw_cqe) == T3_SEND) && RQ_TYPE(*hw_cqe) &&
		    Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) {
			ret = -1;
			goto skip_cqe;
		}
		goto proc_cqe;
	}

	/*
	 * RECV completion.
	 */
	if (RQ_TYPE(*hw_cqe)) {

		/*
		 * HW only validates 4 bits of MSN.  So we must validate that
		 * the MSN in the SEND is the next expected MSN.  If it's not,
		 * then we complete this with TPT_ERR_MSN and mark the wq in
		 * error.
		 */
		if (unlikely((CQE_WRID_MSN(*hw_cqe) != (wq->rq_rptr + 1)))) {
			wq->error = 1;
			hw_cqe->header |= htonl(V_CQE_STATUS(TPT_ERR_MSN));
			goto proc_cqe;
		}
		goto proc_cqe;
	}

	/*
	 * If we get here it's a send completion.
	 *
	 * Handle out of order completion. These get stuffed
	 * in the SW SQ. Then the SW SQ is walked to move any
	 * now in-order completions into the SW CQ.  This handles
	 * 2 cases:
	 *	1) reaping unsignaled WRs when the first subsequent
	 *	   signaled WR is completed.
	 *	2) out of order read completions.
	 */
	if (!SW_CQE(*hw_cqe) && (CQE_WRID_SQ_WPTR(*hw_cqe) != wq->sq_rptr)) {
		struct t3_swsq *sqp;

		PDBG("%s out of order completion going in swsq at idx %ld\n",
		     __FUNCTION__,
		     Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe), wq->sq_size_log2));
		sqp = wq->sq +
		      Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe), wq->sq_size_log2);
		sqp->cqe = *hw_cqe;
		sqp->complete = 1;
		ret = -1;
		goto flush_wq;
	}

proc_cqe:
	*cqe = *hw_cqe;

	/*
	 * Reap the associated WR(s) that are freed up with this
	 * completion.
	 */
	if (SQ_TYPE(*hw_cqe)) {
		wq->sq_rptr = CQE_WRID_SQ_WPTR(*hw_cqe);
		PDBG("%s completing sq idx %ld\n", __FUNCTION__,
		     Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2));
		*cookie = (wq->sq +
			   Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2))->wr_id;
		wq->sq_rptr++;
	} else {
		PDBG("%s completing rq idx %ld\n", __FUNCTION__,
		     Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2));
		*cookie = *(wq->rq + Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2));
		wq->rq_rptr++;
	}

flush_wq:
	/*
	 * Flush any completed cqes that are now in-order.
	 */
	flush_completed_wrs(wq, cq);

skip_cqe:
	if (SW_CQE(*hw_cqe)) {
		PDBG("%s cq %p cqid 0x%x skip sw cqe sw_rptr 0x%x\n",
		     __FUNCTION__, cq, cq->cqid, cq->sw_rptr);
		++cq->sw_rptr;
	} else {
		PDBG("%s cq %p cqid 0x%x skip hw cqe rptr 0x%x\n",
		     __FUNCTION__, cq, cq->cqid, cq->rptr);
		++cq->rptr;

		/*
		 * T3A: compute credits.
		 */
		if (((cq->rptr - cq->wptr) > (1 << (cq->size_log2 - 1)))
		    || ((cq->rptr - cq->wptr) >= 128)) {
			*credit = cq->rptr - cq->wptr;
			cq->wptr = cq->rptr;
		}
	}
	return ret;
}