iwch_provider.c

/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/inetdevice.h>
#include <linux/slab.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>

#include "cxio_hal.h"
#include "iwch.h"
#include "iwch_provider.h"
#include "iwch_cm.h"
#include "iwch_user.h"
#include "common.h"
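
/*
 * UD transport is not supported on an iWARP RNIC, so the address
 * handle, multicast and MAD verbs below are stubbed out and simply
 * return -ENOSYS.
 */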
static struct ib_ah *iwch_ah_create(struct ib_pd *pd,
                                    struct ib_ah_attr *ah_attr)
{
        return ERR_PTR(-ENOSYS);
}

static int iwch_ah_destroy(struct ib_ah *ah)
{
        return -ENOSYS;
}

static int iwch_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
        return -ENOSYS;
}

static int iwch_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
        return -ENOSYS;
}

static int iwch_process_mad(struct ib_device *ibdev,
                            int mad_flags,
                            u8 port_num,
                            struct ib_wc *in_wc,
                            struct ib_grh *in_grh,
                            struct ib_mad *in_mad, struct ib_mad *out_mad)
{
        return -ENOSYS;
}

static int iwch_dealloc_ucontext(struct ib_ucontext *context)
{
        struct iwch_dev *rhp = to_iwch_dev(context->device);
        struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
        struct iwch_mm_entry *mm, *tmp;

        PDBG("%s context %p\n", __func__, context);
        list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
                kfree(mm);
        cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
        kfree(ucontext);
        return 0;
}

static struct ib_ucontext *iwch_alloc_ucontext(struct ib_device *ibdev,
                                               struct ib_udata *udata)
{
        struct iwch_ucontext *context;
        struct iwch_dev *rhp = to_iwch_dev(ibdev);

        PDBG("%s ibdev %p\n", __func__, ibdev);
        context = kzalloc(sizeof(*context), GFP_KERNEL);
        if (!context)
                return ERR_PTR(-ENOMEM);
        cxio_init_ucontext(&rhp->rdev, &context->uctx);
        INIT_LIST_HEAD(&context->mmaps);
        spin_lock_init(&context->mmap_lock);
        return &context->ibucontext;
}

static int iwch_destroy_cq(struct ib_cq *ib_cq)
{
        struct iwch_cq *chp;

        PDBG("%s ib_cq %p\n", __func__, ib_cq);
        chp = to_iwch_cq(ib_cq);

        remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
        atomic_dec(&chp->refcnt);
        wait_event(chp->wait, !atomic_read(&chp->refcnt));

        cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
        kfree(chp);
        return 0;
}
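
/*
 * Create a CQ with at least 'entries' entries, rounded up to a power
 * of two.  For userspace CQs the queue memory is exported to libcxgb3
 * via the mmap key mechanism; a shorter v0 response is returned to
 * downlevel libraries that pass in a smaller outlen.
 */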
static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int vector,
                                    struct ib_ucontext *ib_context,
                                    struct ib_udata *udata)
{
        struct iwch_dev *rhp;
        struct iwch_cq *chp;
        struct iwch_create_cq_resp uresp;
        struct iwch_create_cq_req ureq;
        struct iwch_ucontext *ucontext = NULL;
        static int warned;
        size_t resplen;

        PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
        rhp = to_iwch_dev(ibdev);
        chp = kzalloc(sizeof(*chp), GFP_KERNEL);
        if (!chp)
                return ERR_PTR(-ENOMEM);

        if (ib_context) {
                ucontext = to_iwch_ucontext(ib_context);
                if (!t3a_device(rhp)) {
                        if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
                                kfree(chp);
                                return ERR_PTR(-EFAULT);
                        }
                        chp->user_rptr_addr = (u32 __user *)(unsigned long)ureq.user_rptr_addr;
                }
        }

        if (t3a_device(rhp)) {
                /*
                 * T3A: Add some fluff to handle extra CQEs inserted
                 * for various errors.
                 * Additional CQE possibilities:
                 *      TERMINATE,
                 *      incoming RDMA WRITE Failures
                 *      incoming RDMA READ REQUEST FAILUREs
                 * NOTE: We cannot ensure the CQ won't overflow.
                 */
                entries += 16;
        }
        entries = roundup_pow_of_two(entries);
        chp->cq.size_log2 = ilog2(entries);

        if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) {
                kfree(chp);
                return ERR_PTR(-ENOMEM);
        }
        chp->rhp = rhp;
        chp->ibcq.cqe = 1 << chp->cq.size_log2;
        spin_lock_init(&chp->lock);
        spin_lock_init(&chp->comp_handler_lock);
        atomic_set(&chp->refcnt, 1);
        init_waitqueue_head(&chp->wait);
        if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
                cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
                kfree(chp);
                return ERR_PTR(-ENOMEM);
        }

        if (ucontext) {
                struct iwch_mm_entry *mm;

                mm = kmalloc(sizeof *mm, GFP_KERNEL);
                if (!mm) {
                        iwch_destroy_cq(&chp->ibcq);
                        return ERR_PTR(-ENOMEM);
                }
                uresp.cqid = chp->cq.cqid;
                uresp.size_log2 = chp->cq.size_log2;
                spin_lock(&ucontext->mmap_lock);
                uresp.key = ucontext->key;
                ucontext->key += PAGE_SIZE;
                spin_unlock(&ucontext->mmap_lock);
                mm->key = uresp.key;
                mm->addr = virt_to_phys(chp->cq.queue);
                if (udata->outlen < sizeof uresp) {
                        if (!warned++)
                                printk(KERN_WARNING MOD "Warning - "
                                       "downlevel libcxgb3 (non-fatal).\n");
                        mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
                                             sizeof(struct t3_cqe));
                        resplen = sizeof(struct iwch_create_cq_resp_v0);
                } else {
                        mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *
                                             sizeof(struct t3_cqe));
                        uresp.memsize = mm->len;
                        uresp.reserved = 0;
                        resplen = sizeof uresp;
                }
                if (ib_copy_to_udata(udata, &uresp, resplen)) {
                        kfree(mm);
                        iwch_destroy_cq(&chp->ibcq);
                        return ERR_PTR(-EFAULT);
                }
                insert_mmap(ucontext, mm);
        }
        PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",
             chp->cq.cqid, chp, (1 << chp->cq.size_log2),
             (unsigned long long) chp->cq.dma_addr);
        return &chp->ibcq;
}

static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
#ifdef notyet
        struct iwch_cq *chp = to_iwch_cq(cq);
        struct t3_cq oldcq, newcq;
        int ret;

        PDBG("%s ib_cq %p cqe %d\n", __func__, cq, cqe);

        /* We don't downsize... */
        if (cqe <= cq->cqe)
                return 0;

        /* create new t3_cq with new size */
        cqe = roundup_pow_of_two(cqe+1);
        newcq.size_log2 = ilog2(cqe);

        /* Dont allow resize to less than the current wce count */
        if (cqe < Q_COUNT(chp->cq.rptr, chp->cq.wptr)) {
                return -ENOMEM;
        }

        /* Quiesce all QPs using this CQ */
        ret = iwch_quiesce_qps(chp);
        if (ret) {
                return ret;
        }

        ret = cxio_create_cq(&chp->rhp->rdev, &newcq);
        if (ret) {
                return ret;
        }

        /* copy CQEs */
        memcpy(newcq.queue, chp->cq.queue, (1 << chp->cq.size_log2) *
               sizeof(struct t3_cqe));

        /* old iwch_qp gets new t3_cq but keeps old cqid */
        oldcq = chp->cq;
        chp->cq = newcq;
        chp->cq.cqid = oldcq.cqid;

        /* resize new t3_cq to update the HW context */
        ret = cxio_resize_cq(&chp->rhp->rdev, &chp->cq);
        if (ret) {
                chp->cq = oldcq;
                return ret;
        }
        chp->ibcq.cqe = (1<<chp->cq.size_log2) - 1;

        /* destroy old t3_cq */
        oldcq.cqid = newcq.cqid;
        ret = cxio_destroy_cq(&chp->rhp->rdev, &oldcq);
        if (ret) {
                printk(KERN_ERR MOD "%s - cxio_destroy_cq failed %d\n",
                       __func__, ret);
        }

        /* add user hooks here */

        /* resume qps */
        ret = iwch_resume_qps(chp);
        return ret;
#else
        return -ENOSYS;
#endif
}
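
/*
 * Re-arm the CQ for notification.  For userspace CQs, the current read
 * pointer is first pulled in from the shared user page so the hardware
 * arms relative to the CQEs userspace has already consumed.
 */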
static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
        struct iwch_dev *rhp;
        struct iwch_cq *chp;
        enum t3_cq_opcode cq_op;
        int err;
        unsigned long flag;
        u32 rptr;

        chp = to_iwch_cq(ibcq);
        rhp = chp->rhp;
        if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
                cq_op = CQ_ARM_SE;
        else
                cq_op = CQ_ARM_AN;
        if (chp->user_rptr_addr) {
                if (get_user(rptr, chp->user_rptr_addr))
                        return -EFAULT;
                spin_lock_irqsave(&chp->lock, flag);
                chp->cq.rptr = rptr;
        } else
                spin_lock_irqsave(&chp->lock, flag);
        PDBG("%s rptr 0x%x\n", __func__, chp->cq.rptr);
        err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
        spin_unlock_irqrestore(&chp->lock, flag);
        if (err < 0)
                printk(KERN_ERR MOD "Error %d rearming CQID 0x%x\n", err,
                       chp->cq.cqid);
        if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
                err = 0;
        return err;
}
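
/*
 * mmap handler for userspace queue memory.  vm_pgoff encodes a key that
 * was handed out in a create_cq/create_qp response; the key is looked
 * up (and consumed) in the ucontext's mmap list.  Doorbell space is
 * mapped uncached and write-only; WQ/CQ memory is mapped normally.
 */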
static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
        int len = vma->vm_end - vma->vm_start;
        u32 key = vma->vm_pgoff << PAGE_SHIFT;
        struct cxio_rdev *rdev_p;
        int ret = 0;
        struct iwch_mm_entry *mm;
        struct iwch_ucontext *ucontext;
        u64 addr;

        PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
             key, len);

        if (vma->vm_start & (PAGE_SIZE-1)) {
                return -EINVAL;
        }

        rdev_p = &(to_iwch_dev(context->device)->rdev);
        ucontext = to_iwch_ucontext(context);

        mm = remove_mmap(ucontext, key, len);
        if (!mm)
                return -EINVAL;
        addr = mm->addr;
        kfree(mm);

        if ((addr >= rdev_p->rnic_info.udbell_physbase) &&
            (addr < (rdev_p->rnic_info.udbell_physbase +
                     rdev_p->rnic_info.udbell_len))) {

                /*
                 * Map T3 DB register.
                 */
                if (vma->vm_flags & VM_READ) {
                        return -EPERM;
                }

                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
                vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
                vma->vm_flags &= ~VM_MAYREAD;
                ret = io_remap_pfn_range(vma, vma->vm_start,
                                         addr >> PAGE_SHIFT,
                                         len, vma->vm_page_prot);
        } else {

                /*
                 * Map WQ or CQ contig dma memory...
                 */
                ret = remap_pfn_range(vma, vma->vm_start,
                                      addr >> PAGE_SHIFT,
                                      len, vma->vm_page_prot);
        }

        return ret;
}

static int iwch_deallocate_pd(struct ib_pd *pd)
{
        struct iwch_dev *rhp;
        struct iwch_pd *php;

        php = to_iwch_pd(pd);
        rhp = php->rhp;
        PDBG("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
        cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
        kfree(php);
        return 0;
}

static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev,
                                      struct ib_ucontext *context,
                                      struct ib_udata *udata)
{
        struct iwch_pd *php;
        u32 pdid;
        struct iwch_dev *rhp;

        PDBG("%s ibdev %p\n", __func__, ibdev);
        rhp = (struct iwch_dev *) ibdev;
        pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
        if (!pdid)
                return ERR_PTR(-EINVAL);
        php = kzalloc(sizeof(*php), GFP_KERNEL);
        if (!php) {
                cxio_hal_put_pdid(rhp->rdev.rscp, pdid);
                return ERR_PTR(-ENOMEM);
        }
        php->pdid = pdid;
        php->rhp = rhp;
        if (context) {
                if (ib_copy_to_udata(udata, &php->pdid, sizeof(__u32))) {
                        iwch_deallocate_pd(&php->ibpd);
                        return ERR_PTR(-EFAULT);
                }
        }
        PDBG("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
        return &php->ibpd;
}

static int iwch_dereg_mr(struct ib_mr *ib_mr)
{
        struct iwch_dev *rhp;
        struct iwch_mr *mhp;
        u32 mmid;

        PDBG("%s ib_mr %p\n", __func__, ib_mr);
        /* There can be no memory windows */
        if (atomic_read(&ib_mr->usecnt))
                return -EINVAL;

        mhp = to_iwch_mr(ib_mr);
        rhp = mhp->rhp;
        mmid = mhp->attr.stag >> 8;
        cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
                       mhp->attr.pbl_addr);
        iwch_free_pbl(mhp);
        remove_handle(rhp, &rhp->mmidr, mmid);
        if (mhp->kva)
                kfree((void *) (unsigned long) mhp->kva);
        if (mhp->umem)
                ib_umem_release(mhp->umem);
        PDBG("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
        kfree(mhp);
        return 0;
}
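
/*
 * Register a list of physical buffers.  The buffers must line up with
 * the requested IOVA at page granularity: the page list is built once,
 * written into the adapter's PBL, and then the TPT entry is created.
 */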
static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,
                                            struct ib_phys_buf *buffer_list,
                                            int num_phys_buf,
                                            int acc,
                                            u64 *iova_start)
{
        __be64 *page_list;
        int shift;
        u64 total_size;
        int npages;
        struct iwch_dev *rhp;
        struct iwch_pd *php;
        struct iwch_mr *mhp;
        int ret;

        PDBG("%s ib_pd %p\n", __func__, pd);
        php = to_iwch_pd(pd);
        rhp = php->rhp;

        mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
        if (!mhp)
                return ERR_PTR(-ENOMEM);

        mhp->rhp = rhp;

        /* First check that we have enough alignment */
        if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
                ret = -EINVAL;
                goto err;
        }

        if (num_phys_buf > 1 &&
            ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
                ret = -EINVAL;
                goto err;
        }

        ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
                                   &total_size, &npages, &shift, &page_list);
        if (ret)
                goto err;

        ret = iwch_alloc_pbl(mhp, npages);
        if (ret) {
                kfree(page_list);
                goto err_pbl;
        }

        ret = iwch_write_pbl(mhp, page_list, npages, 0);
        kfree(page_list);
        if (ret)
                goto err_pbl;

        mhp->attr.pdid = php->pdid;
        mhp->attr.zbva = 0;
        mhp->attr.perms = iwch_ib_to_tpt_access(acc);
        mhp->attr.va_fbo = *iova_start;
        mhp->attr.page_size = shift - 12;
        mhp->attr.len = (u32) total_size;
        mhp->attr.pbl_size = npages;
        ret = iwch_register_mem(rhp, php, mhp, shift);
        if (ret)
                goto err_pbl;

        return &mhp->ibmr;

err_pbl:
        iwch_free_pbl(mhp);

err:
        kfree(mhp);
        return ERR_PTR(ret);
}

static int iwch_reregister_phys_mem(struct ib_mr *mr,
                                    int mr_rereg_mask,
                                    struct ib_pd *pd,
                                    struct ib_phys_buf *buffer_list,
                                    int num_phys_buf,
                                    int acc, u64 *iova_start)
{
        struct iwch_mr mh, *mhp;
        struct iwch_pd *php;
        struct iwch_dev *rhp;
        __be64 *page_list = NULL;
        int shift = 0;
        u64 total_size;
        int npages = 0;
        int ret;

        PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd);

        /* There can be no memory windows */
        if (atomic_read(&mr->usecnt))
                return -EINVAL;

        mhp = to_iwch_mr(mr);
        rhp = mhp->rhp;
        php = to_iwch_pd(mr->pd);

        /* make sure we are on the same adapter */
        if (rhp != php->rhp)
                return -EINVAL;

        memcpy(&mh, mhp, sizeof *mhp);

        if (mr_rereg_mask & IB_MR_REREG_PD)
                php = to_iwch_pd(pd);
        if (mr_rereg_mask & IB_MR_REREG_ACCESS)
                mh.attr.perms = iwch_ib_to_tpt_access(acc);
        if (mr_rereg_mask & IB_MR_REREG_TRANS) {
                ret = build_phys_page_list(buffer_list, num_phys_buf,
                                           iova_start,
                                           &total_size, &npages,
                                           &shift, &page_list);
                if (ret)
                        return ret;
        }

        ret = iwch_reregister_mem(rhp, php, &mh, shift, npages);
        kfree(page_list);
        if (ret) {
                return ret;
        }
        if (mr_rereg_mask & IB_MR_REREG_PD)
                mhp->attr.pdid = php->pdid;
        if (mr_rereg_mask & IB_MR_REREG_ACCESS)
                mhp->attr.perms = iwch_ib_to_tpt_access(acc);
        if (mr_rereg_mask & IB_MR_REREG_TRANS) {
                mhp->attr.zbva = 0;
                mhp->attr.va_fbo = *iova_start;
                mhp->attr.page_size = shift - 12;
                mhp->attr.len = (u32) total_size;
                mhp->attr.pbl_size = npages;
        }

        return 0;
}
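
/*
 * Register a userspace memory region.  The pinned umem is walked chunk
 * by chunk and the page DMA addresses are written to the adapter PBL
 * in PAGE_SIZE batches before the TPT entry is created.
 */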
static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                      u64 virt, int acc, struct ib_udata *udata)
{
        __be64 *pages;
        int shift, n, len;
        int i, j, k;
        int err = 0;
        struct ib_umem_chunk *chunk;
        struct iwch_dev *rhp;
        struct iwch_pd *php;
        struct iwch_mr *mhp;
        struct iwch_reg_user_mr_resp uresp;

        PDBG("%s ib_pd %p\n", __func__, pd);

        php = to_iwch_pd(pd);
        rhp = php->rhp;
        mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
        if (!mhp)
                return ERR_PTR(-ENOMEM);

        mhp->rhp = rhp;

        mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
        if (IS_ERR(mhp->umem)) {
                err = PTR_ERR(mhp->umem);
                kfree(mhp);
                return ERR_PTR(err);
        }

        shift = ffs(mhp->umem->page_size) - 1;

        n = 0;
        list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
                n += chunk->nents;

        err = iwch_alloc_pbl(mhp, n);
        if (err)
                goto err;

        pages = (__be64 *) __get_free_page(GFP_KERNEL);
        if (!pages) {
                err = -ENOMEM;
                goto err_pbl;
        }

        i = n = 0;

        list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
                for (j = 0; j < chunk->nmap; ++j) {
                        len = sg_dma_len(&chunk->page_list[j]) >> shift;
                        for (k = 0; k < len; ++k) {
                                pages[i++] = cpu_to_be64(sg_dma_address(
                                        &chunk->page_list[j]) +
                                        mhp->umem->page_size * k);
                                if (i == PAGE_SIZE / sizeof *pages) {
                                        err = iwch_write_pbl(mhp, pages, i, n);
                                        if (err)
                                                goto pbl_done;
                                        n += i;
                                        i = 0;
                                }
                        }
                }

        if (i)
                err = iwch_write_pbl(mhp, pages, i, n);

pbl_done:
        free_page((unsigned long) pages);
        if (err)
                goto err_pbl;

        mhp->attr.pdid = php->pdid;
        mhp->attr.zbva = 0;
        mhp->attr.perms = iwch_ib_to_tpt_access(acc);
        mhp->attr.va_fbo = virt;
        mhp->attr.page_size = shift - 12;
        mhp->attr.len = (u32) length;

        err = iwch_register_mem(rhp, php, mhp, shift);
        if (err)
                goto err_pbl;

        if (udata && !t3a_device(rhp)) {
                uresp.pbl_addr = (mhp->attr.pbl_addr -
                                  rhp->rdev.rnic_info.pbl_base) >> 3;
                PDBG("%s user resp pbl_addr 0x%x\n", __func__,
                     uresp.pbl_addr);

                if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
                        iwch_dereg_mr(&mhp->ibmr);
                        err = -EFAULT;
                        goto err;
                }
        }

        return &mhp->ibmr;

err_pbl:
        iwch_free_pbl(mhp);

err:
        ib_umem_release(mhp->umem);
        kfree(mhp);
        return ERR_PTR(err);
}
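
/*
 * A DMA MR is implemented as a physical registration starting at
 * address 0; T3 only supports 32 bits of length, so 0xffffffff is the
 * largest region that can be exposed.
 */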
static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
{
        struct ib_phys_buf bl;
        u64 kva;
        struct ib_mr *ibmr;

        PDBG("%s ib_pd %p\n", __func__, pd);

        /*
         * T3 only supports 32 bits of size.
         */
        bl.size = 0xffffffff;
        bl.addr = 0;
        kva = 0;
        ibmr = iwch_register_phys_mem(pd, &bl, 1, acc, &kva);
        return ibmr;
}

static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
        struct iwch_dev *rhp;
        struct iwch_pd *php;
        struct iwch_mw *mhp;
        u32 mmid;
        u32 stag = 0;
        int ret;

        if (type != IB_MW_TYPE_1)
                return ERR_PTR(-EINVAL);

        php = to_iwch_pd(pd);
        rhp = php->rhp;
        mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
        if (!mhp)
                return ERR_PTR(-ENOMEM);
        ret = cxio_allocate_window(&rhp->rdev, &stag, php->pdid);
        if (ret) {
                kfree(mhp);
                return ERR_PTR(ret);
        }
        mhp->rhp = rhp;
        mhp->attr.pdid = php->pdid;
        mhp->attr.type = TPT_MW;
        mhp->attr.stag = stag;
        mmid = (stag) >> 8;
        mhp->ibmw.rkey = stag;
        if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
                cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
                kfree(mhp);
                return ERR_PTR(-ENOMEM);
        }
        PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
        return &(mhp->ibmw);
}

static int iwch_dealloc_mw(struct ib_mw *mw)
{
        struct iwch_dev *rhp;
        struct iwch_mw *mhp;
        u32 mmid;

        mhp = to_iwch_mw(mw);
        rhp = mhp->rhp;
        mmid = (mw->rkey) >> 8;
        cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
        remove_handle(rhp, &rhp->mmidr, mmid);
        PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
        kfree(mhp);
        return 0;
}
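
/*
 * Allocate a fast-register MR: a PBL of pbl_depth entries and an STag
 * are reserved up front, and the actual page list is supplied later by
 * a fastreg work request.
 */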
static struct ib_mr *iwch_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
{
        struct iwch_dev *rhp;
        struct iwch_pd *php;
        struct iwch_mr *mhp;
        u32 mmid;
        u32 stag = 0;
        int ret = -ENOMEM;

        php = to_iwch_pd(pd);
        rhp = php->rhp;
        mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
        if (!mhp)
                goto err;

        mhp->rhp = rhp;
        ret = iwch_alloc_pbl(mhp, pbl_depth);
        if (ret)
                goto err1;
        mhp->attr.pbl_size = pbl_depth;
        ret = cxio_allocate_stag(&rhp->rdev, &stag, php->pdid,
                                 mhp->attr.pbl_size, mhp->attr.pbl_addr);
        if (ret)
                goto err2;
        mhp->attr.pdid = php->pdid;
        mhp->attr.type = TPT_NON_SHARED_MR;
        mhp->attr.stag = stag;
        mhp->attr.state = 1;
        mmid = (stag) >> 8;
        mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
        ret = insert_handle(rhp, &rhp->mmidr, mhp, mmid);
        if (ret)
                goto err3;

        PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
        return &(mhp->ibmr);
err3:
        cxio_dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
                       mhp->attr.pbl_addr);
err2:
        iwch_free_pbl(mhp);
err1:
        kfree(mhp);
err:
        return ERR_PTR(ret);
}

static struct ib_fast_reg_page_list *iwch_alloc_fastreg_pbl(
                                        struct ib_device *device,
                                        int page_list_len)
{
        struct ib_fast_reg_page_list *page_list;

        page_list = kmalloc(sizeof *page_list + page_list_len * sizeof(u64),
                            GFP_KERNEL);
        if (!page_list)
                return ERR_PTR(-ENOMEM);

        page_list->page_list = (u64 *)(page_list + 1);
        page_list->max_page_list_len = page_list_len;

        return page_list;
}

static void iwch_free_fastreg_pbl(struct ib_fast_reg_page_list *page_list)
{
        kfree(page_list);
}

static int iwch_destroy_qp(struct ib_qp *ib_qp)
{
        struct iwch_dev *rhp;
        struct iwch_qp *qhp;
        struct iwch_qp_attributes attrs;
        struct iwch_ucontext *ucontext;

        qhp = to_iwch_qp(ib_qp);
        rhp = qhp->rhp;

        attrs.next_state = IWCH_QP_STATE_ERROR;
        iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
        wait_event(qhp->wait, !qhp->ep);

        remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid);

        atomic_dec(&qhp->refcnt);
        wait_event(qhp->wait, !atomic_read(&qhp->refcnt));

        ucontext = ib_qp->uobject ? to_iwch_ucontext(ib_qp->uobject->context)
                                  : NULL;
        cxio_destroy_qp(&rhp->rdev, &qhp->wq,
                        ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

        PDBG("%s ib_qp %p qpid 0x%0x qhp %p\n", __func__,
             ib_qp, qhp->wq.qpid, qhp);
        kfree(qhp);
        return 0;
}
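
/*
 * Create an RC QP.  The RQ depth is (entries + 1) rounded up to a
 * power of two, with a hardware minimum of 16; the SQ and total WQ
 * depths are also rounded up because the queue accounting macros
 * assume power-of-two sizes.  Userspace QPs get two mmap keys back:
 * one for the work queue memory and one for the doorbell page.
 */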
static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
                                    struct ib_qp_init_attr *attrs,
                                    struct ib_udata *udata)
{
        struct iwch_dev *rhp;
        struct iwch_qp *qhp;
        struct iwch_pd *php;
        struct iwch_cq *schp;
        struct iwch_cq *rchp;
        struct iwch_create_qp_resp uresp;
        int wqsize, sqsize, rqsize;
        struct iwch_ucontext *ucontext;

        PDBG("%s ib_pd %p\n", __func__, pd);
        if (attrs->qp_type != IB_QPT_RC)
                return ERR_PTR(-EINVAL);
        php = to_iwch_pd(pd);
        rhp = php->rhp;
        schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid);
        rchp = get_chp(rhp, ((struct iwch_cq *) attrs->recv_cq)->cq.cqid);
        if (!schp || !rchp)
                return ERR_PTR(-EINVAL);

        /* The RQT size must be # of entries + 1 rounded up to a power of two */
        rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr);
        if (rqsize == attrs->cap.max_recv_wr)
                rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr+1);

        /* T3 doesn't support RQT depth < 16 */
        if (rqsize < 16)
                rqsize = 16;

        if (rqsize > T3_MAX_RQ_SIZE)
                return ERR_PTR(-EINVAL);

        if (attrs->cap.max_inline_data > T3_MAX_INLINE)
                return ERR_PTR(-EINVAL);

        /*
         * NOTE: The SQ and total WQ sizes don't need to be
         * a power of two.  However, all the code assumes
         * they are.  EG: Q_FREECNT() and friends.
         */
        sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);
        wqsize = roundup_pow_of_two(rqsize + sqsize);

        /*
         * Kernel users need more wq space for fastreg WRs which can take
         * 2 WR fragments.
         */
        ucontext = pd->uobject ? to_iwch_ucontext(pd->uobject->context) : NULL;
        if (!ucontext && wqsize < (rqsize + (2 * sqsize)))
                wqsize = roundup_pow_of_two(rqsize +
                                roundup_pow_of_two(attrs->cap.max_send_wr * 2));
        PDBG("%s wqsize %d sqsize %d rqsize %d\n", __func__,
             wqsize, sqsize, rqsize);
        qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
        if (!qhp)
                return ERR_PTR(-ENOMEM);
        qhp->wq.size_log2 = ilog2(wqsize);
        qhp->wq.rq_size_log2 = ilog2(rqsize);
        qhp->wq.sq_size_log2 = ilog2(sqsize);
        if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq,
                           ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) {
                kfree(qhp);
                return ERR_PTR(-ENOMEM);
        }

        attrs->cap.max_recv_wr = rqsize - 1;
        attrs->cap.max_send_wr = sqsize;
        attrs->cap.max_inline_data = T3_MAX_INLINE;

        qhp->rhp = rhp;
        qhp->attr.pd = php->pdid;
        qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid;
        qhp->attr.rcq = ((struct iwch_cq *) attrs->recv_cq)->cq.cqid;
        qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
        qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
        qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
        qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
        qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
        qhp->attr.state = IWCH_QP_STATE_IDLE;
        qhp->attr.next_state = IWCH_QP_STATE_IDLE;

        /*
         * XXX - These don't get passed in from the openib user
         * at create time.  The CM sets them via a QP modify.
         * Need to fix...  I think the CM should
         */
        qhp->attr.enable_rdma_read = 1;
        qhp->attr.enable_rdma_write = 1;
        qhp->attr.enable_bind = 1;
        qhp->attr.max_ord = 1;
        qhp->attr.max_ird = 1;

        spin_lock_init(&qhp->lock);
        init_waitqueue_head(&qhp->wait);
        atomic_set(&qhp->refcnt, 1);

        if (insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid)) {
                cxio_destroy_qp(&rhp->rdev, &qhp->wq,
                                ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
                kfree(qhp);
                return ERR_PTR(-ENOMEM);
        }

        if (udata) {

                struct iwch_mm_entry *mm1, *mm2;

                mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
                if (!mm1) {
                        iwch_destroy_qp(&qhp->ibqp);
                        return ERR_PTR(-ENOMEM);
                }

                mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
                if (!mm2) {
                        kfree(mm1);
                        iwch_destroy_qp(&qhp->ibqp);
                        return ERR_PTR(-ENOMEM);
                }

                uresp.qpid = qhp->wq.qpid;
                uresp.size_log2 = qhp->wq.size_log2;
                uresp.sq_size_log2 = qhp->wq.sq_size_log2;
                uresp.rq_size_log2 = qhp->wq.rq_size_log2;
                spin_lock(&ucontext->mmap_lock);
                uresp.key = ucontext->key;
                ucontext->key += PAGE_SIZE;
                uresp.db_key = ucontext->key;
                ucontext->key += PAGE_SIZE;
                spin_unlock(&ucontext->mmap_lock);
                if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
                        kfree(mm1);
                        kfree(mm2);
                        iwch_destroy_qp(&qhp->ibqp);
                        return ERR_PTR(-EFAULT);
                }
                mm1->key = uresp.key;
                mm1->addr = virt_to_phys(qhp->wq.queue);
                mm1->len = PAGE_ALIGN(wqsize * sizeof(union t3_wr));
                insert_mmap(ucontext, mm1);
                mm2->key = uresp.db_key;
                mm2->addr = qhp->wq.udb & PAGE_MASK;
                mm2->len = PAGE_SIZE;
                insert_mmap(ucontext, mm2);
        }
        qhp->ibqp.qp_num = qhp->wq.qpid;
        init_timer(&(qhp->timer));
        PDBG("%s sq_num_entries %d, rq_num_entries %d "
             "qpid 0x%0x qhp %p dma_addr 0x%llx size %d rq_addr 0x%x\n",
             __func__, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
             qhp->wq.qpid, qhp, (unsigned long long) qhp->wq.dma_addr,
             1 << qhp->wq.size_log2, qhp->wq.rq_addr);
        return &qhp->ibqp;
}
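
/*
 * Translate an ib_qp_attr modify into the driver's own attribute mask.
 * iWARP has no RTR state, so RTR transitions are silently dropped;
 * only state and access-flag changes are honored here.
 */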
static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                             int attr_mask, struct ib_udata *udata)
{
        struct iwch_dev *rhp;
        struct iwch_qp *qhp;
        enum iwch_qp_attr_mask mask = 0;
        struct iwch_qp_attributes attrs;

        PDBG("%s ib_qp %p\n", __func__, ibqp);

        /* iwarp does not support the RTR state */
        if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
                attr_mask &= ~IB_QP_STATE;

        /* Make sure we still have something left to do */
        if (!attr_mask)
                return 0;

        memset(&attrs, 0, sizeof attrs);
        qhp = to_iwch_qp(ibqp);
        rhp = qhp->rhp;

        attrs.next_state = iwch_convert_state(attr->qp_state);
        attrs.enable_rdma_read = (attr->qp_access_flags &
                                  IB_ACCESS_REMOTE_READ) ? 1 : 0;
        attrs.enable_rdma_write = (attr->qp_access_flags &
                                   IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
        attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;

        mask |= (attr_mask & IB_QP_STATE) ? IWCH_QP_ATTR_NEXT_STATE : 0;
        mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
                        (IWCH_QP_ATTR_ENABLE_RDMA_READ |
                         IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
                         IWCH_QP_ATTR_ENABLE_RDMA_BIND) : 0;

        return iwch_modify_qp(rhp, qhp, mask, &attrs, 0);
}

void iwch_qp_add_ref(struct ib_qp *qp)
{
        PDBG("%s ib_qp %p\n", __func__, qp);
        atomic_inc(&(to_iwch_qp(qp)->refcnt));
}

void iwch_qp_rem_ref(struct ib_qp *qp)
{
        PDBG("%s ib_qp %p\n", __func__, qp);
        if (atomic_dec_and_test(&(to_iwch_qp(qp)->refcnt)))
                wake_up(&(to_iwch_qp(qp)->wait));
}

static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
{
        PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
        return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
}

static int iwch_query_pkey(struct ib_device *ibdev,
                           u8 port, u16 index, u16 *pkey)
{
        PDBG("%s ibdev %p\n", __func__, ibdev);
        *pkey = 0;
        return 0;
}

static int iwch_query_gid(struct ib_device *ibdev, u8 port,
                          int index, union ib_gid *gid)
{
        struct iwch_dev *dev;

        PDBG("%s ibdev %p, port %d, index %d, gid %p\n",
             __func__, ibdev, port, index, gid);
        dev = to_iwch_dev(ibdev);
        BUG_ON(port == 0 || port > 2);
        memset(&(gid->raw[0]), 0, sizeof(gid->raw));
        memcpy(&(gid->raw[0]), dev->rdev.port_info.lldevs[port-1]->dev_addr, 6);
        return 0;
}
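
/*
 * Pack the firmware version into a u64 as major.minor.micro, 16 bits
 * each.  The ethtool fw_version string presumably starts with a letter
 * (hence the "+ 1" skipping the first character) followed by
 * dot-separated numeric fields.
 */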
static u64 fw_vers_string_to_u64(struct iwch_dev *iwch_dev)
{
        struct ethtool_drvinfo info;
        struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
        char *cp, *next;
        unsigned fw_maj, fw_min, fw_mic;

        lldev->ethtool_ops->get_drvinfo(lldev, &info);

        next = info.fw_version + 1;
        cp = strsep(&next, ".");
        sscanf(cp, "%i", &fw_maj);
        cp = strsep(&next, ".");
        sscanf(cp, "%i", &fw_min);
        cp = strsep(&next, ".");
        sscanf(cp, "%i", &fw_mic);

        return (((u64)fw_maj & 0xffff) << 32) | ((fw_min & 0xffff) << 16) |
               (fw_mic & 0xffff);
}

static int iwch_query_device(struct ib_device *ibdev,
                             struct ib_device_attr *props)
{
        struct iwch_dev *dev;

        PDBG("%s ibdev %p\n", __func__, ibdev);

        dev = to_iwch_dev(ibdev);
        memset(props, 0, sizeof *props);
        memcpy(&props->sys_image_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
        props->hw_ver = dev->rdev.t3cdev_p->type;
        props->fw_ver = fw_vers_string_to_u64(dev);
        props->device_cap_flags = dev->device_cap_flags;
        props->page_size_cap = dev->attr.mem_pgsizes_bitmask;
        props->vendor_id = (u32)dev->rdev.rnic_info.pdev->vendor;
        props->vendor_part_id = (u32)dev->rdev.rnic_info.pdev->device;
        props->max_mr_size = dev->attr.max_mr_size;
        props->max_qp = dev->attr.max_qps;
        props->max_qp_wr = dev->attr.max_wrs;
        props->max_sge = dev->attr.max_sge_per_wr;
        props->max_sge_rd = 1;
        props->max_qp_rd_atom = dev->attr.max_rdma_reads_per_qp;
        props->max_qp_init_rd_atom = dev->attr.max_rdma_reads_per_qp;
        props->max_cq = dev->attr.max_cqs;
        props->max_cqe = dev->attr.max_cqes_per_cq;
        props->max_mr = dev->attr.max_mem_regs;
        props->max_pd = dev->attr.max_pds;
        props->local_ca_ack_delay = 0;
        props->max_fast_reg_page_list_len = T3_MAX_FASTREG_DEPTH;

        return 0;
}

static int iwch_query_port(struct ib_device *ibdev,
                           u8 port, struct ib_port_attr *props)
{
        struct iwch_dev *dev;
        struct net_device *netdev;
        struct in_device *inetdev;

        PDBG("%s ibdev %p\n", __func__, ibdev);

        dev = to_iwch_dev(ibdev);
        netdev = dev->rdev.port_info.lldevs[port-1];

        memset(props, 0, sizeof(struct ib_port_attr));
        props->max_mtu = IB_MTU_4096;
        if (netdev->mtu >= 4096)
                props->active_mtu = IB_MTU_4096;
        else if (netdev->mtu >= 2048)
                props->active_mtu = IB_MTU_2048;
        else if (netdev->mtu >= 1024)
                props->active_mtu = IB_MTU_1024;
        else if (netdev->mtu >= 512)
                props->active_mtu = IB_MTU_512;
        else
                props->active_mtu = IB_MTU_256;

        if (!netif_carrier_ok(netdev))
                props->state = IB_PORT_DOWN;
        else {
                inetdev = in_dev_get(netdev);
                if (inetdev) {
                        if (inetdev->ifa_list)
                                props->state = IB_PORT_ACTIVE;
                        else
                                props->state = IB_PORT_INIT;
                        in_dev_put(inetdev);
                } else
                        props->state = IB_PORT_INIT;
        }

        props->port_cap_flags =
            IB_PORT_CM_SUP |
            IB_PORT_SNMP_TUNNEL_SUP |
            IB_PORT_REINIT_SUP |
            IB_PORT_DEVICE_MGMT_SUP |
            IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
        props->gid_tbl_len = 1;
        props->pkey_tbl_len = 1;
        props->active_width = 2;
        props->active_speed = IB_SPEED_DDR;
        props->max_msg_sz = -1;

        return 0;
}

static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
                                                 ibdev.dev);
        PDBG("%s dev 0x%p\n", __func__, dev);
        return sprintf(buf, "%d\n", iwch_dev->rdev.t3cdev_p->type);
}

static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
                                                 ibdev.dev);
        struct ethtool_drvinfo info;
        struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;

        PDBG("%s dev 0x%p\n", __func__, dev);
        lldev->ethtool_ops->get_drvinfo(lldev, &info);
        return sprintf(buf, "%s\n", info.fw_version);
}

static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
                                                 ibdev.dev);
        struct ethtool_drvinfo info;
        struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;

        PDBG("%s dev 0x%p\n", __func__, dev);
        lldev->ethtool_ops->get_drvinfo(lldev, &info);
        return sprintf(buf, "%s\n", info.driver);
}

static ssize_t show_board(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
                                                 ibdev.dev);
        PDBG("%s dev 0x%p\n", __func__, dev);
        return sprintf(buf, "%x.%x\n", iwch_dev->rdev.rnic_info.pdev->vendor,
                       iwch_dev->rdev.rnic_info.pdev->device);
}

static int iwch_get_mib(struct ib_device *ibdev,
                        union rdma_protocol_stats *stats)
{
        struct iwch_dev *dev;
        struct tp_mib_stats m;
        int ret;

        PDBG("%s ibdev %p\n", __func__, ibdev);
        dev = to_iwch_dev(ibdev);
        ret = dev->rdev.t3cdev_p->ctl(dev->rdev.t3cdev_p, RDMA_GET_MIB, &m);
        if (ret)
                return -ENOSYS;

        memset(stats, 0, sizeof *stats);
        stats->iw.ipInReceives = ((u64) m.ipInReceive_hi << 32) +
                                 m.ipInReceive_lo;
        stats->iw.ipInHdrErrors = ((u64) m.ipInHdrErrors_hi << 32) +
                                  m.ipInHdrErrors_lo;
        stats->iw.ipInAddrErrors = ((u64) m.ipInAddrErrors_hi << 32) +
                                   m.ipInAddrErrors_lo;
        stats->iw.ipInUnknownProtos = ((u64) m.ipInUnknownProtos_hi << 32) +
                                      m.ipInUnknownProtos_lo;
        stats->iw.ipInDiscards = ((u64) m.ipInDiscards_hi << 32) +
                                 m.ipInDiscards_lo;
        stats->iw.ipInDelivers = ((u64) m.ipInDelivers_hi << 32) +
                                 m.ipInDelivers_lo;
        stats->iw.ipOutRequests = ((u64) m.ipOutRequests_hi << 32) +
                                  m.ipOutRequests_lo;
        stats->iw.ipOutDiscards = ((u64) m.ipOutDiscards_hi << 32) +
                                  m.ipOutDiscards_lo;
        stats->iw.ipOutNoRoutes = ((u64) m.ipOutNoRoutes_hi << 32) +
                                  m.ipOutNoRoutes_lo;
        stats->iw.ipReasmTimeout = (u64) m.ipReasmTimeout;
        stats->iw.ipReasmReqds = (u64) m.ipReasmReqds;
        stats->iw.ipReasmOKs = (u64) m.ipReasmOKs;
        stats->iw.ipReasmFails = (u64) m.ipReasmFails;
        stats->iw.tcpActiveOpens = (u64) m.tcpActiveOpens;
        stats->iw.tcpPassiveOpens = (u64) m.tcpPassiveOpens;
        stats->iw.tcpAttemptFails = (u64) m.tcpAttemptFails;
        stats->iw.tcpEstabResets = (u64) m.tcpEstabResets;
        stats->iw.tcpOutRsts = (u64) m.tcpOutRsts;
        stats->iw.tcpCurrEstab = (u64) m.tcpCurrEstab;
        stats->iw.tcpInSegs = ((u64) m.tcpInSegs_hi << 32) +
                              m.tcpInSegs_lo;
        stats->iw.tcpOutSegs = ((u64) m.tcpOutSegs_hi << 32) +
                               m.tcpOutSegs_lo;
        stats->iw.tcpRetransSegs = ((u64) m.tcpRetransSeg_hi << 32) +
                                   m.tcpRetransSeg_lo;
        stats->iw.tcpInErrs = ((u64) m.tcpInErrs_hi << 32) +
                              m.tcpInErrs_lo;
        stats->iw.tcpRtoMin = (u64) m.tcpRtoMin;
        stats->iw.tcpRtoMax = (u64) m.tcpRtoMax;
        return 0;
}

static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);

static struct device_attribute *iwch_class_attributes[] = {
        &dev_attr_hw_rev,
        &dev_attr_fw_ver,
        &dev_attr_hca_type,
        &dev_attr_board_id,
};
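
/*
 * Register the device with the IB core: fill in the verbs function
 * table, capabilities and iWARP CM ops, then create the sysfs
 * attribute files.  Failures unwind in reverse order.
 */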
int iwch_register_device(struct iwch_dev *dev)
{
        int ret;
        int i;

        PDBG("%s iwch_dev %p\n", __func__, dev);
        strlcpy(dev->ibdev.name, "cxgb3_%d", IB_DEVICE_NAME_MAX);
        memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
        memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
        dev->ibdev.owner = THIS_MODULE;
        dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
                                IB_DEVICE_MEM_WINDOW |
                                IB_DEVICE_MEM_MGT_EXTENSIONS;

        /* cxgb3 supports STag 0. */
        dev->ibdev.local_dma_lkey = 0;

        dev->ibdev.uverbs_cmd_mask =
            (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
            (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
            (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
            (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
            (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
            (1ull << IB_USER_VERBS_CMD_REG_MR) |
            (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
            (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
            (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
            (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
            (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
            (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
            (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
            (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
            (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
            (1ull << IB_USER_VERBS_CMD_POST_SEND) |
            (1ull << IB_USER_VERBS_CMD_POST_RECV);
        dev->ibdev.node_type = RDMA_NODE_RNIC;
        memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
        dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports;
        dev->ibdev.num_comp_vectors = 1;
        dev->ibdev.dma_device = &(dev->rdev.rnic_info.pdev->dev);
        dev->ibdev.query_device = iwch_query_device;
        dev->ibdev.query_port = iwch_query_port;
        dev->ibdev.query_pkey = iwch_query_pkey;
        dev->ibdev.query_gid = iwch_query_gid;
        dev->ibdev.alloc_ucontext = iwch_alloc_ucontext;
        dev->ibdev.dealloc_ucontext = iwch_dealloc_ucontext;
        dev->ibdev.mmap = iwch_mmap;
        dev->ibdev.alloc_pd = iwch_allocate_pd;
        dev->ibdev.dealloc_pd = iwch_deallocate_pd;
        dev->ibdev.create_ah = iwch_ah_create;
        dev->ibdev.destroy_ah = iwch_ah_destroy;
        dev->ibdev.create_qp = iwch_create_qp;
        dev->ibdev.modify_qp = iwch_ib_modify_qp;
        dev->ibdev.destroy_qp = iwch_destroy_qp;
        dev->ibdev.create_cq = iwch_create_cq;
        dev->ibdev.destroy_cq = iwch_destroy_cq;
        dev->ibdev.resize_cq = iwch_resize_cq;
        dev->ibdev.poll_cq = iwch_poll_cq;
        dev->ibdev.get_dma_mr = iwch_get_dma_mr;
        dev->ibdev.reg_phys_mr = iwch_register_phys_mem;
        dev->ibdev.rereg_phys_mr = iwch_reregister_phys_mem;
        dev->ibdev.reg_user_mr = iwch_reg_user_mr;
        dev->ibdev.dereg_mr = iwch_dereg_mr;
        dev->ibdev.alloc_mw = iwch_alloc_mw;
        dev->ibdev.bind_mw = iwch_bind_mw;
        dev->ibdev.dealloc_mw = iwch_dealloc_mw;
        dev->ibdev.alloc_fast_reg_mr = iwch_alloc_fast_reg_mr;
        dev->ibdev.alloc_fast_reg_page_list = iwch_alloc_fastreg_pbl;
        dev->ibdev.free_fast_reg_page_list = iwch_free_fastreg_pbl;
        dev->ibdev.attach_mcast = iwch_multicast_attach;
        dev->ibdev.detach_mcast = iwch_multicast_detach;
        dev->ibdev.process_mad = iwch_process_mad;
        dev->ibdev.req_notify_cq = iwch_arm_cq;
        dev->ibdev.post_send = iwch_post_send;
        dev->ibdev.post_recv = iwch_post_receive;
        dev->ibdev.get_protocol_stats = iwch_get_mib;
        dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION;

        dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
        if (!dev->ibdev.iwcm)
                return -ENOMEM;

        dev->ibdev.iwcm->connect = iwch_connect;
        dev->ibdev.iwcm->accept = iwch_accept_cr;
        dev->ibdev.iwcm->reject = iwch_reject_cr;
        dev->ibdev.iwcm->create_listen = iwch_create_listen;
        dev->ibdev.iwcm->destroy_listen = iwch_destroy_listen;
        dev->ibdev.iwcm->add_ref = iwch_qp_add_ref;
        dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
        dev->ibdev.iwcm->get_qp = iwch_get_qp;

        ret = ib_register_device(&dev->ibdev, NULL);
        if (ret)
                goto bail1;

        for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i) {
                ret = device_create_file(&dev->ibdev.dev,
                                         iwch_class_attributes[i]);
                if (ret) {
                        goto bail2;
                }
        }
        return 0;
bail2:
        ib_unregister_device(&dev->ibdev);
bail1:
        kfree(dev->ibdev.iwcm);
        return ret;
}

void iwch_unregister_device(struct iwch_dev *dev)
{
        int i;

        PDBG("%s iwch_dev %p\n", __func__, dev);
        for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i)
                device_remove_file(&dev->ibdev.dev,
                                   iwch_class_attributes[i]);
        ib_unregister_device(&dev->ibdev);
        kfree(dev->ibdev.iwcm);
        return;
}