mthca_provider.c

/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>

#include <linux/sched.h>
#include <linux/mm.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_user.h"
#include "mthca_memfree.h"

static void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method        = IB_MGMT_METHOD_GET;
}
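
/*
 * Device attributes are gathered by sending a NodeInfo SMP through
 * MAD_IFC to the HCA's own firmware and combining the reply with the
 * limits probed at driver initialization time.
 */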
static int mthca_query_device(struct ib_device *ibdev,
			      struct ib_device_attr *props)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	struct mthca_dev *mdev = to_mdev(ibdev);
	u8 status;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	memset(props, 0, sizeof *props);

	props->fw_ver = mdev->fw_ver;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mthca_MAD_IFC(mdev, 1, 1,
			    1, NULL, NULL, in_mad, out_mad,
			    &status);
	if (err)
		goto out;
	if (status) {
		err = -EINVAL;
		goto out;
	}

	props->device_cap_flags    = mdev->device_cap_flags;
	props->vendor_id           = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
		0xffffff;
	props->vendor_part_id      = be16_to_cpup((__be16 *) (out_mad->data + 30));
	props->hw_ver              = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&props->sys_image_guid, out_mad->data + 4, 8);

	props->max_mr_size         = ~0ull;
	props->page_size_cap       = mdev->limits.page_size_cap;
	props->max_qp              = mdev->limits.num_qps - mdev->limits.reserved_qps;
	props->max_qp_wr           = mdev->limits.max_wqes;
	props->max_sge             = mdev->limits.max_sg;
	props->max_cq              = mdev->limits.num_cqs - mdev->limits.reserved_cqs;
	props->max_cqe             = mdev->limits.max_cqes;
	props->max_mr              = mdev->limits.num_mpts - mdev->limits.reserved_mrws;
	props->max_pd              = mdev->limits.num_pds - mdev->limits.reserved_pds;
	props->max_qp_rd_atom      = 1 << mdev->qp_table.rdb_shift;
	props->max_qp_init_rd_atom = mdev->limits.max_qp_init_rdma;
	props->max_res_rd_atom     = props->max_qp_rd_atom * props->max_qp;
	props->max_srq             = mdev->limits.num_srqs - mdev->limits.reserved_srqs;
	props->max_srq_wr          = mdev->limits.max_srq_wqes;
	props->max_srq_sge         = mdev->limits.max_srq_sge;
	props->local_ca_ack_delay  = mdev->limits.local_ca_ack_delay;
	props->atomic_cap          = mdev->limits.flags & DEV_LIM_FLAG_ATOMIC ?
					IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->max_pkeys           = mdev->limits.pkey_table_len;
	props->max_mcast_grp       = mdev->limits.num_mgms + mdev->limits.num_amgms;
	props->max_mcast_qp_attach = MTHCA_QP_PER_MGM;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	/*
	 * If Sinai memory key optimization is being used, then only
	 * the 8-bit key portion will change.  For other HCAs, the
	 * unused index bits will also be used for FMR remapping.
	 */
	if (mdev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
		props->max_map_per_fmr = 255;
	else
		props->max_map_per_fmr =
			(1 << (32 - ilog2(mdev->limits.num_mpts))) - 1;

	err = 0;
out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
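
/*
 * Port attributes come from a PortInfo SMP; the fixed byte offsets
 * below index into the big-endian PortInfo attribute layout defined
 * by the IB spec (LID at byte 16, SM LID at 18, capability mask at
 * 20, and so on).
 */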
static int mthca_query_port(struct ib_device *ibdev,
			    u8 port, struct ib_port_attr *props)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	u8 status;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	memset(props, 0, sizeof *props);

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
			    port, NULL, NULL, in_mad, out_mad,
			    &status);
	if (err)
		goto out;
	if (status) {
		err = -EINVAL;
		goto out;
	}

	props->lid             = be16_to_cpup((__be16 *) (out_mad->data + 16));
	props->lmc             = out_mad->data[34] & 0x7;
	props->sm_lid          = be16_to_cpup((__be16 *) (out_mad->data + 18));
	props->sm_sl           = out_mad->data[36] & 0xf;
	props->state           = out_mad->data[32] & 0xf;
	props->phys_state      = out_mad->data[33] >> 4;
	props->port_cap_flags  = be32_to_cpup((__be32 *) (out_mad->data + 20));
	props->gid_tbl_len     = to_mdev(ibdev)->limits.gid_table_len;
	props->max_msg_sz      = 0x80000000;
	props->pkey_tbl_len    = to_mdev(ibdev)->limits.pkey_table_len;
	props->bad_pkey_cntr   = be16_to_cpup((__be16 *) (out_mad->data + 46));
	props->qkey_viol_cntr  = be16_to_cpup((__be16 *) (out_mad->data + 48));
	props->active_width    = out_mad->data[31] & 0xf;
	props->active_speed    = out_mad->data[35] >> 4;
	props->max_mtu         = out_mad->data[41] & 0xf;
	props->active_mtu      = out_mad->data[36] >> 4;
	props->subnet_timeout  = out_mad->data[51] & 0x1f;
	props->max_vl_num      = out_mad->data[37] >> 4;
	props->init_type_reply = out_mad->data[41] >> 4;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mthca_modify_device(struct ib_device *ibdev,
			       int mask,
			       struct ib_device_modify *props)
{
	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
			return -ERESTARTSYS;
		memcpy(ibdev->node_desc, props->node_desc, 64);
		mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
	}

	return 0;
}

static int mthca_modify_port(struct ib_device *ibdev,
			     u8 port, int port_modify_mask,
			     struct ib_port_modify *props)
{
	struct mthca_set_ib_param set_ib;
	struct ib_port_attr attr;
	int err;
	u8 status;

	if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
		return -ERESTARTSYS;

	err = mthca_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	set_ib.set_si_guid     = 0;
	set_ib.reset_qkey_viol = !!(port_modify_mask & IB_PORT_RESET_QKEY_CNTR);

	set_ib.cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mthca_SET_IB(to_mdev(ibdev), &set_ib, port, &status);
	if (err)
		goto out;
	if (status) {
		err = -EINVAL;
		goto out;
	}

out:
	mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
	return err;
}

static int mthca_query_pkey(struct ib_device *ibdev,
			    u8 port, u16 index, u16 *pkey)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	u8 status;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
			    port, NULL, NULL, in_mad, out_mad,
			    &status);
	if (err)
		goto out;
	if (status) {
		err = -EINVAL;
		goto out;
	}

	*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
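
/*
 * A GID is the port's 8-byte subnet prefix (PortInfo bytes 8..15)
 * followed by an 8-byte GUID from the GUIDInfo table, so assembling
 * one takes two separate SMPs.
 */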
static int mthca_query_gid(struct ib_device *ibdev, u8 port,
			   int index, union ib_gid *gid)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	u8 status;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
			    port, NULL, NULL, in_mad, out_mad,
			    &status);
	if (err)
		goto out;
	if (status) {
		err = -EINVAL;
		goto out;
	}

	memcpy(gid->raw, out_mad->data + 8, 8);

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
			    port, NULL, NULL, in_mad, out_mad,
			    &status);
	if (err)
		goto out;
	if (status) {
		err = -EINVAL;
		goto out;
	}

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,
						struct ib_udata *udata)
{
	struct mthca_alloc_ucontext_resp uresp;
	struct mthca_ucontext *context;
	int err;

	if (!(to_mdev(ibdev)->active))
		return ERR_PTR(-EAGAIN);

	memset(&uresp, 0, sizeof uresp);

	uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps;
	if (mthca_is_memfree(to_mdev(ibdev)))
		uresp.uarc_size = to_mdev(ibdev)->uar_table.uarc_size;
	else
		uresp.uarc_size = 0;

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	err = mthca_uar_alloc(to_mdev(ibdev), &context->uar);
	if (err) {
		kfree(context);
		return ERR_PTR(err);
	}

	context->db_tab = mthca_init_user_db_tab(to_mdev(ibdev));
	if (IS_ERR(context->db_tab)) {
		err = PTR_ERR(context->db_tab);
		mthca_uar_free(to_mdev(ibdev), &context->uar);
		kfree(context);
		return ERR_PTR(err);
	}

	if (ib_copy_to_udata(udata, &uresp, sizeof uresp)) {
		mthca_cleanup_user_db_tab(to_mdev(ibdev), &context->uar, context->db_tab);
		mthca_uar_free(to_mdev(ibdev), &context->uar);
		kfree(context);
		return ERR_PTR(-EFAULT);
	}

	context->reg_mr_warned = 0;

	return &context->ibucontext;
}

static int mthca_dealloc_ucontext(struct ib_ucontext *context)
{
	mthca_cleanup_user_db_tab(to_mdev(context->device), &to_mucontext(context)->uar,
				  to_mucontext(context)->db_tab);
	mthca_uar_free(to_mdev(context->device), &to_mucontext(context)->uar);
	kfree(to_mucontext(context));

	return 0;
}
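
/*
 * Each user context owns exactly one page of UAR (User Access Region)
 * doorbell registers; the page is mapped uncached so that doorbell
 * writes reach the HCA immediately instead of lingering in a CPU
 * cache.
 */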
static int mthca_mmap_uar(struct ib_ucontext *context,
			  struct vm_area_struct *vma)
{
	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (io_remap_pfn_range(vma, vma->vm_start,
			       to_mucontext(context)->uar.pfn,
			       PAGE_SIZE, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}

static struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev,
				    struct ib_ucontext *context,
				    struct ib_udata *udata)
{
	struct mthca_pd *pd;
	int err;

	pd = kmalloc(sizeof *pd, GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	err = mthca_pd_alloc(to_mdev(ibdev), !context, pd);
	if (err) {
		kfree(pd);
		return ERR_PTR(err);
	}

	if (context) {
		if (ib_copy_to_udata(udata, &pd->pd_num, sizeof (__u32))) {
			mthca_pd_free(to_mdev(ibdev), pd);
			kfree(pd);
			return ERR_PTR(-EFAULT);
		}
	}

	return &pd->ibpd;
}

static int mthca_dealloc_pd(struct ib_pd *pd)
{
	mthca_pd_free(to_mdev(pd->device), to_mpd(pd));
	kfree(pd);

	return 0;
}

static struct ib_ah *mthca_ah_create(struct ib_pd *pd,
				     struct ib_ah_attr *ah_attr)
{
	int err;
	struct mthca_ah *ah;

	ah = kmalloc(sizeof *ah, GFP_ATOMIC);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	err = mthca_create_ah(to_mdev(pd->device), to_mpd(pd), ah_attr, ah);
	if (err) {
		kfree(ah);
		return ERR_PTR(err);
	}

	return &ah->ibah;
}

static int mthca_ah_destroy(struct ib_ah *ah)
{
	mthca_destroy_ah(to_mdev(ah->device), to_mah(ah));
	kfree(ah);

	return 0;
}

static struct ib_srq *mthca_create_srq(struct ib_pd *pd,
				       struct ib_srq_init_attr *init_attr,
				       struct ib_udata *udata)
{
	struct mthca_create_srq ucmd;
	struct mthca_ucontext *context = NULL;
	struct mthca_srq *srq;
	int err;

	srq = kmalloc(sizeof *srq, GFP_KERNEL);
	if (!srq)
		return ERR_PTR(-ENOMEM);

	if (pd->uobject) {
		context = to_mucontext(pd->uobject->context);

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err_free;
		}

		err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
					context->db_tab, ucmd.db_index,
					ucmd.db_page);
		if (err)
			goto err_free;

		srq->mr.ibmr.lkey = ucmd.lkey;
		srq->db_index     = ucmd.db_index;
	}

	err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd),
			      &init_attr->attr, srq);

	if (err && pd->uobject)
		mthca_unmap_user_db(to_mdev(pd->device), &context->uar,
				    context->db_tab, ucmd.db_index);

	if (err)
		goto err_free;

	if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof (__u32))) {
		mthca_free_srq(to_mdev(pd->device), srq);
		err = -EFAULT;
		goto err_free;
	}

	return &srq->ibsrq;

err_free:
	kfree(srq);

	return ERR_PTR(err);
}

static int mthca_destroy_srq(struct ib_srq *srq)
{
	struct mthca_ucontext *context;

	if (srq->uobject) {
		context = to_mucontext(srq->uobject->context);

		mthca_unmap_user_db(to_mdev(srq->device), &context->uar,
				    context->db_tab, to_msrq(srq)->db_index);
	}

	mthca_free_srq(to_mdev(srq->device), to_msrq(srq));
	kfree(srq);

	return 0;
}

static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
				     struct ib_qp_init_attr *init_attr,
				     struct ib_udata *udata)
{
	struct mthca_create_qp ucmd;
	struct mthca_qp *qp;
	int err;

	if (init_attr->create_flags)
		return ERR_PTR(-EINVAL);

	switch (init_attr->qp_type) {
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
	{
		struct mthca_ucontext *context;

		qp = kmalloc(sizeof *qp, GFP_KERNEL);
		if (!qp)
			return ERR_PTR(-ENOMEM);

		if (pd->uobject) {
			context = to_mucontext(pd->uobject->context);

			if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
				kfree(qp);
				return ERR_PTR(-EFAULT);
			}

			err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
						context->db_tab,
						ucmd.sq_db_index, ucmd.sq_db_page);
			if (err) {
				kfree(qp);
				return ERR_PTR(err);
			}

			err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
						context->db_tab,
						ucmd.rq_db_index, ucmd.rq_db_page);
			if (err) {
				mthca_unmap_user_db(to_mdev(pd->device),
						    &context->uar,
						    context->db_tab,
						    ucmd.sq_db_index);
				kfree(qp);
				return ERR_PTR(err);
			}

			qp->mr.ibmr.lkey = ucmd.lkey;
			qp->sq.db_index  = ucmd.sq_db_index;
			qp->rq.db_index  = ucmd.rq_db_index;
		}

		err = mthca_alloc_qp(to_mdev(pd->device), to_mpd(pd),
				     to_mcq(init_attr->send_cq),
				     to_mcq(init_attr->recv_cq),
				     init_attr->qp_type, init_attr->sq_sig_type,
				     &init_attr->cap, qp);

		if (err && pd->uobject) {
			context = to_mucontext(pd->uobject->context);

			mthca_unmap_user_db(to_mdev(pd->device),
					    &context->uar,
					    context->db_tab,
					    ucmd.sq_db_index);
			mthca_unmap_user_db(to_mdev(pd->device),
					    &context->uar,
					    context->db_tab,
					    ucmd.rq_db_index);
		}

		qp->ibqp.qp_num = qp->qpn;
		break;
	}
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	{
		/* Don't allow userspace to create special QPs */
		if (pd->uobject)
			return ERR_PTR(-EINVAL);

		qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL);
		if (!qp)
			return ERR_PTR(-ENOMEM);

		qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;

		err = mthca_alloc_sqp(to_mdev(pd->device), to_mpd(pd),
				      to_mcq(init_attr->send_cq),
				      to_mcq(init_attr->recv_cq),
				      init_attr->sq_sig_type, &init_attr->cap,
				      qp->ibqp.qp_num, init_attr->port_num,
				      to_msqp(qp));
		break;
	}
	default:
		/* Don't support raw QPs */
		return ERR_PTR(-ENOSYS);
	}

	if (err) {
		kfree(qp);
		return ERR_PTR(err);
	}

	init_attr->cap.max_send_wr     = qp->sq.max;
	init_attr->cap.max_recv_wr     = qp->rq.max;
	init_attr->cap.max_send_sge    = qp->sq.max_gs;
	init_attr->cap.max_recv_sge    = qp->rq.max_gs;
	init_attr->cap.max_inline_data = qp->max_inline_data;

	return &qp->ibqp;
}

static int mthca_destroy_qp(struct ib_qp *qp)
{
	if (qp->uobject) {
		mthca_unmap_user_db(to_mdev(qp->device),
				    &to_mucontext(qp->uobject->context)->uar,
				    to_mucontext(qp->uobject->context)->db_tab,
				    to_mqp(qp)->sq.db_index);
		mthca_unmap_user_db(to_mdev(qp->device),
				    &to_mucontext(qp->uobject->context)->uar,
				    to_mucontext(qp->uobject->context)->db_tab,
				    to_mqp(qp)->rq.db_index);
	}
	mthca_free_qp(to_mdev(qp->device), to_mqp(qp));
	kfree(qp);
	return 0;
}

static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries,
				     int comp_vector,
				     struct ib_ucontext *context,
				     struct ib_udata *udata)
{
	struct mthca_create_cq ucmd;
	struct mthca_cq *cq;
	int nent;
	int err;

	if (entries < 1 || entries > to_mdev(ibdev)->limits.max_cqes)
		return ERR_PTR(-EINVAL);

	if (context) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
			return ERR_PTR(-EFAULT);

		err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
					to_mucontext(context)->db_tab,
					ucmd.set_db_index, ucmd.set_db_page);
		if (err)
			return ERR_PTR(err);

		err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
					to_mucontext(context)->db_tab,
					ucmd.arm_db_index, ucmd.arm_db_page);
		if (err)
			goto err_unmap_set;
	}

	cq = kmalloc(sizeof *cq, GFP_KERNEL);
	if (!cq) {
		err = -ENOMEM;
		goto err_unmap_arm;
	}

	if (context) {
		cq->buf.mr.ibmr.lkey = ucmd.lkey;
		cq->set_ci_db_index  = ucmd.set_db_index;
		cq->arm_db_index     = ucmd.arm_db_index;
	}
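
	/*
	 * The hardware wants a power-of-two CQ buffer and always keeps
	 * one CQE in reserve, so find the smallest power of two that is
	 * strictly greater than the requested number of entries.
	 */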
	for (nent = 1; nent <= entries; nent <<= 1)
		; /* nothing */

	err = mthca_init_cq(to_mdev(ibdev), nent,
			    context ? to_mucontext(context) : NULL,
			    context ? ucmd.pdn : to_mdev(ibdev)->driver_pd.pd_num,
			    cq);
	if (err)
		goto err_free;

	if (context && ib_copy_to_udata(udata, &cq->cqn, sizeof (__u32))) {
		mthca_free_cq(to_mdev(ibdev), cq);
		err = -EFAULT;	/* without this, err is 0 and we'd return NULL */
		goto err_free;
	}

	cq->resize_buf = NULL;

	return &cq->ibcq;

err_free:
	kfree(cq);

err_unmap_arm:
	if (context)
		mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
				    to_mucontext(context)->db_tab, ucmd.arm_db_index);

err_unmap_set:
	if (context)
		mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
				    to_mucontext(context)->db_tab, ucmd.set_db_index);

	return ERR_PTR(err);
}

static int mthca_alloc_resize_buf(struct mthca_dev *dev, struct mthca_cq *cq,
				  int entries)
{
	int ret;

	spin_lock_irq(&cq->lock);
	if (cq->resize_buf) {
		ret = -EBUSY;
		goto unlock;
	}

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
	if (!cq->resize_buf) {
		ret = -ENOMEM;
		goto unlock;
	}

	cq->resize_buf->state = CQ_RESIZE_ALLOC;

	ret = 0;

unlock:
	spin_unlock_irq(&cq->lock);

	if (ret)
		return ret;

	ret = mthca_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
	if (ret) {
		spin_lock_irq(&cq->lock);
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		spin_unlock_irq(&cq->lock);
		return ret;
	}

	cq->resize_buf->cqe = entries - 1;

	spin_lock_irq(&cq->lock);
	cq->resize_buf->state = CQ_RESIZE_READY;
	spin_unlock_irq(&cq->lock);

	return 0;
}
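
/*
 * Resizing a kernel-owned CQ is a three-step dance: stage a new buffer
 * (CQ_RESIZE_ALLOC -> CQ_RESIZE_READY), issue the RESIZE_CQ firmware
 * command, then copy any unpolled CQEs across and swap the buffers
 * under the CQ lock.  Userspace CQs only need the firmware command,
 * since the library owns the buffers.
 */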
static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mthca_dev *dev = to_mdev(ibcq->device);
	struct mthca_cq *cq = to_mcq(ibcq);
	struct mthca_resize_cq ucmd;
	u32 lkey;
	u8 status;
	int ret;

	if (entries < 1 || entries > dev->limits.max_cqes)
		return -EINVAL;

	mutex_lock(&cq->mutex);

	entries = roundup_pow_of_two(entries + 1);
	if (entries == ibcq->cqe + 1) {
		ret = 0;
		goto out;
	}

	if (cq->is_kernel) {
		ret = mthca_alloc_resize_buf(dev, cq, entries);
		if (ret)
			goto out;
		lkey = cq->resize_buf->buf.mr.ibmr.lkey;
	} else {
		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			ret = -EFAULT;
			goto out;
		}
		lkey = ucmd.lkey;
	}

	ret = mthca_RESIZE_CQ(dev, cq->cqn, lkey, ilog2(entries), &status);
	if (status)
		ret = -EINVAL;

	if (ret) {
		if (cq->resize_buf) {
			mthca_free_cq_buf(dev, &cq->resize_buf->buf,
					  cq->resize_buf->cqe);
			kfree(cq->resize_buf);
			spin_lock_irq(&cq->lock);
			cq->resize_buf = NULL;
			spin_unlock_irq(&cq->lock);
		}
		goto out;
	}

	if (cq->is_kernel) {
		struct mthca_cq_buf tbuf;
		int tcqe;

		spin_lock_irq(&cq->lock);
		if (cq->resize_buf->state == CQ_RESIZE_READY) {
			mthca_cq_resize_copy_cqes(cq);
			tbuf         = cq->buf;
			tcqe         = cq->ibcq.cqe;
			cq->buf      = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;
		} else {
			tbuf = cq->resize_buf->buf;
			tcqe = cq->resize_buf->cqe;
		}

		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		spin_unlock_irq(&cq->lock);

		mthca_free_cq_buf(dev, &tbuf, tcqe);
	} else
		ibcq->cqe = entries - 1;

out:
	mutex_unlock(&cq->mutex);

	return ret;
}

static int mthca_destroy_cq(struct ib_cq *cq)
{
	if (cq->uobject) {
		mthca_unmap_user_db(to_mdev(cq->device),
				    &to_mucontext(cq->uobject->context)->uar,
				    to_mucontext(cq->uobject->context)->db_tab,
				    to_mcq(cq)->arm_db_index);
		mthca_unmap_user_db(to_mdev(cq->device),
				    &to_mucontext(cq->uobject->context)->uar,
				    to_mucontext(cq->uobject->context)->db_tab,
				    to_mcq(cq)->set_ci_db_index);
	}
	mthca_free_cq(to_mdev(cq->device), to_mcq(cq));
	kfree(cq);

	return 0;
}

static inline u32 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MTHCA_MPT_FLAG_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MTHCA_MPT_FLAG_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MTHCA_MPT_FLAG_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MTHCA_MPT_FLAG_LOCAL_WRITE  : 0) |
	       MTHCA_MPT_FLAG_LOCAL_READ;
}

static struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mthca_mr *mr;
	int err;

	mr = kmalloc(sizeof *mr, GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	err = mthca_mr_alloc_notrans(to_mdev(pd->device),
				     to_mpd(pd)->pd_num,
				     convert_access(acc), mr);

	if (err) {
		kfree(mr);
		return ERR_PTR(err);
	}

	mr->umem = NULL;

	return &mr->ibmr;
}
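
/*
 * For a physical registration all buffers must share one page size.
 * XORing the first buffer's address against the start IOVA, and then
 * ORing in every interior buffer boundary, accumulates any misaligned
 * bits into "mask"; the lowest set bit of the result bounds the
 * largest page shift the HCA can use for this region.
 */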
static struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd,
				       struct ib_phys_buf *buffer_list,
				       int num_phys_buf,
				       int acc,
				       u64 *iova_start)
{
	struct mthca_mr *mr;
	u64 *page_list;
	u64 total_size;
	unsigned long mask;
	int shift;
	int npages;
	int err;
	int i, j, n;

	mask = buffer_list[0].addr ^ *iova_start;
	total_size = 0;
	for (i = 0; i < num_phys_buf; ++i) {
		if (i != 0)
			mask |= buffer_list[i].addr;
		if (i != num_phys_buf - 1)
			mask |= buffer_list[i].addr + buffer_list[i].size;

		total_size += buffer_list[i].size;
	}

	if (mask & ~PAGE_MASK)
		return ERR_PTR(-EINVAL);

	shift = __ffs(mask | 1 << 31);

	buffer_list[0].size += buffer_list[0].addr & ((1ULL << shift) - 1);
	buffer_list[0].addr &= ~0ull << shift;

	mr = kmalloc(sizeof *mr, GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	npages = 0;
	for (i = 0; i < num_phys_buf; ++i)
		npages += (buffer_list[i].size + (1ULL << shift) - 1) >> shift;

	if (!npages)
		return &mr->ibmr;

	page_list = kmalloc(npages * sizeof *page_list, GFP_KERNEL);
	if (!page_list) {
		kfree(mr);
		return ERR_PTR(-ENOMEM);
	}

	n = 0;
	for (i = 0; i < num_phys_buf; ++i)
		for (j = 0;
		     j < (buffer_list[i].size + (1ULL << shift) - 1) >> shift;
		     ++j)
			page_list[n++] = buffer_list[i].addr + ((u64) j << shift);

	mthca_dbg(to_mdev(pd->device), "Registering memory at %llx (iova %llx) "
		  "in PD %x; shift %d, npages %d.\n",
		  (unsigned long long) buffer_list[0].addr,
		  (unsigned long long) *iova_start,
		  to_mpd(pd)->pd_num,
		  shift, npages);

	err = mthca_mr_alloc_phys(to_mdev(pd->device),
				  to_mpd(pd)->pd_num,
				  page_list, shift, npages,
				  *iova_start, total_size,
				  convert_access(acc), mr);

	if (err) {
		kfree(page_list);
		kfree(mr);
		return ERR_PTR(err);
	}

	kfree(page_list);
	mr->umem = NULL;

	return &mr->ibmr;
}
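
/*
 * User memory registration pins the region with ib_umem_get() and then
 * streams the DMA addresses of its pages into the HCA's MTT, using a
 * single free page as a bounce buffer for mthca_write_mtt().
 */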
static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				       u64 virt, int acc, struct ib_udata *udata)
{
	struct mthca_dev *dev = to_mdev(pd->device);
	struct ib_umem_chunk *chunk;
	struct mthca_mr *mr;
	struct mthca_reg_mr ucmd;
	u64 *pages;
	int shift, n, len;
	int i, j, k;
	int err = 0;
	int write_mtt_size;

	if (udata->inlen - sizeof (struct ib_uverbs_cmd_hdr) < sizeof ucmd) {
		if (!to_mucontext(pd->uobject->context)->reg_mr_warned) {
			mthca_warn(dev, "Process '%s' did not pass in MR attrs.\n",
				   current->comm);
			mthca_warn(dev, "  Update libmthca to fix this.\n");
		}
		++to_mucontext(pd->uobject->context)->reg_mr_warned;
		ucmd.mr_attrs = 0;
	} else if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
		return ERR_PTR(-EFAULT);

	mr = kmalloc(sizeof *mr, GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->umem = ib_umem_get(pd->uobject->context, start, length, acc,
			       ucmd.mr_attrs & MTHCA_MR_DMASYNC);

	if (IS_ERR(mr->umem)) {
		err = PTR_ERR(mr->umem);
		goto err;
	}

	shift = ffs(mr->umem->page_size) - 1;

	n = 0;
	list_for_each_entry(chunk, &mr->umem->chunk_list, list)
		n += chunk->nents;

	mr->mtt = mthca_alloc_mtt(dev, n);
	if (IS_ERR(mr->mtt)) {
		err = PTR_ERR(mr->mtt);
		goto err_umem;
	}

	pages = (u64 *) __get_free_page(GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err_mtt;
	}

	i = n = 0;

	write_mtt_size = min(mthca_write_mtt_size(dev), (int) (PAGE_SIZE / sizeof *pages));

	list_for_each_entry(chunk, &mr->umem->chunk_list, list)
		for (j = 0; j < chunk->nmap; ++j) {
			len = sg_dma_len(&chunk->page_list[j]) >> shift;
			for (k = 0; k < len; ++k) {
				pages[i++] = sg_dma_address(&chunk->page_list[j]) +
					mr->umem->page_size * k;
				/*
				 * Be friendly to write_mtt and pass it chunks
				 * of appropriate size.
				 */
				if (i == write_mtt_size) {
					err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
					if (err)
						goto mtt_done;
					n += i;
					i = 0;
				}
			}
		}

	if (i)
		err = mthca_write_mtt(dev, mr->mtt, n, pages, i);

mtt_done:
	free_page((unsigned long) pages);
	if (err)
		goto err_mtt;

	err = mthca_mr_alloc(dev, to_mpd(pd)->pd_num, shift, virt, length,
			     convert_access(acc), mr);
	if (err)
		goto err_mtt;

	return &mr->ibmr;

err_mtt:
	mthca_free_mtt(dev, mr->mtt);

err_umem:
	ib_umem_release(mr->umem);

err:
	kfree(mr);
	return ERR_PTR(err);
}

static int mthca_dereg_mr(struct ib_mr *mr)
{
	struct mthca_mr *mmr = to_mmr(mr);

	mthca_free_mr(to_mdev(mr->device), mmr);
	if (mmr->umem)
		ib_umem_release(mmr->umem);
	kfree(mmr);

	return 0;
}

static struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
				      struct ib_fmr_attr *fmr_attr)
{
	struct mthca_fmr *fmr;
	int err;

	fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
	if (!fmr)
		return ERR_PTR(-ENOMEM);

	memcpy(&fmr->attr, fmr_attr, sizeof *fmr_attr);
	err = mthca_fmr_alloc(to_mdev(pd->device), to_mpd(pd)->pd_num,
			      convert_access(mr_access_flags), fmr);

	if (err) {
		kfree(fmr);
		return ERR_PTR(err);
	}

	return &fmr->ibmr;
}

static int mthca_dealloc_fmr(struct ib_fmr *fmr)
{
	struct mthca_fmr *mfmr = to_mfmr(fmr);
	int err;

	err = mthca_free_fmr(to_mdev(fmr->device), mfmr);
	if (err)
		return err;

	kfree(mfmr);
	return 0;
}
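
/*
 * All FMRs in the list must belong to a single device.  On mem-free
 * HCAs the MPT entries live in host memory and are rewritten directly,
 * so a write barrier is needed before SYNC_TPT tells the HCA to drop
 * its cached translations.
 */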
static int mthca_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *fmr;
	int err;
	u8 status;
	struct mthca_dev *mdev = NULL;

	list_for_each_entry(fmr, fmr_list, list) {
		if (mdev && to_mdev(fmr->device) != mdev)
			return -EINVAL;
		mdev = to_mdev(fmr->device);
	}

	if (!mdev)
		return 0;

	if (mthca_is_memfree(mdev)) {
		list_for_each_entry(fmr, fmr_list, list)
			mthca_arbel_fmr_unmap(mdev, to_mfmr(fmr));

		wmb();
	} else
		list_for_each_entry(fmr, fmr_list, list)
			mthca_tavor_fmr_unmap(mdev, to_mfmr(fmr));

	err = mthca_SYNC_TPT(mdev, &status);
	if (err)
		return err;
	if (status)
		return -EINVAL;
	return 0;
}

static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mthca_dev *dev =
		container_of(device, struct mthca_dev, ib_dev.dev);
	return sprintf(buf, "%x\n", dev->rev_id);
}

static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct mthca_dev *dev =
		container_of(device, struct mthca_dev, ib_dev.dev);
	return sprintf(buf, "%d.%d.%d\n", (int) (dev->fw_ver >> 32),
		       (int) (dev->fw_ver >> 16) & 0xffff,
		       (int) dev->fw_ver & 0xffff);
}

static ssize_t show_hca(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mthca_dev *dev =
		container_of(device, struct mthca_dev, ib_dev.dev);
	switch (dev->pdev->device) {
	case PCI_DEVICE_ID_MELLANOX_TAVOR:
		return sprintf(buf, "MT23108\n");
	case PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT:
		return sprintf(buf, "MT25208 (MT23108 compat mode)\n");
	case PCI_DEVICE_ID_MELLANOX_ARBEL:
		return sprintf(buf, "MT25208\n");
	case PCI_DEVICE_ID_MELLANOX_SINAI:
	case PCI_DEVICE_ID_MELLANOX_SINAI_OLD:
		return sprintf(buf, "MT25204\n");
	default:
		return sprintf(buf, "unknown\n");
	}
}

static ssize_t show_board(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	struct mthca_dev *dev =
		container_of(device, struct mthca_dev, ib_dev.dev);
	return sprintf(buf, "%.*s\n", MTHCA_BOARD_ID_LEN, dev->board_id);
}

static DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
static DEVICE_ATTR(fw_ver,   S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board,  NULL);

static struct device_attribute *mthca_dev_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_fw_ver,
	&dev_attr_hca_type,
	&dev_attr_board_id
};
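
/*
 * The node description, node GUID and (on mem-free HCAs) the hardware
 * revision all come from the device's own SMA, queried here before the
 * device is registered with the IB core.
 */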
static int mthca_init_node_data(struct mthca_dev *dev)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	u8 status;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;

	err = mthca_MAD_IFC(dev, 1, 1,
			    1, NULL, NULL, in_mad, out_mad,
			    &status);
	if (err)
		goto out;
	if (status) {
		err = -EINVAL;
		goto out;
	}

	memcpy(dev->ib_dev.node_desc, out_mad->data, 64);

	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mthca_MAD_IFC(dev, 1, 1,
			    1, NULL, NULL, in_mad, out_mad,
			    &status);
	if (err)
		goto out;
	if (status) {
		err = -EINVAL;
		goto out;
	}

	if (mthca_is_memfree(dev))
		dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

int mthca_register_device(struct mthca_dev *dev)
{
	int ret;
	int i;

	ret = mthca_init_node_data(dev);
	if (ret)
		return ret;

	strlcpy(dev->ib_dev.name, "mthca%d", IB_DEVICE_NAME_MAX);
	dev->ib_dev.owner            = THIS_MODULE;

	dev->ib_dev.uverbs_abi_ver   = MTHCA_UVERBS_ABI_VERSION;
	dev->ib_dev.uverbs_cmd_mask  =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)         |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)        |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)          |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)            |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)          |
		(1ull << IB_USER_VERBS_CMD_REG_MR)              |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR)            |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)           |
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)           |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)          |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP)           |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP)            |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)           |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)          |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)        |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST);
	dev->ib_dev.node_type        = RDMA_NODE_IB_CA;
	dev->ib_dev.phys_port_cnt    = dev->limits.num_ports;
	dev->ib_dev.num_comp_vectors = 1;
	dev->ib_dev.dma_device       = &dev->pdev->dev;
	dev->ib_dev.query_device     = mthca_query_device;
	dev->ib_dev.query_port       = mthca_query_port;
	dev->ib_dev.modify_device    = mthca_modify_device;
	dev->ib_dev.modify_port      = mthca_modify_port;
	dev->ib_dev.query_pkey       = mthca_query_pkey;
	dev->ib_dev.query_gid        = mthca_query_gid;
	dev->ib_dev.alloc_ucontext   = mthca_alloc_ucontext;
	dev->ib_dev.dealloc_ucontext = mthca_dealloc_ucontext;
	dev->ib_dev.mmap             = mthca_mmap_uar;
	dev->ib_dev.alloc_pd         = mthca_alloc_pd;
	dev->ib_dev.dealloc_pd       = mthca_dealloc_pd;
	dev->ib_dev.create_ah        = mthca_ah_create;
	dev->ib_dev.query_ah         = mthca_ah_query;
	dev->ib_dev.destroy_ah       = mthca_ah_destroy;

	if (dev->mthca_flags & MTHCA_FLAG_SRQ) {
		dev->ib_dev.create_srq      = mthca_create_srq;
		dev->ib_dev.modify_srq      = mthca_modify_srq;
		dev->ib_dev.query_srq       = mthca_query_srq;
		dev->ib_dev.destroy_srq     = mthca_destroy_srq;
		dev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)  |
			(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)  |
			(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)   |
			(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);

		if (mthca_is_memfree(dev))
			dev->ib_dev.post_srq_recv = mthca_arbel_post_srq_recv;
		else
			dev->ib_dev.post_srq_recv = mthca_tavor_post_srq_recv;
	}

	dev->ib_dev.create_qp   = mthca_create_qp;
	dev->ib_dev.modify_qp   = mthca_modify_qp;
	dev->ib_dev.query_qp    = mthca_query_qp;
	dev->ib_dev.destroy_qp  = mthca_destroy_qp;
	dev->ib_dev.create_cq   = mthca_create_cq;
	dev->ib_dev.resize_cq   = mthca_resize_cq;
	dev->ib_dev.destroy_cq  = mthca_destroy_cq;
	dev->ib_dev.poll_cq     = mthca_poll_cq;
	dev->ib_dev.get_dma_mr  = mthca_get_dma_mr;
	dev->ib_dev.reg_phys_mr = mthca_reg_phys_mr;
	dev->ib_dev.reg_user_mr = mthca_reg_user_mr;
	dev->ib_dev.dereg_mr    = mthca_dereg_mr;

	if (dev->mthca_flags & MTHCA_FLAG_FMR) {
		dev->ib_dev.alloc_fmr   = mthca_alloc_fmr;
		dev->ib_dev.unmap_fmr   = mthca_unmap_fmr;
		dev->ib_dev.dealloc_fmr = mthca_dealloc_fmr;
		if (mthca_is_memfree(dev))
			dev->ib_dev.map_phys_fmr = mthca_arbel_map_phys_fmr;
		else
			dev->ib_dev.map_phys_fmr = mthca_tavor_map_phys_fmr;
	}

	dev->ib_dev.attach_mcast = mthca_multicast_attach;
	dev->ib_dev.detach_mcast = mthca_multicast_detach;
	dev->ib_dev.process_mad  = mthca_process_mad;

	if (mthca_is_memfree(dev)) {
		dev->ib_dev.req_notify_cq = mthca_arbel_arm_cq;
		dev->ib_dev.post_send     = mthca_arbel_post_send;
		dev->ib_dev.post_recv     = mthca_arbel_post_receive;
	} else {
		dev->ib_dev.req_notify_cq = mthca_tavor_arm_cq;
		dev->ib_dev.post_send     = mthca_tavor_post_send;
		dev->ib_dev.post_recv     = mthca_tavor_post_receive;
	}

	mutex_init(&dev->cap_mask_mutex);

	ret = ib_register_device(&dev->ib_dev);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(mthca_dev_attributes); ++i) {
		ret = device_create_file(&dev->ib_dev.dev,
					 mthca_dev_attributes[i]);
		if (ret) {
			ib_unregister_device(&dev->ib_dev);
			return ret;
		}
	}

	mthca_start_catas_poll(dev);

	return 0;
}

void mthca_unregister_device(struct mthca_dev *dev)
{
	mthca_stop_catas_poll(dev);
	ib_unregister_device(&dev->ib_dev);
}