/*
 * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <rdma/ib_umem.h>
#include "mlx5_ib.h"
#include "user.h"

/* not supported currently */
static int wq_signature;

enum {
        MLX5_IB_ACK_REQ_FREQ    = 8,
};

enum {
        MLX5_IB_DEFAULT_SCHED_QUEUE     = 0x83,
        MLX5_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f,
        MLX5_IB_LINK_TYPE_IB            = 0,
        MLX5_IB_LINK_TYPE_ETH           = 1
};

enum {
        MLX5_IB_SQ_STRIDE       = 6,
        MLX5_IB_CACHE_LINE_SIZE = 64,
};

static const u32 mlx5_ib_opcode[] = {
        [IB_WR_SEND]                            = MLX5_OPCODE_SEND,
        [IB_WR_SEND_WITH_IMM]                   = MLX5_OPCODE_SEND_IMM,
        [IB_WR_RDMA_WRITE]                      = MLX5_OPCODE_RDMA_WRITE,
        [IB_WR_RDMA_WRITE_WITH_IMM]             = MLX5_OPCODE_RDMA_WRITE_IMM,
        [IB_WR_RDMA_READ]                       = MLX5_OPCODE_RDMA_READ,
        [IB_WR_ATOMIC_CMP_AND_SWP]              = MLX5_OPCODE_ATOMIC_CS,
        [IB_WR_ATOMIC_FETCH_AND_ADD]            = MLX5_OPCODE_ATOMIC_FA,
        [IB_WR_SEND_WITH_INV]                   = MLX5_OPCODE_SEND_INVAL,
        [IB_WR_LOCAL_INV]                       = MLX5_OPCODE_UMR,
        [IB_WR_FAST_REG_MR]                     = MLX5_OPCODE_UMR,
        [IB_WR_MASKED_ATOMIC_CMP_AND_SWP]       = MLX5_OPCODE_ATOMIC_MASKED_CS,
        [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]     = MLX5_OPCODE_ATOMIC_MASKED_FA,
        [MLX5_IB_WR_UMR]                        = MLX5_OPCODE_UMR,
};

struct umr_wr {
        u64                     virt_addr;
        struct ib_pd            *pd;
        unsigned int            page_shift;
        unsigned int            npages;
        u32                     length;
        int                     access_flags;
        u32                     mkey;
};

static int is_qp0(enum ib_qp_type qp_type)
{
        return qp_type == IB_QPT_SMI;
}

static int is_qp1(enum ib_qp_type qp_type)
{
        return qp_type == IB_QPT_GSI;
}

static int is_sqp(enum ib_qp_type qp_type)
{
        return is_qp0(qp_type) || is_qp1(qp_type);
}

static void *get_wqe(struct mlx5_ib_qp *qp, int offset)
{
        return mlx5_buf_offset(&qp->buf, offset);
}

static void *get_recv_wqe(struct mlx5_ib_qp *qp, int n)
{
        return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
}

void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n)
{
        return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE));
}
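
/*
 * Translate a firmware QP async event into the corresponding IB event
 * and deliver it through the consumer's event handler, if one is
 * registered.  A path migration event also switches the QP's active
 * port to the alternate port.
 */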
static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
{
        struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
        struct ib_event event;

        if (type == MLX5_EVENT_TYPE_PATH_MIG)
                to_mibqp(qp)->port = to_mibqp(qp)->alt_port;

        if (ibqp->event_handler) {
                event.device     = ibqp->device;
                event.element.qp = ibqp;
                switch (type) {
                case MLX5_EVENT_TYPE_PATH_MIG:
                        event.event = IB_EVENT_PATH_MIG;
                        break;
                case MLX5_EVENT_TYPE_COMM_EST:
                        event.event = IB_EVENT_COMM_EST;
                        break;
                case MLX5_EVENT_TYPE_SQ_DRAINED:
                        event.event = IB_EVENT_SQ_DRAINED;
                        break;
                case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
                        event.event = IB_EVENT_QP_LAST_WQE_REACHED;
                        break;
                case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
                        event.event = IB_EVENT_QP_FATAL;
                        break;
                case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
                        event.event = IB_EVENT_PATH_MIG_ERR;
                        break;
                case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
                        event.event = IB_EVENT_QP_REQ_ERR;
                        break;
                case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
                        event.event = IB_EVENT_QP_ACCESS_ERR;
                        break;
                default:
                        pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn);
                        return;
                }

                ibqp->event_handler(&event, ibqp->qp_context);
        }
}
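
/*
 * Compute the receive queue geometry.  For userspace QPs the WQE count
 * and stride come from the create command; for kernel QPs they are
 * derived from the requested capabilities, with the WQE size rounded
 * up to a power of two and checked against the device limits.
 */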
static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
                       int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
{
        int wqe_size;
        int wq_size;

        /* Sanity check RQ size before proceeding */
        if (cap->max_recv_wr > dev->mdev.caps.max_wqes)
                return -EINVAL;

        if (!has_rq) {
                qp->rq.max_gs = 0;
                qp->rq.wqe_cnt = 0;
                qp->rq.wqe_shift = 0;
        } else {
                if (ucmd) {
                        qp->rq.wqe_cnt = ucmd->rq_wqe_count;
                        qp->rq.wqe_shift = ucmd->rq_wqe_shift;
                        qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
                        qp->rq.max_post = qp->rq.wqe_cnt;
                } else {
                        wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0;
                        wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg);
                        wqe_size = roundup_pow_of_two(wqe_size);
                        wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
                        wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
                        qp->rq.wqe_cnt = wq_size / wqe_size;
                        if (wqe_size > dev->mdev.caps.max_rq_desc_sz) {
                                mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
                                            wqe_size,
                                            dev->mdev.caps.max_rq_desc_sz);
                                return -EINVAL;
                        }
                        qp->rq.wqe_shift = ilog2(wqe_size);
                        qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
                        qp->rq.max_post = qp->rq.wqe_cnt;
                }
        }

        return 0;
}
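
/*
 * Fixed per-WQE overhead on the send queue, which depends on the
 * transport: XRC adds an XRC segment on top of the RC overhead, and
 * UMR QPs carry a UMR control segment plus a memory key segment.
 */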
static int sq_overhead(enum ib_qp_type qp_type)
{
        int size = 0;

        switch (qp_type) {
        case IB_QPT_XRC_INI:
                size = sizeof(struct mlx5_wqe_xrc_seg);
                /* fall through */
        case IB_QPT_RC:
                size += sizeof(struct mlx5_wqe_ctrl_seg) +
                        sizeof(struct mlx5_wqe_atomic_seg) +
                        sizeof(struct mlx5_wqe_raddr_seg);
                break;

        case IB_QPT_UC:
                size = sizeof(struct mlx5_wqe_ctrl_seg) +
                        sizeof(struct mlx5_wqe_raddr_seg);
                break;

        case IB_QPT_UD:
        case IB_QPT_SMI:
        case IB_QPT_GSI:
                size = sizeof(struct mlx5_wqe_ctrl_seg) +
                        sizeof(struct mlx5_wqe_datagram_seg);
                break;

        case MLX5_IB_QPT_REG_UMR:
                size = sizeof(struct mlx5_wqe_ctrl_seg) +
                        sizeof(struct mlx5_wqe_umr_ctrl_seg) +
                        sizeof(struct mlx5_mkey_seg);
                break;

        default:
                return -EINVAL;
        }

        return size;
}

static int calc_send_wqe(struct ib_qp_init_attr *attr)
{
        int inl_size = 0;
        int size;

        size = sq_overhead(attr->qp_type);
        if (size < 0)
                return size;

        if (attr->cap.max_inline_data) {
                inl_size = size + sizeof(struct mlx5_wqe_inline_seg) +
                        attr->cap.max_inline_data;
        }

        size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg);

        return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
}

static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
                        struct mlx5_ib_qp *qp)
{
        int wqe_size;
        int wq_size;

        if (!attr->cap.max_send_wr)
                return 0;

        wqe_size = calc_send_wqe(attr);
        mlx5_ib_dbg(dev, "wqe_size %d\n", wqe_size);
        if (wqe_size < 0)
                return wqe_size;

        if (wqe_size > dev->mdev.caps.max_sq_desc_sz) {
                mlx5_ib_dbg(dev, "\n");
                return -EINVAL;
        }

        qp->max_inline_data = wqe_size - sq_overhead(attr->qp_type) -
                sizeof(struct mlx5_wqe_inline_seg);
        attr->cap.max_inline_data = qp->max_inline_data;

        wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
        qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
        qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
        qp->sq.max_gs = attr->cap.max_send_sge;
        qp->sq.max_post = 1 << ilog2(wq_size / wqe_size);

        return wq_size;
}
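
/*
 * Validate the work queue geometry requested by userspace: the send
 * WQE count must be a power of two, and the resulting descriptor size
 * and queue depth must not exceed the device limits.
 */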
static int set_user_buf_size(struct mlx5_ib_dev *dev,
                             struct mlx5_ib_qp *qp,
                             struct mlx5_ib_create_qp *ucmd)
{
        int desc_sz = 1 << qp->sq.wqe_shift;

        if (desc_sz > dev->mdev.caps.max_sq_desc_sz) {
                mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
                             desc_sz, dev->mdev.caps.max_sq_desc_sz);
                return -EINVAL;
        }

        if (ucmd->sq_wqe_count && ((1 << ilog2(ucmd->sq_wqe_count)) != ucmd->sq_wqe_count)) {
                mlx5_ib_warn(dev, "sq_wqe_count %d is not a power of two\n",
                             ucmd->sq_wqe_count);
                return -EINVAL;
        }

        qp->sq.wqe_cnt = ucmd->sq_wqe_count;
        if (qp->sq.wqe_cnt > dev->mdev.caps.max_wqes) {
                mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
                             qp->sq.wqe_cnt, dev->mdev.caps.max_wqes);
                return -EINVAL;
        }

        qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
                (qp->sq.wqe_cnt << 6);

        return 0;
}

static int qp_has_rq(struct ib_qp_init_attr *attr)
{
        if (attr->qp_type == IB_QPT_XRC_INI ||
            attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
            attr->qp_type == MLX5_IB_QPT_REG_UMR ||
            !attr->cap.max_recv_wr)
                return 0;

        return 1;
}
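
/*
 * UUAR (micro UAR) allocation.  Each UAR page holds several doorbell
 * registers; the pool is partitioned by latency class, with the
 * entries at the end of the bitmap reserved for the low-latency
 * (high) class and index 0 shared by all low-class users.
 */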
static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari)
{
        int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
        int start_uuar;
        int i;

        start_uuar = nuuars - uuari->num_low_latency_uuars;
        for (i = start_uuar; i < nuuars; i++) {
                if (!test_bit(i, uuari->bitmap)) {
                        set_bit(i, uuari->bitmap);
                        uuari->count[i]++;
                        return i;
                }
        }

        return -ENOMEM;
}

static int alloc_med_class_uuar(struct mlx5_uuar_info *uuari)
{
        int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
        int minidx = 1;
        int uuarn;
        int end;
        int i;

        end = nuuars - uuari->num_low_latency_uuars;

        for (i = 1; i < end; i++) {
                uuarn = i & 3;
                if (uuarn == 2 || uuarn == 3)
                        continue;

                if (uuari->count[i] < uuari->count[minidx])
                        minidx = i;
        }

        uuari->count[minidx]++;
        return minidx;
}

static int alloc_uuar(struct mlx5_uuar_info *uuari,
                      enum mlx5_ib_latency_class lat)
{
        int uuarn = -EINVAL;

        mutex_lock(&uuari->lock);
        switch (lat) {
        case MLX5_IB_LATENCY_CLASS_LOW:
                uuarn = 0;
                uuari->count[uuarn]++;
                break;

        case MLX5_IB_LATENCY_CLASS_MEDIUM:
                uuarn = alloc_med_class_uuar(uuari);
                break;

        case MLX5_IB_LATENCY_CLASS_HIGH:
                uuarn = alloc_high_class_uuar(uuari);
                break;

        case MLX5_IB_LATENCY_CLASS_FAST_PATH:
                uuarn = 2;
                break;
        }
        mutex_unlock(&uuari->lock);

        return uuarn;
}

static void free_med_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
        clear_bit(uuarn, uuari->bitmap);
        --uuari->count[uuarn];
}

static void free_high_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
        clear_bit(uuarn, uuari->bitmap);
        --uuari->count[uuarn];
}

static void free_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
        int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
        int high_uuar = nuuars - uuari->num_low_latency_uuars;

        mutex_lock(&uuari->lock);
        if (uuarn == 0) {
                --uuari->count[uuarn];
                goto out;
        }

        if (uuarn < high_uuar) {
                free_med_class_uuar(uuari, uuarn);
                goto out;
        }

        free_high_class_uuar(uuari, uuarn);

out:
        mutex_unlock(&uuari->lock);
}

static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state)
{
        switch (state) {
        case IB_QPS_RESET:      return MLX5_QP_STATE_RST;
        case IB_QPS_INIT:       return MLX5_QP_STATE_INIT;
        case IB_QPS_RTR:        return MLX5_QP_STATE_RTR;
        case IB_QPS_RTS:        return MLX5_QP_STATE_RTS;
        case IB_QPS_SQD:        return MLX5_QP_STATE_SQD;
        case IB_QPS_SQE:        return MLX5_QP_STATE_SQER;
        case IB_QPS_ERR:        return MLX5_QP_STATE_ERR;
        default:                return -1;
        }
}

static int to_mlx5_st(enum ib_qp_type type)
{
        switch (type) {
        case IB_QPT_RC:                 return MLX5_QP_ST_RC;
        case IB_QPT_UC:                 return MLX5_QP_ST_UC;
        case IB_QPT_UD:                 return MLX5_QP_ST_UD;
        case MLX5_IB_QPT_REG_UMR:       return MLX5_QP_ST_REG_UMR;
        case IB_QPT_XRC_INI:
        case IB_QPT_XRC_TGT:            return MLX5_QP_ST_XRC;
        case IB_QPT_SMI:                return MLX5_QP_ST_QP0;
        case IB_QPT_GSI:                return MLX5_QP_ST_QP1;
        case IB_QPT_RAW_IPV6:           return MLX5_QP_ST_RAW_IPV6;
        case IB_QPT_RAW_ETHERTYPE:      return MLX5_QP_ST_RAW_ETHERTYPE;
        case IB_QPT_RAW_PACKET:
        case IB_QPT_MAX:
        default:                        return -EINVAL;
        }
}

static int uuarn_to_uar_index(struct mlx5_uuar_info *uuari, int uuarn)
{
        return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index;
}
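
/*
 * Create a QP whose buffers live in userspace: copy in the create
 * command, pick a UUAR, pin the user buffer with ib_umem_get(), build
 * the firmware command mailbox with the page list, and map the user
 * doorbell record.
 */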
static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
                          struct mlx5_ib_qp *qp, struct ib_udata *udata,
                          struct mlx5_create_qp_mbox_in **in,
                          struct mlx5_ib_create_qp_resp *resp, int *inlen)
{
        struct mlx5_ib_ucontext *context;
        struct mlx5_ib_create_qp ucmd;
        int page_shift;
        int uar_index;
        int npages;
        u32 offset;
        int uuarn;
        int ncont;
        int err;

        err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
        if (err) {
                mlx5_ib_dbg(dev, "copy failed\n");
                return err;
        }

        context = to_mucontext(pd->uobject->context);
        /*
         * TBD: should come from the verbs when we have the API
         */
        uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_HIGH);
        if (uuarn < 0) {
                mlx5_ib_dbg(dev, "failed to allocate low latency UUAR\n");
                mlx5_ib_dbg(dev, "reverting to high latency\n");
                uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW);
                if (uuarn < 0) {
                        mlx5_ib_dbg(dev, "uuar allocation failed\n");
                        return uuarn;
                }
        }

        uar_index = uuarn_to_uar_index(&context->uuari, uuarn);
        mlx5_ib_dbg(dev, "uuarn 0x%x, uar_index 0x%x\n", uuarn, uar_index);

        err = set_user_buf_size(dev, qp, &ucmd);
        if (err)
                goto err_uuar;

        qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
                               qp->buf_size, 0, 0);
        if (IS_ERR(qp->umem)) {
                mlx5_ib_dbg(dev, "umem_get failed\n");
                err = PTR_ERR(qp->umem);
                goto err_uuar;
        }

        mlx5_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift,
                           &ncont, NULL);
        err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift, &offset);
        if (err) {
                mlx5_ib_warn(dev, "bad offset\n");
                goto err_umem;
        }
        mlx5_ib_dbg(dev, "addr 0x%llx, size %d, npages %d, page_shift %d, ncont %d, offset %d\n",
                    ucmd.buf_addr, qp->buf_size, npages, page_shift, ncont, offset);

        *inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
        *in = mlx5_vzalloc(*inlen);
        if (!*in) {
                err = -ENOMEM;
                goto err_umem;
        }
        mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0);
        (*in)->ctx.log_pg_sz_remote_qpn =
                cpu_to_be32((page_shift - PAGE_SHIFT) << 24);
        (*in)->ctx.params2 = cpu_to_be32(offset << 6);

        (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
        resp->uuar_index = uuarn;
        qp->uuarn = uuarn;

        err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db);
        if (err) {
                mlx5_ib_dbg(dev, "map failed\n");
                goto err_free;
        }

        err = ib_copy_to_udata(udata, resp, sizeof(*resp));
        if (err) {
                mlx5_ib_dbg(dev, "copy failed\n");
                goto err_unmap;
        }
        qp->create_type = MLX5_QP_USER;

        return 0;

err_unmap:
        mlx5_ib_db_unmap_user(context, &qp->db);

err_free:
        mlx5_vfree(*in);

err_umem:
        ib_umem_release(qp->umem);

err_uuar:
        free_uuar(&context->uuari, uuarn);
        return err;
}

static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp)
{
        struct mlx5_ib_ucontext *context;

        context = to_mucontext(pd->uobject->context);
        mlx5_ib_db_unmap_user(context, &qp->db);
        ib_umem_release(qp->umem);
        free_uuar(&context->uuari, qp->uuarn);
}
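
/*
 * Create a QP used by the kernel itself: allocate the work queue
 * buffer and doorbell record from the device, and allocate the
 * bookkeeping arrays (wrid, wr_data, w_list, wqe_head) used when
 * posting and completing work requests.
 */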
static int create_kernel_qp(struct mlx5_ib_dev *dev,
                            struct ib_qp_init_attr *init_attr,
                            struct mlx5_ib_qp *qp,
                            struct mlx5_create_qp_mbox_in **in, int *inlen)
{
        enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW;
        struct mlx5_uuar_info *uuari;
        int uar_index;
        int uuarn;
        int err;

        uuari = &dev->mdev.priv.uuari;
        if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
                qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;

        if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
                lc = MLX5_IB_LATENCY_CLASS_FAST_PATH;

        uuarn = alloc_uuar(uuari, lc);
        if (uuarn < 0) {
                mlx5_ib_dbg(dev, "\n");
                return -ENOMEM;
        }

        qp->bf = &uuari->bfs[uuarn];
        uar_index = qp->bf->uar->index;

        err = calc_sq_size(dev, init_attr, qp);
        if (err < 0) {
                mlx5_ib_dbg(dev, "err %d\n", err);
                goto err_uuar;
        }

        qp->rq.offset = 0;
        qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
        qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);

        err = mlx5_buf_alloc(&dev->mdev, qp->buf_size, PAGE_SIZE * 2, &qp->buf);
        if (err) {
                mlx5_ib_dbg(dev, "err %d\n", err);
                goto err_uuar;
        }

        qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);
        *inlen = sizeof(**in) + sizeof(*(*in)->pas) * qp->buf.npages;
        *in = mlx5_vzalloc(*inlen);
        if (!*in) {
                err = -ENOMEM;
                goto err_buf;
        }
        (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
        (*in)->ctx.log_pg_sz_remote_qpn = cpu_to_be32((qp->buf.page_shift - PAGE_SHIFT) << 24);
        /* Set "fast registration enabled" for all kernel QPs */
        (*in)->ctx.params1 |= cpu_to_be32(1 << 11);
        (*in)->ctx.sq_crq_size |= cpu_to_be16(1 << 4);

        mlx5_fill_page_array(&qp->buf, (*in)->pas);

        err = mlx5_db_alloc(&dev->mdev, &qp->db);
        if (err) {
                mlx5_ib_dbg(dev, "err %d\n", err);
                goto err_free;
        }

        qp->db.db[0] = 0;
        qp->db.db[1] = 0;

        qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wrid), GFP_KERNEL);
        qp->sq.wr_data = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wr_data), GFP_KERNEL);
        qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(*qp->rq.wrid), GFP_KERNEL);
        qp->sq.w_list = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.w_list), GFP_KERNEL);
        qp->sq.wqe_head = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wqe_head), GFP_KERNEL);

        if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid ||
            !qp->sq.w_list || !qp->sq.wqe_head) {
                err = -ENOMEM;
                goto err_wrid;
        }
        qp->create_type = MLX5_QP_KERNEL;

        return 0;

err_wrid:
        mlx5_db_free(&dev->mdev, &qp->db);
        kfree(qp->sq.wqe_head);
        kfree(qp->sq.w_list);
        kfree(qp->sq.wrid);
        kfree(qp->sq.wr_data);
        kfree(qp->rq.wrid);

err_free:
        mlx5_vfree(*in);

err_buf:
        mlx5_buf_free(&dev->mdev, &qp->buf);

err_uuar:
        free_uuar(&dev->mdev.priv.uuari, uuarn);
        return err;
}

static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
        mlx5_db_free(&dev->mdev, &qp->db);
        kfree(qp->sq.wqe_head);
        kfree(qp->sq.w_list);
        kfree(qp->sq.wrid);
        kfree(qp->sq.wr_data);
        kfree(qp->rq.wrid);
        mlx5_buf_free(&dev->mdev, &qp->buf);
        free_uuar(&dev->mdev.priv.uuari, qp->bf->uuarn);
}

static __be32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
{
        if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) ||
            (attr->qp_type == IB_QPT_XRC_INI))
                return cpu_to_be32(MLX5_SRQ_RQ);
        else if (!qp->has_rq)
                return cpu_to_be32(MLX5_ZERO_LEN_RQ);
        else
                return cpu_to_be32(MLX5_NON_ZERO_RQ);
}

static int is_connected(enum ib_qp_type qp_type)
{
        if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC)
                return 1;

        return 0;
}
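
/*
 * Common QP creation path for both user and kernel QPs: size the work
 * queues, build the create mailbox (scatter-to-CQE settings, RQ type,
 * XRCD/SRQ/CQ numbers), and execute the firmware create command.
 */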
static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
                            struct ib_qp_init_attr *init_attr,
                            struct ib_udata *udata, struct mlx5_ib_qp *qp)
{
        struct mlx5_ib_resources *devr = &dev->devr;
        struct mlx5_ib_create_qp_resp resp;
        struct mlx5_create_qp_mbox_in *in;
        struct mlx5_ib_create_qp ucmd;
        int inlen = sizeof(*in);
        int err;

        mutex_init(&qp->mutex);
        spin_lock_init(&qp->sq.lock);
        spin_lock_init(&qp->rq.lock);

        if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
                qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;

        if (pd && pd->uobject) {
                if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
                        mlx5_ib_dbg(dev, "copy failed\n");
                        return -EFAULT;
                }

                qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
                qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
        } else {
                qp->wq_sig = !!wq_signature;
        }

        qp->has_rq = qp_has_rq(init_attr);

        err = set_rq_size(dev, &init_attr->cap, qp->has_rq,
                          qp, (pd && pd->uobject) ? &ucmd : NULL);
        if (err) {
                mlx5_ib_dbg(dev, "err %d\n", err);
                return err;
        }

        if (pd) {
                if (pd->uobject) {
                        mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count);
                        if (ucmd.rq_wqe_shift != qp->rq.wqe_shift ||
                            ucmd.rq_wqe_count != qp->rq.wqe_cnt) {
                                mlx5_ib_dbg(dev, "invalid rq params\n");
                                return -EINVAL;
                        }
                        if (ucmd.sq_wqe_count > dev->mdev.caps.max_wqes) {
                                mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
                                            ucmd.sq_wqe_count, dev->mdev.caps.max_wqes);
                                return -EINVAL;
                        }
                        err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen);
                        if (err)
                                mlx5_ib_dbg(dev, "err %d\n", err);
                } else {
                        err = create_kernel_qp(dev, init_attr, qp, &in, &inlen);
                        if (err)
                                mlx5_ib_dbg(dev, "err %d\n", err);
                        else
                                qp->pa_lkey = to_mpd(pd)->pa_lkey;
                }

                if (err)
                        return err;
        } else {
                in = mlx5_vzalloc(sizeof(*in));
                if (!in)
                        return -ENOMEM;

                qp->create_type = MLX5_QP_EMPTY;
        }

        if (is_sqp(init_attr->qp_type))
                qp->port = init_attr->port_num;

        in->ctx.flags = cpu_to_be32(to_mlx5_st(init_attr->qp_type) << 16 |
                                    MLX5_QP_PM_MIGRATED << 11);

        if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
                in->ctx.flags_pd = cpu_to_be32(to_mpd(pd ? pd : devr->p0)->pdn);
        else
                in->ctx.flags_pd = cpu_to_be32(MLX5_QP_LAT_SENSITIVE);

        if (qp->wq_sig)
                in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_ENABLE_SIG);

        if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
                int rcqe_sz;
                int scqe_sz;

                rcqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->recv_cq);
                scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq);

                if (rcqe_sz == 128)
                        in->ctx.cs_res = MLX5_RES_SCAT_DATA64_CQE;
                else
                        in->ctx.cs_res = MLX5_RES_SCAT_DATA32_CQE;

                if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) {
                        if (scqe_sz == 128)
                                in->ctx.cs_req = MLX5_REQ_SCAT_DATA64_CQE;
                        else
                                in->ctx.cs_req = MLX5_REQ_SCAT_DATA32_CQE;
                }
        }

        if (qp->rq.wqe_cnt) {
                in->ctx.rq_size_stride = (qp->rq.wqe_shift - 4);
                in->ctx.rq_size_stride |= ilog2(qp->rq.wqe_cnt) << 3;
        }

        in->ctx.rq_type_srqn = get_rx_type(qp, init_attr);

        if (qp->sq.wqe_cnt)
                in->ctx.sq_crq_size |= cpu_to_be16(ilog2(qp->sq.wqe_cnt) << 11);
        else
                in->ctx.sq_crq_size |= cpu_to_be16(0x8000);

        /* Set default resources */
        switch (init_attr->qp_type) {
        case IB_QPT_XRC_TGT:
                in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
                in->ctx.cqn_send = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
                in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
                in->ctx.xrcd = cpu_to_be32(to_mxrcd(init_attr->xrcd)->xrcdn);
                break;
        case IB_QPT_XRC_INI:
                in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
                in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
                in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
                break;
        default:
                if (init_attr->srq) {
                        in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x0)->xrcdn);
                        in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(init_attr->srq)->msrq.srqn);
                } else {
                        in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
                        in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
                }
        }

        if (init_attr->send_cq)
                in->ctx.cqn_send = cpu_to_be32(to_mcq(init_attr->send_cq)->mcq.cqn);

        if (init_attr->recv_cq)
                in->ctx.cqn_recv = cpu_to_be32(to_mcq(init_attr->recv_cq)->mcq.cqn);

        in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma);

        err = mlx5_core_create_qp(&dev->mdev, &qp->mqp, in, inlen);
        if (err) {
                mlx5_ib_dbg(dev, "create qp failed\n");
                goto err_create;
        }

        mlx5_vfree(in);
        /* Hardware wants QPN written in big-endian order (after
         * shifting) for send doorbell.  Precompute this value to save
         * a little bit when posting sends.
         */
        qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);

        qp->mqp.event = mlx5_ib_qp_event;

        return 0;

err_create:
        if (qp->create_type == MLX5_QP_USER)
                destroy_qp_user(pd, qp);
        else if (qp->create_type == MLX5_QP_KERNEL)
                destroy_qp_kernel(dev, qp);

        mlx5_vfree(in);
        return err;
}
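
/*
 * Lock the send and receive CQs of a QP.  When the two CQs differ,
 * they are always taken in ascending CQN order so that concurrent
 * callers cannot deadlock; the unlock path releases them in the
 * reverse order.
 */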
static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
        __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
        if (send_cq) {
                if (recv_cq) {
                        if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
                                spin_lock_irq(&send_cq->lock);
                                spin_lock_nested(&recv_cq->lock,
                                                 SINGLE_DEPTH_NESTING);
                        } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
                                spin_lock_irq(&send_cq->lock);
                                __acquire(&recv_cq->lock);
                        } else {
                                spin_lock_irq(&recv_cq->lock);
                                spin_lock_nested(&send_cq->lock,
                                                 SINGLE_DEPTH_NESTING);
                        }
                } else {
                        spin_lock_irq(&send_cq->lock);
                }
        } else if (recv_cq) {
                spin_lock_irq(&recv_cq->lock);
        }
}

static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
        __releases(&send_cq->lock) __releases(&recv_cq->lock)
{
        if (send_cq) {
                if (recv_cq) {
                        if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
                                spin_unlock(&recv_cq->lock);
                                spin_unlock_irq(&send_cq->lock);
                        } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
                                __release(&recv_cq->lock);
                                spin_unlock_irq(&send_cq->lock);
                        } else {
                                spin_unlock(&send_cq->lock);
                                spin_unlock_irq(&recv_cq->lock);
                        }
                } else {
                        spin_unlock_irq(&send_cq->lock);
                }
        } else if (recv_cq) {
                spin_unlock_irq(&recv_cq->lock);
        }
}

static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp)
{
        return to_mpd(qp->ibqp.pd);
}

static void get_cqs(struct mlx5_ib_qp *qp,
                    struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq)
{
        switch (qp->ibqp.qp_type) {
        case IB_QPT_XRC_TGT:
                *send_cq = NULL;
                *recv_cq = NULL;
                break;
        case MLX5_IB_QPT_REG_UMR:
        case IB_QPT_XRC_INI:
                *send_cq = to_mcq(qp->ibqp.send_cq);
                *recv_cq = NULL;
                break;

        case IB_QPT_SMI:
        case IB_QPT_GSI:
        case IB_QPT_RC:
        case IB_QPT_UC:
        case IB_QPT_UD:
        case IB_QPT_RAW_IPV6:
        case IB_QPT_RAW_ETHERTYPE:
                *send_cq = to_mcq(qp->ibqp.send_cq);
                *recv_cq = to_mcq(qp->ibqp.recv_cq);
                break;

        case IB_QPT_RAW_PACKET:
        case IB_QPT_MAX:
        default:
                *send_cq = NULL;
                *recv_cq = NULL;
                break;
        }
}
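
/*
 * Tear down a QP: move it to RESET if needed, scrub any completions it
 * left on the CQs (kernel QPs only), destroy the firmware object, and
 * release the user or kernel resources allocated at create time.
 */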
static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
        struct mlx5_ib_cq *send_cq, *recv_cq;
        struct mlx5_modify_qp_mbox_in *in;
        int err;

        in = kzalloc(sizeof(*in), GFP_KERNEL);
        if (!in)
                return;
        if (qp->state != IB_QPS_RESET)
                if (mlx5_core_qp_modify(&dev->mdev, to_mlx5_state(qp->state),
                                        MLX5_QP_STATE_RST, in, sizeof(*in), &qp->mqp))
                        mlx5_ib_warn(dev, "mlx5_ib: modify QP %06x to RESET failed\n",
                                     qp->mqp.qpn);

        get_cqs(qp, &send_cq, &recv_cq);

        if (qp->create_type == MLX5_QP_KERNEL) {
                mlx5_ib_lock_cqs(send_cq, recv_cq);
                __mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
                                   qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
                if (send_cq != recv_cq)
                        __mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
                mlx5_ib_unlock_cqs(send_cq, recv_cq);
        }

        err = mlx5_core_destroy_qp(&dev->mdev, &qp->mqp);
        if (err)
                mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", qp->mqp.qpn);
        kfree(in);

        if (qp->create_type == MLX5_QP_KERNEL)
                destroy_qp_kernel(dev, qp);
        else if (qp->create_type == MLX5_QP_USER)
                destroy_qp_user(&get_pd(qp)->ibpd, qp);
}

static const char *ib_qp_type_str(enum ib_qp_type type)
{
        switch (type) {
        case IB_QPT_SMI:
                return "IB_QPT_SMI";
        case IB_QPT_GSI:
                return "IB_QPT_GSI";
        case IB_QPT_RC:
                return "IB_QPT_RC";
        case IB_QPT_UC:
                return "IB_QPT_UC";
        case IB_QPT_UD:
                return "IB_QPT_UD";
        case IB_QPT_RAW_IPV6:
                return "IB_QPT_RAW_IPV6";
        case IB_QPT_RAW_ETHERTYPE:
                return "IB_QPT_RAW_ETHERTYPE";
        case IB_QPT_XRC_INI:
                return "IB_QPT_XRC_INI";
        case IB_QPT_XRC_TGT:
                return "IB_QPT_XRC_TGT";
        case IB_QPT_RAW_PACKET:
                return "IB_QPT_RAW_PACKET";
        case MLX5_IB_QPT_REG_UMR:
                return "MLX5_IB_QPT_REG_UMR";
        case IB_QPT_MAX:
        default:
                return "Invalid QP type";
        }
}

struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
                                struct ib_qp_init_attr *init_attr,
                                struct ib_udata *udata)
{
        struct mlx5_ib_dev *dev;
        struct mlx5_ib_qp *qp;
        u16 xrcdn = 0;
        int err;

        if (pd) {
                dev = to_mdev(pd->device);
        } else {
                /* being cautious here */
                if (init_attr->qp_type != IB_QPT_XRC_TGT &&
                    init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
                        pr_warn("%s: no PD for transport %s\n", __func__,
                                ib_qp_type_str(init_attr->qp_type));
                        return ERR_PTR(-EINVAL);
                }
                dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
        }

        switch (init_attr->qp_type) {
        case IB_QPT_XRC_TGT:
        case IB_QPT_XRC_INI:
                if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_XRC)) {
                        mlx5_ib_dbg(dev, "XRC not supported\n");
                        return ERR_PTR(-ENOSYS);
                }
                init_attr->recv_cq = NULL;
                if (init_attr->qp_type == IB_QPT_XRC_TGT) {
                        xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
                        init_attr->send_cq = NULL;
                }

                /* fall through */
        case IB_QPT_RC:
        case IB_QPT_UC:
        case IB_QPT_UD:
        case IB_QPT_SMI:
        case IB_QPT_GSI:
        case MLX5_IB_QPT_REG_UMR:
                qp = kzalloc(sizeof(*qp), GFP_KERNEL);
                if (!qp)
                        return ERR_PTR(-ENOMEM);

                err = create_qp_common(dev, pd, init_attr, udata, qp);
                if (err) {
                        mlx5_ib_dbg(dev, "create_qp_common failed\n");
                        kfree(qp);
                        return ERR_PTR(err);
                }

                if (is_qp0(init_attr->qp_type))
                        qp->ibqp.qp_num = 0;
                else if (is_qp1(init_attr->qp_type))
                        qp->ibqp.qp_num = 1;
                else
                        qp->ibqp.qp_num = qp->mqp.qpn;

                mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
                            qp->ibqp.qp_num, qp->mqp.qpn, to_mcq(init_attr->recv_cq)->mcq.cqn,
                            to_mcq(init_attr->send_cq)->mcq.cqn);

                qp->xrcdn = xrcdn;

                break;

        case IB_QPT_RAW_IPV6:
        case IB_QPT_RAW_ETHERTYPE:
        case IB_QPT_RAW_PACKET:
        case IB_QPT_MAX:
        default:
                mlx5_ib_dbg(dev, "unsupported qp type %d\n",
                            init_attr->qp_type);
                /* Don't support raw QPs */
                return ERR_PTR(-EINVAL);
        }

        return &qp->ibqp;
}

int mlx5_ib_destroy_qp(struct ib_qp *qp)
{
        struct mlx5_ib_dev *dev = to_mdev(qp->device);
        struct mlx5_ib_qp *mqp = to_mqp(qp);

        destroy_qp_common(dev, mqp);

        kfree(mqp);

        return 0;
}

static __be32 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_attr *attr,
                                   int attr_mask)
{
        u32 hw_access_flags = 0;
        u8 dest_rd_atomic;
        u32 access_flags;

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                dest_rd_atomic = attr->max_dest_rd_atomic;
        else
                dest_rd_atomic = qp->resp_depth;

        if (attr_mask & IB_QP_ACCESS_FLAGS)
                access_flags = attr->qp_access_flags;
        else
                access_flags = qp->atomic_rd_en;

        if (!dest_rd_atomic)
                access_flags &= IB_ACCESS_REMOTE_WRITE;

        if (access_flags & IB_ACCESS_REMOTE_READ)
                hw_access_flags |= MLX5_QP_BIT_RRE;
        if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
                hw_access_flags |= (MLX5_QP_BIT_RAE | MLX5_ATOMIC_MODE_CX);
        if (access_flags & IB_ACCESS_REMOTE_WRITE)
                hw_access_flags |= MLX5_QP_BIT_RWE;

        return cpu_to_be32(hw_access_flags);
}

enum {
        MLX5_PATH_FLAG_FL       = 1 << 0,
        MLX5_PATH_FLAG_FREE_AR  = 1 << 1,
        MLX5_PATH_FLAG_COUNTER  = 1 << 2,
};
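
/*
 * Map an IB static rate enum to the value the device expects, falling
 * back to the nearest lower rate the device reports support for.
 */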
static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
{
        if (rate == IB_RATE_PORT_CURRENT) {
                return 0;
        } else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
                return -EINVAL;
        } else {
                while (rate != IB_RATE_2_5_GBPS &&
                       !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
                         dev->mdev.caps.stat_rate_support))
                        --rate;
        }

        return rate + MLX5_STAT_RATE_OFFSET;
}

static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
                         struct mlx5_qp_path *path, u8 port, int attr_mask,
                         u32 path_flags, const struct ib_qp_attr *attr)
{
        int err;

        path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
        path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 : 0;

        if (attr_mask & IB_QP_PKEY_INDEX)
                path->pkey_index = attr->pkey_index;

        path->grh_mlid = ah->src_path_bits & 0x7f;
        path->rlid     = cpu_to_be16(ah->dlid);

        if (ah->ah_flags & IB_AH_GRH) {
                path->grh_mlid |= 1 << 7;
                path->mgid_index = ah->grh.sgid_index;
                path->hop_limit  = ah->grh.hop_limit;
                path->tclass_flowlabel =
                        cpu_to_be32((ah->grh.traffic_class << 20) |
                                    (ah->grh.flow_label));
                memcpy(path->rgid, ah->grh.dgid.raw, 16);
        }

        err = ib_rate_to_mlx5(dev, ah->static_rate);
        if (err < 0)
                return err;
        path->static_rate = err;
        path->port = port;

        if (ah->ah_flags & IB_AH_GRH) {
                if (ah->grh.sgid_index >= dev->mdev.caps.port[port - 1].gid_table_len) {
                        pr_err("sgid_index (%u) too large. max is %d\n",
                               ah->grh.sgid_index, dev->mdev.caps.port[port - 1].gid_table_len);
                        return -EINVAL;
                }

                path->grh_mlid |= 1 << 7;
                path->mgid_index = ah->grh.sgid_index;
                path->hop_limit  = ah->grh.hop_limit;
                path->tclass_flowlabel =
                        cpu_to_be32((ah->grh.traffic_class << 20) |
                                    (ah->grh.flow_label));
                memcpy(path->rgid, ah->grh.dgid.raw, 16);
        }

        if (attr_mask & IB_QP_TIMEOUT)
                path->ackto_lt = attr->timeout << 3;

        path->sl = ah->sl & 0xf;

        return 0;
}
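
/*
 * For each (current state, new state, transport) combination, the set
 * of QP context parameters that may legally be modified.  The computed
 * optional-parameter mask is ANDed against this table before the
 * modify command is issued.
 */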
static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_QP_ST_MAX] = {
        [MLX5_QP_STATE_INIT] = {
                [MLX5_QP_STATE_INIT] = {
                        [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE |
                                          MLX5_QP_OPTPAR_RAE |
                                          MLX5_QP_OPTPAR_RWE |
                                          MLX5_QP_OPTPAR_PKEY_INDEX |
                                          MLX5_QP_OPTPAR_PRI_PORT,
                        [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE |
                                          MLX5_QP_OPTPAR_PKEY_INDEX |
                                          MLX5_QP_OPTPAR_PRI_PORT,
                        [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX |
                                          MLX5_QP_OPTPAR_Q_KEY |
                                          MLX5_QP_OPTPAR_PRI_PORT,
                },
                [MLX5_QP_STATE_RTR] = {
                        [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
                                          MLX5_QP_OPTPAR_RRE |
                                          MLX5_QP_OPTPAR_RAE |
                                          MLX5_QP_OPTPAR_RWE |
                                          MLX5_QP_OPTPAR_PKEY_INDEX,
                        [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
                                          MLX5_QP_OPTPAR_RWE |
                                          MLX5_QP_OPTPAR_PKEY_INDEX,
                        [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX |
                                          MLX5_QP_OPTPAR_Q_KEY,
                        [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX |
                                           MLX5_QP_OPTPAR_Q_KEY,
                },
        },
        [MLX5_QP_STATE_RTR] = {
                [MLX5_QP_STATE_RTS] = {
                        [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
                                          MLX5_QP_OPTPAR_RRE |
                                          MLX5_QP_OPTPAR_RAE |
                                          MLX5_QP_OPTPAR_RWE |
                                          MLX5_QP_OPTPAR_PM_STATE |
                                          MLX5_QP_OPTPAR_RNR_TIMEOUT,
                        [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
                                          MLX5_QP_OPTPAR_RWE |
                                          MLX5_QP_OPTPAR_PM_STATE,
                        [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
                },
        },
        [MLX5_QP_STATE_RTS] = {
                [MLX5_QP_STATE_RTS] = {
                        [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE |
                                          MLX5_QP_OPTPAR_RAE |
                                          MLX5_QP_OPTPAR_RWE |
                                          MLX5_QP_OPTPAR_RNR_TIMEOUT |
                                          MLX5_QP_OPTPAR_PM_STATE,
                        [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE |
                                          MLX5_QP_OPTPAR_PM_STATE,
                        [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY |
                                          MLX5_QP_OPTPAR_SRQN |
                                          MLX5_QP_OPTPAR_CQN_RCV,
                },
        },
        [MLX5_QP_STATE_SQER] = {
                [MLX5_QP_STATE_RTS] = {
                        [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
                        [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
                },
        },
};

static int ib_nr_to_mlx5_nr(int ib_mask)
{
        switch (ib_mask) {
        case IB_QP_STATE:
                return 0;
        case IB_QP_CUR_STATE:
                return 0;
        case IB_QP_EN_SQD_ASYNC_NOTIFY:
                return 0;
        case IB_QP_ACCESS_FLAGS:
                return MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RRE |
                        MLX5_QP_OPTPAR_RAE;
        case IB_QP_PKEY_INDEX:
                return MLX5_QP_OPTPAR_PKEY_INDEX;
        case IB_QP_PORT:
                return MLX5_QP_OPTPAR_PRI_PORT;
        case IB_QP_QKEY:
                return MLX5_QP_OPTPAR_Q_KEY;
        case IB_QP_AV:
                return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH |
                        MLX5_QP_OPTPAR_PRI_PORT;
        case IB_QP_PATH_MTU:
                return 0;
        case IB_QP_TIMEOUT:
                return MLX5_QP_OPTPAR_ACK_TIMEOUT;
        case IB_QP_RETRY_CNT:
                return MLX5_QP_OPTPAR_RETRY_COUNT;
        case IB_QP_RNR_RETRY:
                return MLX5_QP_OPTPAR_RNR_RETRY;
        case IB_QP_RQ_PSN:
                return 0;
        case IB_QP_MAX_QP_RD_ATOMIC:
                return MLX5_QP_OPTPAR_SRA_MAX;
        case IB_QP_ALT_PATH:
                return MLX5_QP_OPTPAR_ALT_ADDR_PATH;
        case IB_QP_MIN_RNR_TIMER:
                return MLX5_QP_OPTPAR_RNR_TIMEOUT;
        case IB_QP_SQ_PSN:
                return 0;
        case IB_QP_MAX_DEST_RD_ATOMIC:
                return MLX5_QP_OPTPAR_RRA_MAX | MLX5_QP_OPTPAR_RWE |
                        MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE;
        case IB_QP_PATH_MIG_STATE:
                return MLX5_QP_OPTPAR_PM_STATE;
        case IB_QP_CAP:
                return 0;
        case IB_QP_DEST_QPN:
                return 0;
        }
        return 0;
}

static int ib_mask_to_mlx5_opt(int ib_mask)
{
        int result = 0;
        int i;

        for (i = 0; i < 8 * sizeof(int); i++) {
                if ((1 << i) & ib_mask)
                        result |= ib_nr_to_mlx5_nr(1 << i);
        }

        return result;
}
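
/*
 * Core of modify QP: build the mlx5 QP context from the IB attributes
 * (path, MTU, atomic limits, PSNs), derive the optional-parameter mask
 * for the requested transition, and execute the firmware modify
 * command.  Moving a kernel QP back to RESET also cleans its CQEs and
 * resets the software queue state.
 */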
  1187. static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
  1188. const struct ib_qp_attr *attr, int attr_mask,
  1189. enum ib_qp_state cur_state, enum ib_qp_state new_state)
  1190. {
  1191. struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
  1192. struct mlx5_ib_qp *qp = to_mqp(ibqp);
  1193. struct mlx5_ib_cq *send_cq, *recv_cq;
  1194. struct mlx5_qp_context *context;
  1195. struct mlx5_modify_qp_mbox_in *in;
  1196. struct mlx5_ib_pd *pd;
  1197. enum mlx5_qp_state mlx5_cur, mlx5_new;
  1198. enum mlx5_qp_optpar optpar;
  1199. int sqd_event;
  1200. int mlx5_st;
  1201. int err;
  1202. in = kzalloc(sizeof(*in), GFP_KERNEL);
  1203. if (!in)
  1204. return -ENOMEM;
  1205. context = &in->ctx;
  1206. err = to_mlx5_st(ibqp->qp_type);
  1207. if (err < 0)
  1208. goto out;
  1209. context->flags = cpu_to_be32(err << 16);
  1210. if (!(attr_mask & IB_QP_PATH_MIG_STATE)) {
  1211. context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
  1212. } else {
  1213. switch (attr->path_mig_state) {
  1214. case IB_MIG_MIGRATED:
  1215. context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
  1216. break;
  1217. case IB_MIG_REARM:
  1218. context->flags |= cpu_to_be32(MLX5_QP_PM_REARM << 11);
  1219. break;
  1220. case IB_MIG_ARMED:
  1221. context->flags |= cpu_to_be32(MLX5_QP_PM_ARMED << 11);
  1222. break;
  1223. }
  1224. }
  1225. if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) {
  1226. context->mtu_msgmax = (IB_MTU_256 << 5) | 8;
  1227. } else if (ibqp->qp_type == IB_QPT_UD ||
  1228. ibqp->qp_type == MLX5_IB_QPT_REG_UMR) {
  1229. context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
  1230. } else if (attr_mask & IB_QP_PATH_MTU) {
  1231. if (attr->path_mtu < IB_MTU_256 ||
  1232. attr->path_mtu > IB_MTU_4096) {
  1233. mlx5_ib_warn(dev, "invalid mtu %d\n", attr->path_mtu);
  1234. err = -EINVAL;
  1235. goto out;
  1236. }
  1237. context->mtu_msgmax = (attr->path_mtu << 5) | dev->mdev.caps.log_max_msg;
  1238. }
  1239. if (attr_mask & IB_QP_DEST_QPN)
  1240. context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num);
  1241. if (attr_mask & IB_QP_PKEY_INDEX)
  1242. context->pri_path.pkey_index = attr->pkey_index;
  1243. /* todo implement counter_index functionality */
  1244. if (is_sqp(ibqp->qp_type))
  1245. context->pri_path.port = qp->port;
  1246. if (attr_mask & IB_QP_PORT)
  1247. context->pri_path.port = attr->port_num;
  1248. if (attr_mask & IB_QP_AV) {
  1249. err = mlx5_set_path(dev, &attr->ah_attr, &context->pri_path,
  1250. attr_mask & IB_QP_PORT ? attr->port_num : qp->port,
  1251. attr_mask, 0, attr);
  1252. if (err)
  1253. goto out;
  1254. }
  1255. if (attr_mask & IB_QP_TIMEOUT)
	context->pri_path.ackto_lt |= attr->timeout << 3;

	if (attr_mask & IB_QP_ALT_PATH) {
		err = mlx5_set_path(dev, &attr->alt_ah_attr, &context->alt_path,
				    attr->alt_port_num, attr_mask, 0, attr);
		if (err)
			goto out;
	}

	pd = get_pd(qp);
	get_cqs(qp, &send_cq, &recv_cq);

	context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
	context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0;
	context->cqn_recv = recv_cq ? cpu_to_be32(recv_cq->mcq.cqn) : 0;
	context->params1  = cpu_to_be32(MLX5_IB_ACK_REQ_FREQ << 28);

	if (attr_mask & IB_QP_RNR_RETRY)
		context->params1 |= cpu_to_be32(attr->rnr_retry << 13);

	if (attr_mask & IB_QP_RETRY_CNT)
		context->params1 |= cpu_to_be32(attr->retry_cnt << 16);

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic)
			context->params1 |=
				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
	}

	if (attr_mask & IB_QP_SQ_PSN)
		context->next_send_psn = cpu_to_be32(attr->sq_psn);

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic)
			context->params2 |=
				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
	}

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
		context->params2 |= to_mlx5_access_flags(qp, attr, attr_mask);

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);

	if (attr_mask & IB_QP_RQ_PSN)
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

	if (attr_mask & IB_QP_QKEY)
		context->qkey = cpu_to_be32(attr->qkey);

	if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->db_rec_addr = cpu_to_be64(qp->db.dma);

	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
		sqd_event = 1;
	else
		sqd_event = 0;

	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->sq_crq_size |= cpu_to_be16(1 << 4);

	mlx5_cur = to_mlx5_state(cur_state);
	mlx5_new = to_mlx5_state(new_state);
	mlx5_st = to_mlx5_st(ibqp->qp_type);
	if (mlx5_cur < 0 || mlx5_new < 0 || mlx5_st < 0)
		goto out;

	optpar = ib_mask_to_mlx5_opt(attr_mask);
	optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
	in->optparam = cpu_to_be32(optpar);
	err = mlx5_core_qp_modify(&dev->mdev, to_mlx5_state(cur_state),
				  to_mlx5_state(new_state), in, sqd_event,
				  &qp->mqp);
	if (err)
		goto out;

	qp->state = new_state;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->atomic_rd_en = attr->qp_access_flags;
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->resp_depth = attr->max_dest_rd_atomic;
	if (attr_mask & IB_QP_PORT)
		qp->port = attr->port_num;
	if (attr_mask & IB_QP_ALT_PATH)
		qp->alt_port = attr->alt_port_num;

	/*
	 * If we moved a kernel QP to RESET, clean up all old CQ
	 * entries and reinitialize the QP.
	 */
	if (new_state == IB_QPS_RESET && !ibqp->uobject) {
		mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
				 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
		if (send_cq != recv_cq)
			mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);

		qp->rq.head = 0;
		qp->rq.tail = 0;
		qp->sq.head = 0;
		qp->sq.tail = 0;
		qp->sq.cur_post = 0;
		qp->sq.last_poll = 0;
		qp->db.db[MLX5_RCV_DBR] = 0;
		qp->db.db[MLX5_SND_DBR] = 0;
	}

out:
	kfree(in);
	return err;
}
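
/*
 * Validate a modify-QP request against the IB state machine and the
 * device limits (port number, P_Key table length, initiator/responder
 * atomic depth), then hand the actual transition to
 * __mlx5_ib_modify_qp().  A RESET->RESET "transition" is a no-op.
 */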
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	int err = -EINVAL;
	int port;

	mutex_lock(&qp->mutex);

	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (ibqp->qp_type != MLX5_IB_QPT_REG_UMR &&
	    !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask))
		goto out;

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > dev->mdev.caps.num_ports))
		goto out;

	if (attr_mask & IB_QP_PKEY_INDEX) {
		port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
		if (attr->pkey_index >= dev->mdev.caps.port[port - 1].pkey_table_len)
			goto out;
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > dev->mdev.caps.max_ra_res_qp)
		goto out;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > dev->mdev.caps.max_ra_req_qp)
		goto out;

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		err = 0;
		goto out;
	}

	err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);

out:
	mutex_unlock(&qp->mutex);
	return err;
}
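
/*
 * Check whether posting @nreq more WQEs would overflow @wq.  The first
 * head/tail snapshot is taken unlocked as a fast path; on apparent
 * overflow it is retaken under the CQ lock, since the tail is only
 * advanced from the CQ polling path.
 */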
static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
{
	struct mlx5_ib_cq *cq;
	unsigned cur;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max_post))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max_post;
}
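
/*
 * Builders for the fixed-size WQE segments: RDMA remote address,
 * atomic operands, UD address vector and scatter/gather entries.
 * Each helper writes one segment in wire (big-endian) format.
 */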
static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
					  u64 remote_addr, u32 rkey)
{
	rseg->raddr    = cpu_to_be64(remote_addr);
	rseg->rkey     = cpu_to_be32(rkey);
	rseg->reserved = 0;
}

static void set_atomic_seg(struct mlx5_wqe_atomic_seg *aseg, struct ib_send_wr *wr)
{
	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
		aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
		aseg->compare  = cpu_to_be64(wr->wr.atomic.compare_add);
	} else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
		aseg->compare  = cpu_to_be64(wr->wr.atomic.compare_add_mask);
	} else {
		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
		aseg->compare  = 0;
	}
}

static void set_masked_atomic_seg(struct mlx5_wqe_masked_atomic_seg *aseg,
				  struct ib_send_wr *wr)
{
	aseg->swap_add      = cpu_to_be64(wr->wr.atomic.swap);
	aseg->swap_add_mask = cpu_to_be64(wr->wr.atomic.swap_mask);
	aseg->compare       = cpu_to_be64(wr->wr.atomic.compare_add);
	aseg->compare_mask  = cpu_to_be64(wr->wr.atomic.compare_add_mask);
}

static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
			     struct ib_send_wr *wr)
{
	memcpy(&dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof(struct mlx5_av));
	dseg->av.dqp_dct = cpu_to_be32(wr->wr.ud.remote_qpn | MLX5_EXTENDED_UD_AV);
	dseg->av.key.qkey.qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
}

static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->byte_count = cpu_to_be32(sg->length);
	dseg->lkey       = cpu_to_be32(sg->lkey);
	dseg->addr       = cpu_to_be64(sg->addr);
}
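
/*
 * Fast-register (FRWR) and UMR helpers.  Translation entries are
 * consumed in "octowords" (16-byte units), and the hardware appears to
 * require the page list padded to a multiple of eight entries, hence
 * ALIGN(npages, 8) / 2.  frwr_mkey_mask() selects which mkey fields a
 * fast-register WQE is permitted to update.
 */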
static __be16 get_klm_octo(int npages)
{
	return cpu_to_be16(ALIGN(npages, 8) / 2);
}

static __be64 frwr_mkey_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_LEN		|
		 MLX5_MKEY_MASK_PAGE_SIZE	|
		 MLX5_MKEY_MASK_START_ADDR	|
		 MLX5_MKEY_MASK_EN_RINVAL	|
		 MLX5_MKEY_MASK_KEY		|
		 MLX5_MKEY_MASK_LR		|
		 MLX5_MKEY_MASK_LW		|
		 MLX5_MKEY_MASK_RR		|
		 MLX5_MKEY_MASK_RW		|
		 MLX5_MKEY_MASK_A		|
		 MLX5_MKEY_MASK_SMALL_FENCE	|
		 MLX5_MKEY_MASK_FREE;

	return cpu_to_be64(result);
}

static void set_frwr_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
				 struct ib_send_wr *wr, int li)
{
	memset(umr, 0, sizeof(*umr));

	if (li) {
		umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
		umr->flags = 1 << 7;
		return;
	}

	umr->flags = (1 << 5); /* fail if not free */
	umr->klm_octowords = get_klm_octo(wr->wr.fast_reg.page_list_len);
	umr->mkey_mask = frwr_mkey_mask();
}

static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
				struct ib_send_wr *wr)
{
	struct umr_wr *umrwr = (struct umr_wr *)&wr->wr.fast_reg;
	u64 mask;

	memset(umr, 0, sizeof(*umr));

	if (!(wr->send_flags & MLX5_IB_SEND_UMR_UNREG)) {
		umr->flags = 1 << 5; /* fail if not free */
		umr->klm_octowords = get_klm_octo(umrwr->npages);
		mask = MLX5_MKEY_MASK_LEN	|
		       MLX5_MKEY_MASK_PAGE_SIZE	|
		       MLX5_MKEY_MASK_START_ADDR |
		       MLX5_MKEY_MASK_PD	|
		       MLX5_MKEY_MASK_LR	|
		       MLX5_MKEY_MASK_LW	|
		       MLX5_MKEY_MASK_RR	|
		       MLX5_MKEY_MASK_RW	|
		       MLX5_MKEY_MASK_A		|
		       MLX5_MKEY_MASK_FREE;
		umr->mkey_mask = cpu_to_be64(mask);
	} else {
		umr->flags = 2 << 5; /* fail if free */
		mask = MLX5_MKEY_MASK_FREE;
		umr->mkey_mask = cpu_to_be64(mask);
	}

	if (!wr->num_sge)
		umr->flags |= (1 << 7); /* inline */
}
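
/*
 * Translate IB access flags into mkey permission bits and fill the
 * mkey context segment of fast-register and UMR WQEs.  For an
 * invalidate, only the free status bit (1 << 6) is set.
 */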
static u8 get_umr_flags(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
		MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN | MLX5_ACCESS_MODE_MTT;
}

static void set_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr,
			     int li, int *writ)
{
	memset(seg, 0, sizeof(*seg));

	if (li) {
		seg->status = 1 << 6;
		return;
	}

	seg->flags = get_umr_flags(wr->wr.fast_reg.access_flags);
	*writ = seg->flags & (MLX5_PERM_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE);
	seg->qpn_mkey7_0 = cpu_to_be32((wr->wr.fast_reg.rkey & 0xff) | 0xffffff00);
	seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
	seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start);
	seg->len = cpu_to_be64(wr->wr.fast_reg.length);
	seg->xlt_oct_size = cpu_to_be32((wr->wr.fast_reg.page_list_len + 1) / 2);
	seg->log2_page_size = wr->wr.fast_reg.page_shift;
}

static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr)
{
	memset(seg, 0, sizeof(*seg));

	if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) {
		seg->status = 1 << 6;
		return;
	}

	seg->flags = convert_access(wr->wr.fast_reg.access_flags);
	seg->flags_pd = cpu_to_be32(to_mpd((struct ib_pd *)wr->wr.fast_reg.page_list)->pdn);
	seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start);
	seg->len = cpu_to_be64(wr->wr.fast_reg.length);
	seg->log2_page_size = wr->wr.fast_reg.page_shift;
	seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
}
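
/*
 * Fill the data segment of a fast-register WQE with the DMA address of
 * the mapped page list; each entry carries read and, when requested,
 * write permission in its low-order bits.
 */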
static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg,
			   struct ib_send_wr *wr,
			   struct mlx5_core_dev *mdev,
			   struct mlx5_ib_pd *pd,
			   int writ)
{
	struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list);
	u64 *page_list = wr->wr.fast_reg.page_list->page_list;
	u64 perm = MLX5_EN_RD | (writ ? MLX5_EN_WR : 0);
	int i;

	for (i = 0; i < wr->wr.fast_reg.page_list_len; i++)
		mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm);
	dseg->addr = cpu_to_be64(mfrpl->map);
	dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64));
	dseg->lkey = cpu_to_be32(pd->pa_lkey);
}

static __be32 send_ieth(struct ib_send_wr *wr)
{
	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return wr->ex.imm_data;

	case IB_WR_SEND_WITH_INV:
		return cpu_to_be32(wr->ex.invalidate_rkey);

	default:
		return 0;
	}
}
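
/*
 * WQE signature support: calc_sig() returns the complement of the XOR
 * over @size bytes, and wq_sig() recovers the number of bytes to
 * checksum from the DS field at byte 8 of the control segment (in
 * units of 16 bytes).
 */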
static u8 calc_sig(void *wqe, int size)
{
	u8 *p = wqe;
	u8 res = 0;
	int i;

	for (i = 0; i < size; i++)
		res ^= p[i];

	return ~res;
}

static u8 wq_sig(void *wqe)
{
	return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
}
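
/*
 * Copy the payload of a work request inline into the send queue,
 * wrapping back to the start of the queue buffer when the copy crosses
 * qend.  On success, *sz holds the inline segment size in units of 16
 * bytes.
 */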
static int set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr,
			    void *wqe, int *sz)
{
	struct mlx5_wqe_inline_seg *seg;
	void *qend = qp->sq.qend;
	void *addr;
	int inl = 0;
	int copy;
	int len;
	int i;

	seg = wqe;
	wqe += sizeof(*seg);
	for (i = 0; i < wr->num_sge; i++) {
		addr = (void *)(unsigned long)(wr->sg_list[i].addr);
		len  = wr->sg_list[i].length;
		inl += len;

		if (unlikely(inl > qp->max_inline_data))
			return -ENOMEM;

		if (unlikely(wqe + len > qend)) {
			copy = qend - wqe;
			memcpy(wqe, addr, copy);
			addr += copy;
			len -= copy;
			wqe = mlx5_get_send_wqe(qp, 0);
		}
		memcpy(wqe, addr, len);
		wqe += len;
	}

	seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);

	*sz = ALIGN(inl + sizeof(seg->byte_count), 16) / 16;

	return 0;
}
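
/*
 * Build the segments of a fast-register or local-invalidate WQE: the
 * UMR control segment, the mkey context and, for fast registration,
 * the page-list data segment.  The cursor may reach qend after any
 * segment, so it is wrapped back to the start of the queue buffer in
 * between.
 */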
static int set_frwr_li_wr(void **seg, struct ib_send_wr *wr, int *size,
			  struct mlx5_core_dev *mdev, struct mlx5_ib_pd *pd, struct mlx5_ib_qp *qp)
{
	int writ = 0;
	int li;

	li = wr->opcode == IB_WR_LOCAL_INV ? 1 : 0;
	if (unlikely(wr->send_flags & IB_SEND_INLINE))
		return -EINVAL;

	set_frwr_umr_segment(*seg, wr, li);
	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);
	set_mkey_segment(*seg, wr, li, &writ);
	*seg += sizeof(struct mlx5_mkey_seg);
	*size += sizeof(struct mlx5_mkey_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);
	if (!li) {
		set_frwr_pages(*seg, wr, mdev, pd, writ);
		*seg += sizeof(struct mlx5_wqe_data_seg);
		*size += (sizeof(struct mlx5_wqe_data_seg) / 16);
	}
	return 0;
}

static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
{
	__be32 *p = NULL;
	int tidx = idx;
	int i, j;

	pr_debug("dump wqe at %p\n", mlx5_get_send_wqe(qp, tidx));
	for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) {
		if ((i & 0xf) == 0) {
			void *buf = mlx5_get_send_wqe(qp, tidx);
			tidx = (tidx + 1) & (qp->sq.wqe_cnt - 1);
			p = buf;
			j = 0;
		}
		pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
			 be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]),
			 be32_to_cpu(p[j + 3]));
	}
}
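
/*
 * Copy a WQE into the BlueFlame register 64 bytes at a time, wrapping
 * the source pointer back to the start of the send queue buffer when
 * it reaches qend.  @bytecnt must be a multiple of 64.
 */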
static void mlx5_bf_copy(u64 __iomem *dst, u64 *src,
			 unsigned bytecnt, struct mlx5_ib_qp *qp)
{
	while (bytecnt > 0) {
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		bytecnt -= 64;
		if (unlikely(src == qp->sq.qend))
			src = mlx5_get_send_wqe(qp, 0);
	}
}
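
/*
 * Derive the fence mode of a WQE from the cached fence state and the
 * work request flags; a fenced local invalidate requires strong
 * ordering.
 */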
static u8 get_fence(u8 fence, struct ib_send_wr *wr)
{
	if (unlikely(wr->opcode == IB_WR_LOCAL_INV &&
		     wr->send_flags & IB_SEND_FENCE))
		return MLX5_FENCE_MODE_STRONG_ORDERING;

	if (unlikely(fence)) {
		if (wr->send_flags & IB_SEND_FENCE)
			return MLX5_FENCE_MODE_SMALL_AND_FENCE;
		else
			return fence;
	} else {
		return 0;
	}
}
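
/*
 * Post a chain of send work requests.  For each request, claim a slot
 * in the send queue, build the control segment and the
 * transport-specific segments, and finally ring the doorbell once for
 * the whole chain after a write barrier.
 */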
int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr)
{
	struct mlx5_wqe_ctrl_seg *ctrl = NULL;  /* compiler warning */
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_core_dev *mdev = &dev->mdev;
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_wqe_data_seg *dpseg;
	struct mlx5_wqe_xrc_seg *xrc;
	struct mlx5_bf *bf = qp->bf;
	int uninitialized_var(size);
	void *qend = qp->sq.qend;
	unsigned long flags;
	u32 mlx5_opcode;
	unsigned idx;
	int err = 0;
	int inl = 0;
	int num_sge;
	void *seg;
	int nreq;
	int i;
	u8 next_fence = 0;
	u8 opmod = 0;
	u8 fence;

	spin_lock_irqsave(&qp->sq.lock, flags);

	for (nreq = 0; wr; nreq++, wr = wr->next) {
		if (unlikely(wr->opcode >= sizeof(mlx5_ib_opcode) / sizeof(mlx5_ib_opcode[0]))) {
			mlx5_ib_warn(dev, "\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) {
			mlx5_ib_warn(dev, "\n");
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		fence = qp->fm_cache;
		num_sge = wr->num_sge;
		if (unlikely(num_sge > qp->sq.max_gs)) {
			mlx5_ib_warn(dev, "\n");
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
		seg = mlx5_get_send_wqe(qp, idx);
		ctrl = seg;
		*(uint32_t *)(seg + 8) = 0;
		ctrl->imm = send_ieth(wr);
		ctrl->fm_ce_se = qp->sq_signal_bits |
			(wr->send_flags & IB_SEND_SIGNALED ?
			 MLX5_WQE_CTRL_CQ_UPDATE : 0) |
			(wr->send_flags & IB_SEND_SOLICITED ?
			 MLX5_WQE_CTRL_SOLICITED : 0);

		seg += sizeof(*ctrl);
		size = sizeof(*ctrl) / 16;

		switch (ibqp->qp_type) {
		case IB_QPT_XRC_INI:
			xrc = seg;
			xrc->xrc_srqn = htonl(wr->xrc_remote_srq_num);
			seg += sizeof(*xrc);
			size += sizeof(*xrc) / 16;
			/* fall through */
		case IB_QPT_RC:
			switch (wr->opcode) {
			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(seg, wr->wr.rdma.remote_addr,
					      wr->wr.rdma.rkey);
				seg  += sizeof(struct mlx5_wqe_raddr_seg);
				size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
				break;

			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				set_raddr_seg(seg, wr->wr.atomic.remote_addr,
					      wr->wr.atomic.rkey);
				seg  += sizeof(struct mlx5_wqe_raddr_seg);

				set_atomic_seg(seg, wr);
				seg  += sizeof(struct mlx5_wqe_atomic_seg);

				size += (sizeof(struct mlx5_wqe_raddr_seg) +
					 sizeof(struct mlx5_wqe_atomic_seg)) / 16;
				break;

			case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
				set_raddr_seg(seg, wr->wr.atomic.remote_addr,
					      wr->wr.atomic.rkey);
				seg  += sizeof(struct mlx5_wqe_raddr_seg);

				set_masked_atomic_seg(seg, wr);
				seg  += sizeof(struct mlx5_wqe_masked_atomic_seg);

				size += (sizeof(struct mlx5_wqe_raddr_seg) +
					 sizeof(struct mlx5_wqe_masked_atomic_seg)) / 16;
				break;

			case IB_WR_LOCAL_INV:
				next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
				qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
				ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
				err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					*bad_wr = wr;
					goto out;
				}
				num_sge = 0;
				break;

			case IB_WR_FAST_REG_MR:
				next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
				qp->sq.wr_data[idx] = IB_WR_FAST_REG_MR;
				ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey);
				err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					*bad_wr = wr;
					goto out;
				}
				num_sge = 0;
				break;

			default:
				break;
			}
			break;

		case IB_QPT_UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(seg, wr->wr.rdma.remote_addr,
					      wr->wr.rdma.rkey);
				seg  += sizeof(struct mlx5_wqe_raddr_seg);
				size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
				break;

			default:
				break;
			}
			break;

		case IB_QPT_UD:
		case IB_QPT_SMI:
		case IB_QPT_GSI:
			set_datagram_seg(seg, wr);
			seg  += sizeof(struct mlx5_wqe_datagram_seg);
			size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
			if (unlikely((seg == qend)))
				seg = mlx5_get_send_wqe(qp, 0);
			break;

		case MLX5_IB_QPT_REG_UMR:
			if (wr->opcode != MLX5_IB_WR_UMR) {
				err = -EINVAL;
				mlx5_ib_warn(dev, "bad opcode\n");
				goto out;
			}
			qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
			ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey);
			set_reg_umr_segment(seg, wr);
			seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
			size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
			if (unlikely((seg == qend)))
				seg = mlx5_get_send_wqe(qp, 0);
			set_reg_mkey_segment(seg, wr);
			seg += sizeof(struct mlx5_mkey_seg);
			size += sizeof(struct mlx5_mkey_seg) / 16;
			if (unlikely((seg == qend)))
				seg = mlx5_get_send_wqe(qp, 0);
			break;

		default:
			break;
		}

		if (wr->send_flags & IB_SEND_INLINE && num_sge) {
			int uninitialized_var(sz);

			err = set_data_inl_seg(qp, wr, seg, &sz);
			if (unlikely(err)) {
				mlx5_ib_warn(dev, "\n");
				*bad_wr = wr;
				goto out;
			}
			inl = 1;
			size += sz;
		} else {
			dpseg = seg;
			for (i = 0; i < num_sge; i++) {
				if (unlikely(dpseg == qend)) {
					seg = mlx5_get_send_wqe(qp, 0);
					dpseg = seg;
				}
				if (likely(wr->sg_list[i].length)) {
					set_data_ptr_seg(dpseg, wr->sg_list + i);
					size += sizeof(struct mlx5_wqe_data_seg) / 16;
					dpseg++;
				}
			}
		}

		mlx5_opcode = mlx5_ib_opcode[wr->opcode];
		ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
						     mlx5_opcode |
						     ((u32)opmod << 24));
		ctrl->qpn_ds = cpu_to_be32(size | (qp->mqp.qpn << 8));
		ctrl->fm_ce_se |= get_fence(fence, wr);
		qp->fm_cache = next_fence;
		if (unlikely(qp->wq_sig))
			ctrl->signature = wq_sig(ctrl);

		qp->sq.wrid[idx] = wr->wr_id;
		qp->sq.w_list[idx].opcode = mlx5_opcode;
		qp->sq.wqe_head[idx] = qp->sq.head + nreq;
		qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
		qp->sq.w_list[idx].next = qp->sq.cur_post;

		if (0)
			dump_wqe(qp, idx, size);
	}

out:
	if (likely(nreq)) {
		qp->sq.head += nreq;

		/* Make sure that descriptors are written before
		 * updating doorbell record and ringing the doorbell
		 */
		wmb();

		qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);

		if (bf->need_lock)
			spin_lock(&bf->lock);

		/* TBD enable WC */
		if (0 && nreq == 1 && bf->uuarn && inl && size > 1 && size <= bf->buf_size / 16) {
			mlx5_bf_copy(bf->reg + bf->offset, (u64 *)ctrl, ALIGN(size * 16, 64), qp);
			/* wc_wmb(); */
		} else {
			mlx5_write64((__be32 *)ctrl, bf->reg + bf->offset,
				     MLX5_GET_DOORBELL_LOCK(&bf->lock32));
			/* Make sure doorbells don't leak out of SQ spinlock
			 * and reach the HCA out of order.
			 */
			mmiowb();
		}
		bf->offset ^= bf->buf_size;
		if (bf->need_lock)
			spin_unlock(&bf->lock);
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return err;
}

static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size)
{
	sig->signature = calc_sig(sig, size);
}
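
/*
 * Post a chain of receive work requests: fill one scatter list per
 * WQE, terminated by an invalid-lkey entry when it is shorter than
 * max_gs, and update the doorbell record after a write barrier.
 */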
int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_wqe_data_seg *scat;
	struct mlx5_rwqe_sig *sig;
	unsigned long flags;
	int err = 0;
	int nreq;
	int ind;
	int i;

	spin_lock_irqsave(&qp->rq.lock, flags);

	ind = qp->rq.head & (qp->rq.wqe_cnt - 1);

	for (nreq = 0; wr; nreq++, wr = wr->next) {
		if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		scat = get_recv_wqe(qp, ind);
		if (qp->wq_sig)
			scat++;

		for (i = 0; i < wr->num_sge; i++)
			set_data_ptr_seg(scat + i, wr->sg_list + i);

		if (i < qp->rq.max_gs) {
			scat[i].byte_count = 0;
			scat[i].lkey       = cpu_to_be32(MLX5_INVALID_LKEY);
			scat[i].addr       = 0;
		}

		if (qp->wq_sig) {
			sig = (struct mlx5_rwqe_sig *)scat;
			set_sig_seg(sig, (qp->rq.max_gs + 1) << 2);
		}

		qp->rq.wrid[ind] = wr->wr_id;

		ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
	}

out:
	if (likely(nreq)) {
		qp->rq.head += nreq;

		/* Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);

	return err;
}

static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state)
{
	switch (mlx5_state) {
	case MLX5_QP_STATE_RST:		return IB_QPS_RESET;
	case MLX5_QP_STATE_INIT:	return IB_QPS_INIT;
	case MLX5_QP_STATE_RTR:		return IB_QPS_RTR;
	case MLX5_QP_STATE_RTS:		return IB_QPS_RTS;
	case MLX5_QP_STATE_SQ_DRAINING:
	case MLX5_QP_STATE_SQD:		return IB_QPS_SQD;
	case MLX5_QP_STATE_SQER:	return IB_QPS_SQE;
	case MLX5_QP_STATE_ERR:		return IB_QPS_ERR;
	default:			return -1;
	}
}

static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state)
{
	switch (mlx5_mig_state) {
	case MLX5_QP_PM_ARMED:		return IB_MIG_ARMED;
	case MLX5_QP_PM_REARM:		return IB_MIG_REARM;
	case MLX5_QP_PM_MIGRATED:	return IB_MIG_MIGRATED;
	default:			return -1;
	}
}

static int to_ib_qp_access_flags(int mlx5_flags)
{
	int ib_flags = 0;

	if (mlx5_flags & MLX5_QP_BIT_RRE)
		ib_flags |= IB_ACCESS_REMOTE_READ;
	if (mlx5_flags & MLX5_QP_BIT_RWE)
		ib_flags |= IB_ACCESS_REMOTE_WRITE;
	if (mlx5_flags & MLX5_QP_BIT_RAE)
		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

	return ib_flags;
}

static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr,
			  struct mlx5_qp_path *path)
{
	struct mlx5_core_dev *dev = &ibdev->mdev;

	memset(ib_ah_attr, 0, sizeof(*ib_ah_attr));
	ib_ah_attr->port_num = path->port;

	if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports)
		return;

	ib_ah_attr->sl = path->sl & 0xf;

	ib_ah_attr->dlid	  = be16_to_cpu(path->rlid);
	ib_ah_attr->src_path_bits = path->grh_mlid & 0x7f;
	ib_ah_attr->static_rate   = path->static_rate ? path->static_rate - 5 : 0;
	ib_ah_attr->ah_flags	  = (path->grh_mlid & (1 << 7)) ? IB_AH_GRH : 0;

	if (ib_ah_attr->ah_flags) {
		ib_ah_attr->grh.sgid_index = path->mgid_index;
		ib_ah_attr->grh.hop_limit  = path->hop_limit;
		ib_ah_attr->grh.traffic_class =
			(be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
		ib_ah_attr->grh.flow_label =
			be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
		memcpy(ib_ah_attr->grh.dgid.raw,
		       path->rgid, sizeof(ib_ah_attr->grh.dgid.raw));
	}
}
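
/*
 * Query the QP context from firmware and translate it back into the
 * ib_qp_attr/ib_qp_init_attr representation, unpacking the bit fields
 * (state, MTU, PSNs, atomic depths, timeouts) from the mailbox layout.
 */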
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_query_qp_mbox_out *outb;
	struct mlx5_qp_context *context;
	int mlx5_state;
	int err = 0;

	mutex_lock(&qp->mutex);
	outb = kzalloc(sizeof(*outb), GFP_KERNEL);
	if (!outb) {
		err = -ENOMEM;
		goto out;
	}
	context = &outb->ctx;
	err = mlx5_core_qp_query(&dev->mdev, &qp->mqp, outb, sizeof(*outb));
	if (err)
		goto out_free;

	mlx5_state = be32_to_cpu(context->flags) >> 28;

	qp->state		 = to_ib_qp_state(mlx5_state);
	qp_attr->qp_state	 = qp->state;
	qp_attr->path_mtu	 = context->mtu_msgmax >> 5;
	qp_attr->path_mig_state	 =
		to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
	qp_attr->qkey		 = be32_to_cpu(context->qkey);
	qp_attr->rq_psn		 = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
	qp_attr->sq_psn		 = be32_to_cpu(context->next_send_psn) & 0xffffff;
	qp_attr->dest_qp_num	 = be32_to_cpu(context->log_pg_sz_remote_qpn) & 0xffffff;
	qp_attr->qp_access_flags =
		to_ib_qp_access_flags(be32_to_cpu(context->params2));

	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
		to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
		to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
		qp_attr->alt_pkey_index = context->alt_path.pkey_index & 0x7f;
		qp_attr->alt_port_num	= qp_attr->alt_ah_attr.port_num;
	}

	qp_attr->pkey_index = context->pri_path.pkey_index & 0x7f;
	qp_attr->port_num = context->pri_path.port;

	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
	qp_attr->sq_draining = mlx5_state == MLX5_QP_STATE_SQ_DRAINING;

	qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);

	qp_attr->max_dest_rd_atomic =
		1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
	qp_attr->min_rnr_timer =
		(be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
	qp_attr->timeout       = context->pri_path.ackto_lt >> 3;
	qp_attr->retry_cnt     = (be32_to_cpu(context->params1) >> 16) & 0x7;
	qp_attr->rnr_retry     = (be32_to_cpu(context->params1) >> 13) & 0x7;
	qp_attr->alt_timeout   = context->alt_path.ackto_lt >> 3;
	qp_attr->cur_qp_state  = qp_attr->qp_state;
	qp_attr->cap.max_recv_wr  = qp->rq.wqe_cnt;
	qp_attr->cap.max_recv_sge = qp->rq.max_gs;

	if (!ibqp->uobject) {
		qp_attr->cap.max_send_wr  = qp->sq.wqe_cnt;
		qp_attr->cap.max_send_sge = qp->sq.max_gs;
	} else {
		qp_attr->cap.max_send_wr  = 0;
		qp_attr->cap.max_send_sge = 0;
	}

	/* We don't support inline sends for kernel QPs (yet), and we
	 * don't know what userspace's value should be.
	 */
	qp_attr->cap.max_inline_data = 0;

	qp_init_attr->cap = qp_attr->cap;

	qp_init_attr->create_flags = 0;
	if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
		qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;

	qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
		IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;

out_free:
	kfree(outb);

out:
	mutex_unlock(&qp->mutex);
	return err;
}

struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
				   struct ib_ucontext *context,
				   struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_xrcd *xrcd;
	int err;

	if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_XRC))
		return ERR_PTR(-ENOSYS);

	xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
	if (!xrcd)
		return ERR_PTR(-ENOMEM);

	err = mlx5_core_xrcd_alloc(&dev->mdev, &xrcd->xrcdn);
	if (err) {
		kfree(xrcd);
		/* propagate the firmware error instead of masking it as -ENOMEM */
		return ERR_PTR(err);
	}

	return &xrcd->ibxrcd;
}

int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
	u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
	int err;

	err = mlx5_core_xrcd_dealloc(&dev->mdev, xrcdn);
	if (err) {
		mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
		return err;
	}

	kfree(xrcd);

	return 0;
}