/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_qp.c 1355 2004-12-17 15:23:43Z roland $
 */

#include <linux/init.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"
enum {
	MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE,
	MTHCA_ACK_REQ_FREQ       = 10,
	MTHCA_FLIGHT_LIMIT       = 9,
	MTHCA_UD_HEADER_SIZE     = 72, /* largest UD header possible */
	MTHCA_INLINE_HEADER_SIZE = 4,  /* data segment overhead for inline */
	MTHCA_INLINE_CHUNK_SIZE  = 16  /* inline data segment chunk */
};

enum {
	MTHCA_QP_STATE_RST      = 0,
	MTHCA_QP_STATE_INIT     = 1,
	MTHCA_QP_STATE_RTR      = 2,
	MTHCA_QP_STATE_RTS      = 3,
	MTHCA_QP_STATE_SQE      = 4,
	MTHCA_QP_STATE_SQD      = 5,
	MTHCA_QP_STATE_ERR      = 6,
	MTHCA_QP_STATE_DRAINING = 7
};

enum {
	MTHCA_QP_ST_RC  = 0x0,
	MTHCA_QP_ST_UC  = 0x1,
	MTHCA_QP_ST_RD  = 0x2,
	MTHCA_QP_ST_UD  = 0x3,
	MTHCA_QP_ST_MLX = 0x7
};

enum {
	MTHCA_QP_PM_MIGRATED = 0x3,
	MTHCA_QP_PM_ARMED    = 0x0,
	MTHCA_QP_PM_REARM    = 0x1
};

enum {
	/* qp_context flags */
	MTHCA_QP_BIT_DE  = 1 <<  8,
	/* params1 */
	MTHCA_QP_BIT_SRE = 1 << 15,
	MTHCA_QP_BIT_SWE = 1 << 14,
	MTHCA_QP_BIT_SAE = 1 << 13,
	MTHCA_QP_BIT_SIC = 1 <<  4,
	MTHCA_QP_BIT_SSC = 1 <<  3,
	/* params2 */
	MTHCA_QP_BIT_RRE = 1 << 15,
	MTHCA_QP_BIT_RWE = 1 << 14,
	MTHCA_QP_BIT_RAE = 1 << 13,
	MTHCA_QP_BIT_RIC = 1 <<  4,
	MTHCA_QP_BIT_RSC = 1 <<  3
};

struct mthca_qp_path {
	__be32 port_pkey;
	u8     rnr_retry;
	u8     g_mylmc;
	__be16 rlid;
	u8     ackto;
	u8     mgid_index;
	u8     static_rate;
	u8     hop_limit;
	__be32 sl_tclass_flowlabel;
	u8     rgid[16];
} __attribute__((packed));

struct mthca_qp_context {
	__be32 flags;
	__be32 tavor_sched_queue;	/* Reserved on Arbel */
	u8     mtu_msgmax;
	u8     rq_size_stride;		/* Reserved on Tavor */
	u8     sq_size_stride;		/* Reserved on Tavor */
	u8     rlkey_arbel_sched_queue;	/* Reserved on Tavor */
	__be32 usr_page;
	__be32 local_qpn;
	__be32 remote_qpn;
	u32    reserved1[2];
	struct mthca_qp_path pri_path;
	struct mthca_qp_path alt_path;
	__be32 rdd;
	__be32 pd;
	__be32 wqe_base;
	__be32 wqe_lkey;
	__be32 params1;
	__be32 reserved2;
	__be32 next_send_psn;
	__be32 cqn_snd;
	__be32 snd_wqe_base_l;		/* Next send WQE on Tavor */
	__be32 snd_db_index;		/* (debugging only entries) */
	__be32 last_acked_psn;
	__be32 ssn;
	__be32 params2;
	__be32 rnr_nextrecvpsn;
	__be32 ra_buff_indx;
	__be32 cqn_rcv;
	__be32 rcv_wqe_base_l;		/* Next recv WQE on Tavor */
	__be32 rcv_db_index;		/* (debugging only entries) */
	__be32 qkey;
	__be32 srqn;
	__be32 rmsn;
	__be16 rq_wqe_counter;		/* reserved on Tavor */
	__be16 sq_wqe_counter;		/* reserved on Tavor */
	u32    reserved3[18];
} __attribute__((packed));

struct mthca_qp_param {
	__be32 opt_param_mask;
	u32    reserved1;
	struct mthca_qp_context context;
	u32    reserved2[62];
} __attribute__((packed));

enum {
	MTHCA_QP_OPTPAR_ALT_ADDR_PATH     = 1 << 0,
	MTHCA_QP_OPTPAR_RRE               = 1 << 1,
	MTHCA_QP_OPTPAR_RAE               = 1 << 2,
	MTHCA_QP_OPTPAR_RWE               = 1 << 3,
	MTHCA_QP_OPTPAR_PKEY_INDEX        = 1 << 4,
	MTHCA_QP_OPTPAR_Q_KEY             = 1 << 5,
	MTHCA_QP_OPTPAR_RNR_TIMEOUT       = 1 << 6,
	MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7,
	MTHCA_QP_OPTPAR_SRA_MAX           = 1 << 8,
	MTHCA_QP_OPTPAR_RRA_MAX           = 1 << 9,
	MTHCA_QP_OPTPAR_PM_STATE          = 1 << 10,
	MTHCA_QP_OPTPAR_PORT_NUM          = 1 << 11,
	MTHCA_QP_OPTPAR_RETRY_COUNT       = 1 << 12,
	MTHCA_QP_OPTPAR_ALT_RNR_RETRY     = 1 << 13,
	MTHCA_QP_OPTPAR_ACK_TIMEOUT       = 1 << 14,
	MTHCA_QP_OPTPAR_RNR_RETRY         = 1 << 15,
	MTHCA_QP_OPTPAR_SCHED_QUEUE       = 1 << 16
};

static const u8 mthca_opcode[] = {
	[IB_WR_SEND]                 = MTHCA_OPCODE_SEND,
	[IB_WR_SEND_WITH_IMM]        = MTHCA_OPCODE_SEND_IMM,
	[IB_WR_RDMA_WRITE]           = MTHCA_OPCODE_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM]  = MTHCA_OPCODE_RDMA_WRITE_IMM,
	[IB_WR_RDMA_READ]            = MTHCA_OPCODE_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP]   = MTHCA_OPCODE_ATOMIC_CS,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = MTHCA_OPCODE_ATOMIC_FA,
};
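
/*
 * The four QPNs starting at sqp_start are reserved for the special
 * QPs: QP0 for each port, followed by QP1 for each port (see the
 * mqpn computation in mthca_alloc_sqp() below).
 */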
static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp)
{
	return qp->qpn >= dev->qp_table.sqp_start &&
		qp->qpn <= dev->qp_table.sqp_start + 3;
}

static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp)
{
	return qp->qpn >= dev->qp_table.sqp_start &&
		qp->qpn <= dev->qp_table.sqp_start + 1;
}
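
/*
 * A QP's work queue buffer may be one physically contiguous ("direct")
 * allocation or scattered across a page list; the WQE lookup helpers
 * below handle both layouts.
 */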
static void *get_recv_wqe(struct mthca_qp *qp, int n)
{
	if (qp->is_direct)
		return qp->queue.direct.buf + (n << qp->rq.wqe_shift);
	else
		return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf +
			((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1));
}

static void *get_send_wqe(struct mthca_qp *qp, int n)
{
	if (qp->is_direct)
		return qp->queue.direct.buf + qp->send_wqe_offset +
			(n << qp->sq.wqe_shift);
	else
		return qp->queue.page_list[(qp->send_wqe_offset +
					    (n << qp->sq.wqe_shift)) >>
					   PAGE_SHIFT].buf +
			((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) &
			 (PAGE_SIZE - 1));
}

static void mthca_wq_init(struct mthca_wq *wq)
{
	spin_lock_init(&wq->lock);
	wq->next_ind  = 0;
	wq->last_comp = wq->max - 1;
	wq->head      = 0;
	wq->tail      = 0;
}
void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
		    enum ib_event_type event_type)
{
	struct mthca_qp *qp;
	struct ib_event event;

	spin_lock(&dev->qp_table.lock);
	qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
	if (qp)
		atomic_inc(&qp->refcount);
	spin_unlock(&dev->qp_table.lock);

	if (!qp) {
		mthca_warn(dev, "Async event for bogus QP %08x\n", qpn);
		return;
	}

	event.device     = &dev->ib_dev;
	event.event      = event_type;
	event.element.qp = &qp->ibqp;
	if (qp->ibqp.event_handler)
		qp->ibqp.event_handler(&event, qp->ibqp.qp_context);

	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}

static int to_mthca_state(enum ib_qp_state ib_state)
{
	switch (ib_state) {
	case IB_QPS_RESET: return MTHCA_QP_STATE_RST;
	case IB_QPS_INIT:  return MTHCA_QP_STATE_INIT;
	case IB_QPS_RTR:   return MTHCA_QP_STATE_RTR;
	case IB_QPS_RTS:   return MTHCA_QP_STATE_RTS;
	case IB_QPS_SQD:   return MTHCA_QP_STATE_SQD;
	case IB_QPS_SQE:   return MTHCA_QP_STATE_SQE;
	case IB_QPS_ERR:   return MTHCA_QP_STATE_ERR;
	default:           return -1;
	}
}
enum { RC, UC, UD, RD, RDEE, MLX, NUM_TRANS };

static int to_mthca_st(int transport)
{
	switch (transport) {
	case RC:  return MTHCA_QP_ST_RC;
	case UC:  return MTHCA_QP_ST_UC;
	case UD:  return MTHCA_QP_ST_UD;
	case RD:  return MTHCA_QP_ST_RD;
	case MLX: return MTHCA_QP_ST_MLX;
	default:  return -1;
	}
}
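
/*
 * For each legal (current state, next state) pair, this table gives
 * the firmware transition command plus the IB attributes that are
 * required and optional for that transition, per transport type.
 */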
static const struct {
	int trans;
	u32 req_param[NUM_TRANS];
	u32 opt_param[NUM_TRANS];
} state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_INIT]  = {
			.trans = MTHCA_TRANS_RST2INIT,
			.req_param = {
				[UD]  = (IB_QP_PKEY_INDEX |
					 IB_QP_PORT       |
					 IB_QP_QKEY),
				[UC]  = (IB_QP_PKEY_INDEX |
					 IB_QP_PORT       |
					 IB_QP_ACCESS_FLAGS),
				[RC]  = (IB_QP_PKEY_INDEX |
					 IB_QP_PORT       |
					 IB_QP_ACCESS_FLAGS),
				[MLX] = (IB_QP_PKEY_INDEX |
					 IB_QP_QKEY),
			},
			/* bug-for-bug compatibility with VAPI: */
			.opt_param = {
				[MLX] = IB_QP_PORT
			}
		},
	},
	[IB_QPS_INIT]  = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_INIT]  = {
			.trans = MTHCA_TRANS_INIT2INIT,
			.opt_param = {
				[UD]  = (IB_QP_PKEY_INDEX |
					 IB_QP_PORT       |
					 IB_QP_QKEY),
				[UC]  = (IB_QP_PKEY_INDEX |
					 IB_QP_PORT       |
					 IB_QP_ACCESS_FLAGS),
				[RC]  = (IB_QP_PKEY_INDEX |
					 IB_QP_PORT       |
					 IB_QP_ACCESS_FLAGS),
				[MLX] = (IB_QP_PKEY_INDEX |
					 IB_QP_QKEY),
			}
		},
		[IB_QPS_RTR]   = {
			.trans = MTHCA_TRANS_INIT2RTR,
			.req_param = {
				[UC]  = (IB_QP_AV         |
					 IB_QP_PATH_MTU   |
					 IB_QP_DEST_QPN   |
					 IB_QP_RQ_PSN     |
					 IB_QP_MAX_DEST_RD_ATOMIC),
				[RC]  = (IB_QP_AV         |
					 IB_QP_PATH_MTU   |
					 IB_QP_DEST_QPN   |
					 IB_QP_RQ_PSN     |
					 IB_QP_MAX_DEST_RD_ATOMIC |
					 IB_QP_MIN_RNR_TIMER),
			},
			.opt_param = {
				[UD]  = (IB_QP_PKEY_INDEX |
					 IB_QP_QKEY),
				[UC]  = (IB_QP_ALT_PATH     |
					 IB_QP_ACCESS_FLAGS |
					 IB_QP_PKEY_INDEX),
				[RC]  = (IB_QP_ALT_PATH     |
					 IB_QP_ACCESS_FLAGS |
					 IB_QP_PKEY_INDEX),
				[MLX] = (IB_QP_PKEY_INDEX |
					 IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_RTR]   = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_RTS]   = {
			.trans = MTHCA_TRANS_RTR2RTS,
			.req_param = {
				[UD]  = IB_QP_SQ_PSN,
				[UC]  = (IB_QP_SQ_PSN |
					 IB_QP_MAX_QP_RD_ATOMIC),
				[RC]  = (IB_QP_TIMEOUT   |
					 IB_QP_RETRY_CNT |
					 IB_QP_RNR_RETRY |
					 IB_QP_SQ_PSN    |
					 IB_QP_MAX_QP_RD_ATOMIC),
				[MLX] = IB_QP_SQ_PSN,
			},
			.opt_param = {
				[UD]  = (IB_QP_CUR_STATE |
					 IB_QP_QKEY),
				[UC]  = (IB_QP_CUR_STATE    |
					 IB_QP_ALT_PATH     |
					 IB_QP_ACCESS_FLAGS |
					 IB_QP_PKEY_INDEX   |
					 IB_QP_PATH_MIG_STATE),
				[RC]  = (IB_QP_CUR_STATE     |
					 IB_QP_ALT_PATH      |
					 IB_QP_ACCESS_FLAGS  |
					 IB_QP_PKEY_INDEX    |
					 IB_QP_MIN_RNR_TIMER |
					 IB_QP_PATH_MIG_STATE),
				[MLX] = (IB_QP_CUR_STATE |
					 IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_RTS]   = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_RTS]   = {
			.trans = MTHCA_TRANS_RTS2RTS,
			.opt_param = {
				[UD]  = (IB_QP_CUR_STATE |
					 IB_QP_QKEY),
				[UC]  = (IB_QP_ACCESS_FLAGS |
					 IB_QP_ALT_PATH     |
					 IB_QP_PATH_MIG_STATE),
				[RC]  = (IB_QP_ACCESS_FLAGS   |
					 IB_QP_ALT_PATH       |
					 IB_QP_PATH_MIG_STATE |
					 IB_QP_MIN_RNR_TIMER),
				[MLX] = (IB_QP_CUR_STATE |
					 IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.trans = MTHCA_TRANS_RTS2SQD,
		},
	},
	[IB_QPS_SQD]   = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_RTS]   = {
			.trans = MTHCA_TRANS_SQD2RTS,
			.opt_param = {
				[UD]  = (IB_QP_CUR_STATE |
					 IB_QP_QKEY),
				[UC]  = (IB_QP_CUR_STATE    |
					 IB_QP_ALT_PATH     |
					 IB_QP_ACCESS_FLAGS |
					 IB_QP_PATH_MIG_STATE),
				[RC]  = (IB_QP_CUR_STATE     |
					 IB_QP_ALT_PATH      |
					 IB_QP_ACCESS_FLAGS  |
					 IB_QP_MIN_RNR_TIMER |
					 IB_QP_PATH_MIG_STATE),
				[MLX] = (IB_QP_CUR_STATE |
					 IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.trans = MTHCA_TRANS_SQD2SQD,
			.opt_param = {
				[UD]  = (IB_QP_PKEY_INDEX |
					 IB_QP_QKEY),
				[UC]  = (IB_QP_AV                 |
					 IB_QP_MAX_QP_RD_ATOMIC   |
					 IB_QP_MAX_DEST_RD_ATOMIC |
					 IB_QP_CUR_STATE          |
					 IB_QP_ALT_PATH           |
					 IB_QP_ACCESS_FLAGS       |
					 IB_QP_PKEY_INDEX         |
					 IB_QP_PATH_MIG_STATE),
				[RC]  = (IB_QP_AV                 |
					 IB_QP_TIMEOUT            |
					 IB_QP_RETRY_CNT          |
					 IB_QP_RNR_RETRY          |
					 IB_QP_MAX_QP_RD_ATOMIC   |
					 IB_QP_MAX_DEST_RD_ATOMIC |
					 IB_QP_CUR_STATE          |
					 IB_QP_ALT_PATH           |
					 IB_QP_ACCESS_FLAGS       |
					 IB_QP_PKEY_INDEX         |
					 IB_QP_MIN_RNR_TIMER      |
					 IB_QP_PATH_MIG_STATE),
				[MLX] = (IB_QP_PKEY_INDEX |
					 IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_SQE]   = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_RTS]   = {
			.trans = MTHCA_TRANS_SQERR2RTS,
			.opt_param = {
				[UD]  = (IB_QP_CUR_STATE |
					 IB_QP_QKEY),
				[UC]  = (IB_QP_CUR_STATE),
				[RC]  = (IB_QP_CUR_STATE |
					 IB_QP_MIN_RNR_TIMER),
				[MLX] = (IB_QP_CUR_STATE |
					 IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_ERR] = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR }
	}
};
static void store_attrs(struct mthca_sqp *sqp, struct ib_qp_attr *attr,
			int attr_mask)
{
	if (attr_mask & IB_QP_PKEY_INDEX)
		sqp->pkey_index = attr->pkey_index;
	if (attr_mask & IB_QP_QKEY)
		sqp->qkey = attr->qkey;
	if (attr_mask & IB_QP_SQ_PSN)
		sqp->send_psn = attr->sq_psn;
}

static void init_port(struct mthca_dev *dev, int port)
{
	int err;
	u8 status;
	struct mthca_init_ib_param param;

	memset(&param, 0, sizeof param);

	param.port_width = dev->limits.port_width_cap;
	param.vl_cap     = dev->limits.vl_cap;
	param.mtu_cap    = dev->limits.mtu_cap;
	param.gid_cap    = dev->limits.gid_table_len;
	param.pkey_cap   = dev->limits.pkey_table_len;

	err = mthca_INIT_IB(dev, &param, port, &status);
	if (err)
		mthca_warn(dev, "INIT_IB failed, return code %d.\n", err);
	if (status)
		mthca_warn(dev, "INIT_IB returned status %02x.\n", status);
}
int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct mthca_mailbox *mailbox;
	struct mthca_qp_param *qp_param;
	struct mthca_qp_context *qp_context;
	u32 req_param, opt_param;
	u8 status;
	int err;

	if (attr_mask & IB_QP_CUR_STATE) {
		if (attr->cur_qp_state != IB_QPS_RTR &&
		    attr->cur_qp_state != IB_QPS_RTS &&
		    attr->cur_qp_state != IB_QPS_SQD &&
		    attr->cur_qp_state != IB_QPS_SQE)
			return -EINVAL;
		else
			cur_state = attr->cur_qp_state;
	} else {
		spin_lock_irq(&qp->sq.lock);
		spin_lock(&qp->rq.lock);
		cur_state = qp->state;
		spin_unlock(&qp->rq.lock);
		spin_unlock_irq(&qp->sq.lock);
	}

	if (attr_mask & IB_QP_STATE) {
		if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR)
			return -EINVAL;
		new_state = attr->qp_state;
	} else
		new_state = cur_state;

	if (state_table[cur_state][new_state].trans == MTHCA_TRANS_INVALID) {
		mthca_dbg(dev, "Illegal QP transition "
			  "%d->%d\n", cur_state, new_state);
		return -EINVAL;
	}

	req_param = state_table[cur_state][new_state].req_param[qp->transport];
	opt_param = state_table[cur_state][new_state].opt_param[qp->transport];

	if ((req_param & attr_mask) != req_param) {
		mthca_dbg(dev, "QP transition "
			  "%d->%d missing req attr 0x%08x\n",
			  cur_state, new_state,
			  req_param & ~attr_mask);
		return -EINVAL;
	}

	if (attr_mask & ~(req_param | opt_param | IB_QP_STATE)) {
		mthca_dbg(dev, "QP transition (transport %d) "
			  "%d->%d has extra attr 0x%08x\n",
			  qp->transport,
			  cur_state, new_state,
			  attr_mask & ~(req_param | opt_param |
					IB_QP_STATE));
		return -EINVAL;
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	qp_param = mailbox->buf;
	qp_context = &qp_param->context;
	memset(qp_param, 0, sizeof *qp_param);

	qp_context->flags      = cpu_to_be32((to_mthca_state(new_state) << 28) |
					     (to_mthca_st(qp->transport) << 16));
	qp_context->flags     |= cpu_to_be32(MTHCA_QP_BIT_DE);
	if (!(attr_mask & IB_QP_PATH_MIG_STATE))
		qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
	else {
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PM_STATE);
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_ARMED << 11);
			break;
		}
	}

	/* leave tavor_sched_queue as 0 */

	if (qp->transport == MLX || qp->transport == UD)
		qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11;
	else if (attr_mask & IB_QP_PATH_MTU)
		qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;

	if (mthca_is_memfree(dev)) {
		if (qp->rq.max)
			qp_context->rq_size_stride = long_log2(qp->rq.max) << 3;
		qp_context->rq_size_stride |= qp->rq.wqe_shift - 4;

		if (qp->sq.max)
			qp_context->sq_size_stride = long_log2(qp->sq.max) << 3;
		qp_context->sq_size_stride |= qp->sq.wqe_shift - 4;
	}

	/* leave arbel_sched_queue as 0 */

	if (qp->ibqp.uobject)
		qp_context->usr_page =
			cpu_to_be32(to_mucontext(qp->ibqp.uobject->context)->uar.index);
	else
		qp_context->usr_page = cpu_to_be32(dev->driver_uar.index);
	qp_context->local_qpn = cpu_to_be32(qp->qpn);
	if (attr_mask & IB_QP_DEST_QPN) {
		qp_context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
	}

	if (qp->transport == MLX)
		qp_context->pri_path.port_pkey |=
			cpu_to_be32(to_msqp(qp)->port << 24);
	else {
		if (attr_mask & IB_QP_PORT) {
			qp_context->pri_path.port_pkey |=
				cpu_to_be32(attr->port_num << 24);
			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PORT_NUM);
		}
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		qp_context->pri_path.port_pkey |=
			cpu_to_be32(attr->pkey_index);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PKEY_INDEX);
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp_context->pri_path.rnr_retry = attr->rnr_retry << 5;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY);
	}

	if (attr_mask & IB_QP_AV) {
		qp_context->pri_path.g_mylmc     = attr->ah_attr.src_path_bits & 0x7f;
		qp_context->pri_path.rlid        = cpu_to_be16(attr->ah_attr.dlid);
		qp_context->pri_path.static_rate = !!attr->ah_attr.static_rate;
		if (attr->ah_attr.ah_flags & IB_AH_GRH) {
			qp_context->pri_path.g_mylmc |= 1 << 7;
			qp_context->pri_path.mgid_index = attr->ah_attr.grh.sgid_index;
			qp_context->pri_path.hop_limit  = attr->ah_attr.grh.hop_limit;
			qp_context->pri_path.sl_tclass_flowlabel =
				cpu_to_be32((attr->ah_attr.sl << 28)                |
					    (attr->ah_attr.grh.traffic_class << 20) |
					    (attr->ah_attr.grh.flow_label));
			memcpy(qp_context->pri_path.rgid,
			       attr->ah_attr.grh.dgid.raw, 16);
		} else {
			qp_context->pri_path.sl_tclass_flowlabel =
				cpu_to_be32(attr->ah_attr.sl << 28);
		}
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
	}

	if (attr_mask & IB_QP_TIMEOUT) {
		qp_context->pri_path.ackto = attr->timeout << 3;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
	}

	/* XXX alt_path */

	/* leave rdd as 0 */
	qp_context->pd       = cpu_to_be32(to_mpd(ibqp->pd)->pd_num);
	/* leave wqe_base as 0 (we always create an MR based at 0 for WQs) */
	qp_context->wqe_lkey = cpu_to_be32(qp->mr.ibmr.lkey);
	qp_context->params1  = cpu_to_be32((MTHCA_ACK_REQ_FREQ << 28) |
					   (MTHCA_FLIGHT_LIMIT << 24) |
					   MTHCA_QP_BIT_SRE           |
					   MTHCA_QP_BIT_SWE           |
					   MTHCA_QP_BIT_SAE);
	if (qp->sq_policy == IB_SIGNAL_ALL_WR)
		qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SSC);

	if (attr_mask & IB_QP_RETRY_CNT) {
		qp_context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT);
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		qp_context->params1 |= cpu_to_be32(min(attr->max_rd_atomic ?
						       ffs(attr->max_rd_atomic) - 1 : 0,
						       7) << 21);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX);
	}

	if (attr_mask & IB_QP_SQ_PSN)
		qp_context->next_send_psn = cpu_to_be32(attr->sq_psn);
	qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn);

	if (mthca_is_memfree(dev)) {
		qp_context->snd_wqe_base_l = cpu_to_be32(qp->send_wqe_offset);
		qp_context->snd_db_index   = cpu_to_be32(qp->sq.db_index);
	}

	if (attr_mask & IB_QP_ACCESS_FLAGS) {
		/*
		 * Only enable RDMA/atomics if we have responder
		 * resources set to a non-zero value.
		 */
		if (qp->resp_depth) {
			qp_context->params2 |=
				cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE ?
					    MTHCA_QP_BIT_RWE : 0);
			qp_context->params2 |=
				cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_READ ?
					    MTHCA_QP_BIT_RRE : 0);
			qp_context->params2 |=
				cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC ?
					    MTHCA_QP_BIT_RAE : 0);
		}

		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
							MTHCA_QP_OPTPAR_RRE |
							MTHCA_QP_OPTPAR_RAE);

		qp->atomic_rd_en = attr->qp_access_flags;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		u8 rra_max;

		if (qp->resp_depth && !attr->max_dest_rd_atomic) {
			/*
			 * Lowering our responder resources to zero.
			 * Turn off RDMA/atomics as responder.
			 * (RWE/RRE/RAE in params2 already zero)
			 */
			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
								MTHCA_QP_OPTPAR_RRE |
								MTHCA_QP_OPTPAR_RAE);
		}

		if (!qp->resp_depth && attr->max_dest_rd_atomic) {
			/*
			 * Increasing our responder resources from
			 * zero. Turn on RDMA/atomics as appropriate.
			 */
			qp_context->params2 |=
				cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_WRITE ?
					    MTHCA_QP_BIT_RWE : 0);
			qp_context->params2 |=
				cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_READ ?
					    MTHCA_QP_BIT_RRE : 0);
			qp_context->params2 |=
				cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_ATOMIC ?
					    MTHCA_QP_BIT_RAE : 0);

			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
								MTHCA_QP_OPTPAR_RRE |
								MTHCA_QP_OPTPAR_RAE);
		}
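
		/*
		 * Round max_dest_rd_atomic up to the next power of two,
		 * capped by the number of RDB entries allocated per QP
		 * (1 << rdb_shift).
		 */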
		for (rra_max = 0;
		     1 << rra_max < attr->max_dest_rd_atomic &&
			     rra_max < dev->qp_table.rdb_shift;
		     ++rra_max)
			; /* nothing */

		qp_context->params2      |= cpu_to_be32(rra_max << 21);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX);

		qp->resp_depth = attr->max_dest_rd_atomic;
	}

	qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);

	if (ibqp->srq)
		qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RIC);

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT);
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);
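
	/*
	 * Each QP owns a chunk of 1 << rdb_shift entries in the RDB
	 * (inbound RDMA/atomic responder) table; point the context at
	 * this QP's chunk.
	 */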
	qp_context->ra_buff_indx =
		cpu_to_be32(dev->qp_table.rdb_base +
			    ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE <<
			     dev->qp_table.rdb_shift));

	qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn);

	if (mthca_is_memfree(dev))
		qp_context->rcv_db_index = cpu_to_be32(qp->rq.db_index);

	if (attr_mask & IB_QP_QKEY) {
		qp_context->qkey = cpu_to_be32(attr->qkey);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY);
	}

	if (ibqp->srq)
		qp_context->srqn = cpu_to_be32(1 << 24 |
					       to_msrq(ibqp->srq)->srqn);

	err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans,
			      qp->qpn, 0, mailbox, 0, &status);
	if (status) {
		mthca_warn(dev, "modify QP %d returned status %02x.\n",
			   state_table[cur_state][new_state].trans, status);
		err = -EINVAL;
	}

	if (!err)
		qp->state = new_state;

	mthca_free_mailbox(dev, mailbox);

	if (is_sqp(dev, qp))
		store_attrs(to_msqp(qp), attr, attr_mask);

	/*
	 * If we moved QP0 to RTR, bring the IB link up; if we moved
	 * QP0 to RESET or ERROR, bring the link back down.
	 */
	if (is_qp0(dev, qp)) {
		if (cur_state != IB_QPS_RTR &&
		    new_state == IB_QPS_RTR)
			init_port(dev, to_msqp(qp)->port);

		if (cur_state != IB_QPS_RESET &&
		    cur_state != IB_QPS_ERR &&
		    (new_state == IB_QPS_RESET ||
		     new_state == IB_QPS_ERR))
			mthca_CLOSE_IB(dev, to_msqp(qp)->port, &status);
	}

	/*
	 * If we moved a kernel QP to RESET, clean up all old CQ
	 * entries and reinitialize the QP.
	 */
	if (!err && new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
				       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);

		mthca_wq_init(&qp->sq);
		mthca_wq_init(&qp->rq);

		if (mthca_is_memfree(dev)) {
			*qp->sq.db = 0;
			*qp->rq.db = 0;
		}
	}

	return err;
}
/*
 * Allocate and register buffer for WQEs. qp->rq.max, sq.max,
 * rq.max_gs and sq.max_gs must all be assigned.
 * mthca_alloc_wqe_buf will calculate rq.wqe_shift and
 * sq.wqe_shift (as well as send_wqe_offset, is_direct, and
 * queue)
 */
static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
			       struct mthca_pd *pd,
			       struct mthca_qp *qp)
{
	int size;
	int err = -ENOMEM;

	size = sizeof (struct mthca_next_seg) +
		qp->rq.max_gs * sizeof (struct mthca_data_seg);

	for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size;
	     qp->rq.wqe_shift++)
		; /* nothing */

	size = sizeof (struct mthca_next_seg) +
		qp->sq.max_gs * sizeof (struct mthca_data_seg);
	switch (qp->transport) {
	case MLX:
		size += 2 * sizeof (struct mthca_data_seg);
		break;
	case UD:
		if (mthca_is_memfree(dev))
			size += sizeof (struct mthca_arbel_ud_seg);
		else
			size += sizeof (struct mthca_tavor_ud_seg);
		break;
	default:
		/* bind seg is as big as atomic + raddr segs */
		size += sizeof (struct mthca_bind_seg);
	}

	for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size;
	     qp->sq.wqe_shift++)
		; /* nothing */

	qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift,
				    1 << qp->sq.wqe_shift);

	/*
	 * If this is a userspace QP, we don't actually have to
	 * allocate anything. All we need is to calculate the WQE
	 * sizes and the send_wqe_offset, so we're done now.
	 */
	if (pd->ibpd.uobject)
		return 0;

	size = PAGE_ALIGN(qp->send_wqe_offset +
			  (qp->sq.max << qp->sq.wqe_shift));

	qp->wrid = kmalloc((qp->rq.max + qp->sq.max) * sizeof (u64),
			   GFP_KERNEL);
	if (!qp->wrid)
		goto err_out;

	err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE,
			      &qp->queue, &qp->is_direct, pd, 0, &qp->mr);
	if (err)
		goto err_out;

	return 0;

err_out:
	kfree(qp->wrid);
	return err;
}
static void mthca_free_wqe_buf(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset +
				       (qp->sq.max << qp->sq.wqe_shift)),
		       &qp->queue, qp->is_direct, &qp->mr);
	kfree(qp->wrid);
}

static int mthca_map_memfree(struct mthca_dev *dev,
			     struct mthca_qp *qp)
{
	int ret;

	if (mthca_is_memfree(dev)) {
		ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn);
		if (ret)
			return ret;

		ret = mthca_table_get(dev, dev->qp_table.eqp_table, qp->qpn);
		if (ret)
			goto err_qpc;

		ret = mthca_table_get(dev, dev->qp_table.rdb_table,
				      qp->qpn << dev->qp_table.rdb_shift);
		if (ret)
			goto err_eqpc;
	}

	return 0;

err_eqpc:
	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);

err_qpc:
	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);

	return ret;
}

static void mthca_unmap_memfree(struct mthca_dev *dev,
				struct mthca_qp *qp)
{
	mthca_table_put(dev, dev->qp_table.rdb_table,
			qp->qpn << dev->qp_table.rdb_shift);
	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
}
static int mthca_alloc_memfree(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	if (mthca_is_memfree(dev)) {
		qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ,
						 qp->qpn, &qp->rq.db);
		if (qp->rq.db_index < 0)
			return -ENOMEM;

		qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ,
						 qp->qpn, &qp->sq.db);
		if (qp->sq.db_index < 0) {
			mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
			return -ENOMEM;
		}
	}

	return 0;
}
static void mthca_free_memfree(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	if (mthca_is_memfree(dev)) {
		mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index);
		mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
	}
}

static int mthca_alloc_qp_common(struct mthca_dev *dev,
				 struct mthca_pd *pd,
				 struct mthca_cq *send_cq,
				 struct mthca_cq *recv_cq,
				 enum ib_sig_type send_policy,
				 struct mthca_qp *qp)
{
	int ret;
	int i;

	atomic_set(&qp->refcount, 1);
	init_waitqueue_head(&qp->wait);
	qp->state        = IB_QPS_RESET;
	qp->atomic_rd_en = 0;
	qp->resp_depth   = 0;
	qp->sq_policy    = send_policy;
	mthca_wq_init(&qp->sq);
	mthca_wq_init(&qp->rq);

	ret = mthca_map_memfree(dev, qp);
	if (ret)
		return ret;

	ret = mthca_alloc_wqe_buf(dev, pd, qp);
	if (ret) {
		mthca_unmap_memfree(dev, qp);
		return ret;
	}

	/*
	 * If this is a userspace QP, we're done now. The doorbells
	 * will be allocated and buffers will be initialized in
	 * userspace.
	 */
	if (pd->ibpd.uobject)
		return 0;

	ret = mthca_alloc_memfree(dev, qp);
	if (ret) {
		mthca_free_wqe_buf(dev, qp);
		mthca_unmap_memfree(dev, qp);
		return ret;
	}
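
	/*
	 * On mem-free HCAs, pre-link the receive and send WQEs into
	 * rings; receive scatter entries get the invalid lkey so the
	 * HCA ignores unused entries.
	 */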
	if (mthca_is_memfree(dev)) {
		struct mthca_next_seg *next;
		struct mthca_data_seg *scatter;
		int size = (sizeof (struct mthca_next_seg) +
			    qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16;

		for (i = 0; i < qp->rq.max; ++i) {
			next = get_recv_wqe(qp, i);
			next->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) <<
						   qp->rq.wqe_shift);
			next->ee_nds = cpu_to_be32(size);

			for (scatter = (void *) (next + 1);
			     (void *) scatter < (void *) next + (1 << qp->rq.wqe_shift);
			     ++scatter)
				scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
		}

		for (i = 0; i < qp->sq.max; ++i) {
			next = get_send_wqe(qp, i);
			next->nda_op = cpu_to_be32((((i + 1) & (qp->sq.max - 1)) <<
						    qp->sq.wqe_shift) +
						   qp->send_wqe_offset);
		}
	}

	qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
	qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);

	return 0;
}
static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap,
			     struct mthca_qp *qp)
{
	/* Sanity check QP size before proceeding */
	if (cap->max_send_wr > 65536 || cap->max_recv_wr > 65536 ||
	    cap->max_send_sge > 64 || cap->max_recv_sge > 64)
		return -EINVAL;

	if (mthca_is_memfree(dev)) {
		qp->rq.max = cap->max_recv_wr ?
			roundup_pow_of_two(cap->max_recv_wr) : 0;
		qp->sq.max = cap->max_send_wr ?
			roundup_pow_of_two(cap->max_send_wr) : 0;
	} else {
		qp->rq.max = cap->max_recv_wr;
		qp->sq.max = cap->max_send_wr;
	}

	qp->rq.max_gs = cap->max_recv_sge;
	qp->sq.max_gs = max_t(int, cap->max_send_sge,
			      ALIGN(cap->max_inline_data + MTHCA_INLINE_HEADER_SIZE,
				    MTHCA_INLINE_CHUNK_SIZE) /
			      sizeof (struct mthca_data_seg));

	/*
	 * For MLX transport we need 2 extra S/G entries:
	 * one for the header and one for the checksum at the end
	 */
	if ((qp->transport == MLX && qp->sq.max_gs + 2 > dev->limits.max_sg) ||
	    qp->sq.max_gs > dev->limits.max_sg || qp->rq.max_gs > dev->limits.max_sg)
		return -EINVAL;

	return 0;
}
int mthca_alloc_qp(struct mthca_dev *dev,
		   struct mthca_pd *pd,
		   struct mthca_cq *send_cq,
		   struct mthca_cq *recv_cq,
		   enum ib_qp_type type,
		   enum ib_sig_type send_policy,
		   struct ib_qp_cap *cap,
		   struct mthca_qp *qp)
{
	int err;

	err = mthca_set_qp_size(dev, cap, qp);
	if (err)
		return err;

	switch (type) {
	case IB_QPT_RC: qp->transport = RC; break;
	case IB_QPT_UC: qp->transport = UC; break;
	case IB_QPT_UD: qp->transport = UD; break;
	default: return -EINVAL;
	}

	qp->qpn = mthca_alloc(&dev->qp_table.alloc);
	if (qp->qpn == -1)
		return -ENOMEM;

	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
				    send_policy, qp);
	if (err) {
		mthca_free(&dev->qp_table.alloc, qp->qpn);
		return err;
	}

	spin_lock_irq(&dev->qp_table.lock);
	mthca_array_set(&dev->qp_table.qp,
			qp->qpn & (dev->limits.num_qps - 1), qp);
	spin_unlock_irq(&dev->qp_table.lock);

	return 0;
}
int mthca_alloc_sqp(struct mthca_dev *dev,
		    struct mthca_pd *pd,
		    struct mthca_cq *send_cq,
		    struct mthca_cq *recv_cq,
		    enum ib_sig_type send_policy,
		    struct ib_qp_cap *cap,
		    int qpn,
		    int port,
		    struct mthca_sqp *sqp)
{
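	/*
	 * Map (qpn, port) to the reserved QPN range: QP0 for ports 1
	 * and 2 occupy sqp_start and sqp_start + 1, QP1 the next two.
	 */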
	u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
	int err;

	err = mthca_set_qp_size(dev, cap, &sqp->qp);
	if (err)
		return err;

	sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE;
	sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size,
					     &sqp->header_dma, GFP_KERNEL);
	if (!sqp->header_buf)
		return -ENOMEM;

	spin_lock_irq(&dev->qp_table.lock);
	if (mthca_array_get(&dev->qp_table.qp, mqpn))
		err = -EBUSY;
	else
		mthca_array_set(&dev->qp_table.qp, mqpn, sqp);
	spin_unlock_irq(&dev->qp_table.lock);

	if (err)
		goto err_out;

	sqp->port         = port;
	sqp->qp.qpn       = mqpn;
	sqp->qp.transport = MLX;

	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
				    send_policy, &sqp->qp);
	if (err)
		goto err_out_free;

	atomic_inc(&pd->sqp_count);

	return 0;

err_out_free:
	/*
	 * Lock CQs here, so that CQ polling code can do QP lookup
	 * without taking a lock.
	 */
	spin_lock_irq(&send_cq->lock);
	if (send_cq != recv_cq)
		spin_lock(&recv_cq->lock);

	spin_lock(&dev->qp_table.lock);
	mthca_array_clear(&dev->qp_table.qp, mqpn);
	spin_unlock(&dev->qp_table.lock);

	if (send_cq != recv_cq)
		spin_unlock(&recv_cq->lock);
	spin_unlock_irq(&send_cq->lock);

err_out:
	dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
			  sqp->header_buf, sqp->header_dma);

	return err;
}
void mthca_free_qp(struct mthca_dev *dev,
		   struct mthca_qp *qp)
{
	u8 status;
	struct mthca_cq *send_cq;
	struct mthca_cq *recv_cq;

	send_cq = to_mcq(qp->ibqp.send_cq);
	recv_cq = to_mcq(qp->ibqp.recv_cq);

	/*
	 * Lock CQs here, so that CQ polling code can do QP lookup
	 * without taking a lock.
	 */
	spin_lock_irq(&send_cq->lock);
	if (send_cq != recv_cq)
		spin_lock(&recv_cq->lock);

	spin_lock(&dev->qp_table.lock);
	mthca_array_clear(&dev->qp_table.qp,
			  qp->qpn & (dev->limits.num_qps - 1));
	spin_unlock(&dev->qp_table.lock);

	if (send_cq != recv_cq)
		spin_unlock(&recv_cq->lock);
	spin_unlock_irq(&send_cq->lock);

	atomic_dec(&qp->refcount);
	wait_event(qp->wait, !atomic_read(&qp->refcount));

	if (qp->state != IB_QPS_RESET)
		mthca_MODIFY_QP(dev, MTHCA_TRANS_ANY2RST, qp->qpn, 0, NULL, 0, &status);

	/*
	 * If this is a userspace QP, the buffers, MR, CQs and so on
	 * will be cleaned up in userspace, so all we have to do is
	 * unref the mem-free tables and free the QPN in our table.
	 */
	if (!qp->ibqp.uobject) {
		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
				       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);

		mthca_free_memfree(dev, qp);
		mthca_free_wqe_buf(dev, qp);
	}

	mthca_unmap_memfree(dev, qp);

	if (is_sqp(dev, qp)) {
		atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count));
		dma_free_coherent(&dev->pdev->dev,
				  to_msqp(qp)->header_buf_size,
				  to_msqp(qp)->header_buf,
				  to_msqp(qp)->header_dma);
	} else
		mthca_free(&dev->qp_table.alloc, qp->qpn);
}
/* Create UD header for an MLX send and build a data segment for it */
static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
			    int ind, struct ib_send_wr *wr,
			    struct mthca_mlx_seg *mlx,
			    struct mthca_data_seg *data)
{
	int header_size;
	int err;
	u16 pkey;

	ib_ud_header_init(256, /* assume a MAD */
			  sqp->ud_header.grh_present,
			  &sqp->ud_header);

	err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header);
	if (err)
		return err;
	mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1);
	mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
				  (sqp->ud_header.lrh.destination_lid ==
				   IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) |
				  (sqp->ud_header.lrh.service_level << 8));
	mlx->rlid = sqp->ud_header.lrh.destination_lid;
	mlx->vcrc = 0;

	switch (wr->opcode) {
	case IB_WR_SEND:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
		sqp->ud_header.immediate_present = 0;
		break;
	case IB_WR_SEND_WITH_IMM:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		sqp->ud_header.immediate_present = 1;
		sqp->ud_header.immediate_data = wr->imm_data;
		break;
	default:
		return -EINVAL;
	}

	sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0;
	if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
		sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
	sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
	if (!sqp->qp.ibqp.qp_num)
		ib_get_cached_pkey(&dev->ib_dev, sqp->port,
				   sqp->pkey_index, &pkey);
	else
		ib_get_cached_pkey(&dev->ib_dev, sqp->port,
				   wr->wr.ud.pkey_index, &pkey);
	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
	sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
					       sqp->qkey : wr->wr.ud.remote_qkey);
	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);

	header_size = ib_ud_header_pack(&sqp->ud_header,
					sqp->header_buf +
					ind * MTHCA_UD_HEADER_SIZE);

	data->byte_count = cpu_to_be32(header_size);
	data->lkey       = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey);
	data->addr       = cpu_to_be64(sqp->header_dma +
				       ind * MTHCA_UD_HEADER_SIZE);

	return 0;
}
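
/*
 * Optimistically read head/tail without locking; only if the queue
 * looks full do we retake the CQ lock and recheck, since completions
 * (which advance the tail) are processed under cq->lock.
 */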
static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq,
				    struct ib_cq *ib_cq)
{
	unsigned cur;
	struct mthca_cq *cq;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max;
}
int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	void *wqe;
	void *prev_wqe;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	int size0 = 0;
	u32 f0 = 0;
	int ind;
	u8 op0 = 0;

	spin_lock_irqsave(&qp->sq.lock, flags);

	/* XXX check that state is OK to post send */

	ind = qp->sq.next_ind;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			mthca_err(dev, "SQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->sq.head, qp->sq.tail,
					qp->sq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind);
		prev_wqe = qp->sq.last;
		qp->sq.last = wqe;

		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		((struct mthca_next_seg *) wqe)->flags =
			((wr->send_flags & IB_SEND_SIGNALED) ?
			 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
			((wr->send_flags & IB_SEND_SOLICITED) ?
			 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0)   |
			cpu_to_be32(1);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			((struct mthca_next_seg *) wqe)->imm = wr->imm_data;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		switch (qp->transport) {
		case RC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.atomic.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.atomic.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;

				wqe += sizeof (struct mthca_raddr_seg);

				if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.swap);
					((struct mthca_atomic_seg *) wqe)->compare =
						cpu_to_be64(wr->wr.atomic.compare_add);
				} else {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.compare_add);
					((struct mthca_atomic_seg *) wqe)->compare = 0;
				}
				wqe += sizeof (struct mthca_atomic_seg);
				/* size is counted in 16-byte chunks */
				size += (sizeof (struct mthca_raddr_seg) +
					 sizeof (struct mthca_atomic_seg)) / 16;
  1298. break;
  1299. case IB_WR_RDMA_WRITE:
  1300. case IB_WR_RDMA_WRITE_WITH_IMM:
  1301. case IB_WR_RDMA_READ:
  1302. ((struct mthca_raddr_seg *) wqe)->raddr =
  1303. cpu_to_be64(wr->wr.rdma.remote_addr);
  1304. ((struct mthca_raddr_seg *) wqe)->rkey =
  1305. cpu_to_be32(wr->wr.rdma.rkey);
  1306. ((struct mthca_raddr_seg *) wqe)->reserved = 0;
  1307. wqe += sizeof (struct mthca_raddr_seg);
  1308. size += sizeof (struct mthca_raddr_seg) / 16;
  1309. break;
  1310. default:
  1311. /* No extra segments required for sends */
  1312. break;
  1313. }
  1314. break;
  1315. case UC:
  1316. switch (wr->opcode) {
  1317. case IB_WR_RDMA_WRITE:
  1318. case IB_WR_RDMA_WRITE_WITH_IMM:
  1319. ((struct mthca_raddr_seg *) wqe)->raddr =
  1320. cpu_to_be64(wr->wr.rdma.remote_addr);
  1321. ((struct mthca_raddr_seg *) wqe)->rkey =
  1322. cpu_to_be32(wr->wr.rdma.rkey);
  1323. ((struct mthca_raddr_seg *) wqe)->reserved = 0;
  1324. wqe += sizeof (struct mthca_raddr_seg);
  1325. size += sizeof (struct mthca_raddr_seg) / 16;
  1326. break;
  1327. default:
  1328. /* No extra segments required for sends */
  1329. break;
  1330. }
  1331. break;
  1332. case UD:
  1333. ((struct mthca_tavor_ud_seg *) wqe)->lkey =
  1334. cpu_to_be32(to_mah(wr->wr.ud.ah)->key);
  1335. ((struct mthca_tavor_ud_seg *) wqe)->av_addr =
  1336. cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma);
  1337. ((struct mthca_tavor_ud_seg *) wqe)->dqpn =
  1338. cpu_to_be32(wr->wr.ud.remote_qpn);
  1339. ((struct mthca_tavor_ud_seg *) wqe)->qkey =
  1340. cpu_to_be32(wr->wr.ud.remote_qkey);
  1341. wqe += sizeof (struct mthca_tavor_ud_seg);
  1342. size += sizeof (struct mthca_tavor_ud_seg) / 16;
  1343. break;
  1344. case MLX:
  1345. err = build_mlx_header(dev, to_msqp(qp), ind, wr,
  1346. wqe - sizeof (struct mthca_next_seg),
  1347. wqe);
  1348. if (err) {
  1349. *bad_wr = wr;
  1350. goto out;
  1351. }
  1352. wqe += sizeof (struct mthca_data_seg);
  1353. size += sizeof (struct mthca_data_seg) / 16;
  1354. break;
  1355. }
  1356. if (wr->num_sge > qp->sq.max_gs) {
  1357. mthca_err(dev, "too many gathers\n");
  1358. err = -EINVAL;
  1359. *bad_wr = wr;
  1360. goto out;
  1361. }
  1362. for (i = 0; i < wr->num_sge; ++i) {
  1363. ((struct mthca_data_seg *) wqe)->byte_count =
  1364. cpu_to_be32(wr->sg_list[i].length);
  1365. ((struct mthca_data_seg *) wqe)->lkey =
  1366. cpu_to_be32(wr->sg_list[i].lkey);
  1367. ((struct mthca_data_seg *) wqe)->addr =
  1368. cpu_to_be64(wr->sg_list[i].addr);
  1369. wqe += sizeof (struct mthca_data_seg);
  1370. size += sizeof (struct mthca_data_seg) / 16;
  1371. }
  1372. /* Add one more inline data segment for ICRC */
		if (qp->transport == MLX) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32((1 << 31) | 4);
			((u32 *) wqe)[1] = 0;
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind + qp->rq.max] = wr->wr_id;

		if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
			mthca_err(dev, "opcode invalid\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}
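		/*
		 * Link the previous WQE to the new one: nda_op carries
		 * the new WQE's offset and opcode, ee_nds its size in
		 * 16-byte chunks.  The wmb() orders the two stores so
		 * the HCA never sees a valid size before a valid
		 * address.  MTHCA_NEXT_DBD is set only when linking the
		 * first WQE of this batch, presumably because the old
		 * chain tail was already covered by an earlier
		 * doorbell.
		 */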
		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32(((ind << qp->sq.wqe_shift) +
				     qp->send_wqe_offset) |
				    mthca_opcode[wr->opcode]);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size);

		if (!size0) {
			size0 = size;
			op0   = mthca_opcode[wr->opcode];
		}

		++ind;
		if (unlikely(ind >= qp->sq.max))
			ind -= qp->sq.max;
	}

out:
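	/*
	 * One send doorbell covers the whole chain: word 0 holds the
	 * offset of the first new WQE together with the fence bits
	 * (f0) and the first opcode, word 1 the QP number and the
	 * first WQE's size in 16-byte chunks.
	 */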
	if (likely(nreq)) {
		__be32 doorbell[2];

		doorbell[0] = cpu_to_be32(((qp->sq.next_ind << qp->sq.wqe_shift) +
					   qp->send_wqe_offset) | f0 | op0);
		doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);

		wmb();

		mthca_write64(doorbell,
			      dev->kar + MTHCA_SEND_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	qp->sq.next_ind = ind;
	qp->sq.head    += nreq;

	spin_unlock_irqrestore(&qp->sq.lock, flags);
	return err;
}
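
/*
 * Post a list of receive work requests to a Tavor-mode QP.  Receive
 * WQEs are chained the same way as send WQEs, with a single receive
 * doorbell rung for the whole batch.
 */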
int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	int size0 = 0;
	int ind;
	void *wqe;
	void *prev_wqe;

	spin_lock_irqsave(&qp->rq.lock, flags);

	/* XXX check that state is OK to post receive */

	ind = qp->rq.next_ind;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			mthca_err(dev, "RQ %06x full (%u head, %u tail,"
				  " %d max, %d nreq)\n", qp->qpn,
				  qp->rq.head, qp->rq.tail,
				  qp->rq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_recv_wqe(qp, ind);
		prev_wqe = qp->rq.last;
		qp->rq.last = wqe;

		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD);
		((struct mthca_next_seg *) wqe)->flags = 0;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind] = wr->wr_id;

		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD | size);

		if (!size0)
			size0 = size;

		++ind;
		if (unlikely(ind >= qp->rq.max))
			ind -= qp->rq.max;
	}

out:
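	/*
	 * Receive doorbell: word 0 holds the offset of the first new
	 * WQE and its size, word 1 the QP number and the number of
	 * WQEs posted.
	 */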
	if (likely(nreq)) {
		__be32 doorbell[2];

		doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
		doorbell[1] = cpu_to_be32((qp->qpn << 8) | nreq);

		wmb();

		mthca_write64(doorbell,
			      dev->kar + MTHCA_RECEIVE_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	qp->rq.next_ind = ind;
	qp->rq.head    += nreq;

	spin_unlock_irqrestore(&qp->rq.lock, flags);
	return err;
}
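
/*
 * Post a list of send work requests to an Arbel-mode (mem-free) QP.
 * Arbel adds an in-memory doorbell record that must be updated
 * before the MMIO doorbell is written.
 */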
int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	void *wqe;
	void *prev_wqe;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	int size0 = 0;
	u32 f0 = 0;
	int ind;
	u8 op0 = 0;

	spin_lock_irqsave(&qp->sq.lock, flags);

	/* XXX check that state is OK to post send */
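	/*
	 * qp->sq.max is presumably a power of two here, so the mask
	 * turns the free-running head counter into a ring index.
	 */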
	ind = qp->sq.head & (qp->sq.max - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			mthca_err(dev, "SQ %06x full (%u head, %u tail,"
				  " %d max, %d nreq)\n", qp->qpn,
				  qp->sq.head, qp->sq.tail,
				  qp->sq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind);
		prev_wqe = qp->sq.last;
		qp->sq.last = wqe;

		((struct mthca_next_seg *) wqe)->flags =
			((wr->send_flags & IB_SEND_SIGNALED) ?
			 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
			((wr->send_flags & IB_SEND_SOLICITED) ?
			 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0) |
			cpu_to_be32(1);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			((struct mthca_next_seg *) wqe)->imm = wr->imm_data;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		switch (qp->transport) {
		case RC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.atomic.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.atomic.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;

				wqe += sizeof (struct mthca_raddr_seg);

				if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.swap);
					((struct mthca_atomic_seg *) wqe)->compare =
						cpu_to_be64(wr->wr.atomic.compare_add);
				} else {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.compare_add);
					((struct mthca_atomic_seg *) wqe)->compare = 0;
				}

				wqe += sizeof (struct mthca_atomic_seg);
				/* WQE size is counted in 16-byte chunks */
				size += (sizeof (struct mthca_raddr_seg) +
					 sizeof (struct mthca_atomic_seg)) / 16;
				break;

			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;

				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;

				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UD:
			memcpy(((struct mthca_arbel_ud_seg *) wqe)->av,
			       to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE);
			((struct mthca_arbel_ud_seg *) wqe)->dqpn =
				cpu_to_be32(wr->wr.ud.remote_qpn);
			((struct mthca_arbel_ud_seg *) wqe)->qkey =
				cpu_to_be32(wr->wr.ud.remote_qkey);

			wqe += sizeof (struct mthca_arbel_ud_seg);
			size += sizeof (struct mthca_arbel_ud_seg) / 16;
			break;

		case MLX:
			err = build_mlx_header(dev, to_msqp(qp), ind, wr,
					       wqe - sizeof (struct mthca_next_seg),
					       wqe);
			if (err) {
				*bad_wr = wr;
				goto out;
			}

			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
			break;
		}

		if (wr->num_sge > qp->sq.max_gs) {
			mthca_err(dev, "too many gathers\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		/* Add one more inline data segment for ICRC */
		if (qp->transport == MLX) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32((1 << 31) | 4);
			((u32 *) wqe)[1] = 0;
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind + qp->rq.max] = wr->wr_id;

		if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
			mthca_err(dev, "opcode invalid\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32(((ind << qp->sq.wqe_shift) +
				     qp->send_wqe_offset) |
				    mthca_opcode[wr->opcode]);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD | size);

		if (!size0) {
			size0 = size;
			op0   = mthca_opcode[wr->opcode];
		}

		++ind;
		if (unlikely(ind >= qp->sq.max))
			ind -= qp->sq.max;
	}

out:
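	/*
	 * Doorbell sequence: build the MMIO doorbell words (WQE count,
	 * masked head, f0 and first opcode; QP number and first-WQE
	 * size), advance the head, then let the two wmb()s order the
	 * descriptor writes, the doorbell record update, and the MMIO
	 * write.
	 */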
	if (likely(nreq)) {
		__be32 doorbell[2];

		doorbell[0] = cpu_to_be32((nreq << 24) |
					  ((qp->sq.head & 0xffff) << 8) |
					  f0 | op0);
		doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);

		qp->sq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();
		*qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);

		/*
		 * Make sure doorbell record is written before we
		 * write MMIO send doorbell.
		 */
		wmb();
		mthca_write64(doorbell,
			      dev->kar + MTHCA_SEND_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);
	return err;
}
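
/*
 * Post a list of receive work requests to an Arbel-mode QP.  No MMIO
 * doorbell is needed: updating the doorbell record is apparently
 * enough for the HCA to notice the new WQEs.
 */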
int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	unsigned long flags;
	int err = 0;
	int nreq;
	int ind;
	int i;
	void *wqe;

	spin_lock_irqsave(&qp->rq.lock, flags);

	/* XXX check that state is OK to post receive */

	ind = qp->rq.head & (qp->rq.max - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			mthca_err(dev, "RQ %06x full (%u head, %u tail,"
				  " %d max, %d nreq)\n", qp->qpn,
				  qp->rq.head, qp->rq.tail,
				  qp->rq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_recv_wqe(qp, ind);

		((struct mthca_next_seg *) wqe)->flags = 0;

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
		}
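		/*
		 * Terminate a short scatter list with a zero-length
		 * segment carrying the invalid lkey, presumably so the
		 * HCA stops scanning at it.
		 */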
		if (i < qp->rq.max_gs) {
			((struct mthca_data_seg *) wqe)->byte_count = 0;
			((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
			((struct mthca_data_seg *) wqe)->addr = 0;
		}

		qp->wrid[ind] = wr->wr_id;

		++ind;
		if (unlikely(ind >= qp->rq.max))
			ind -= qp->rq.max;
	}
out:
	if (likely(nreq)) {
		qp->rq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();
		*qp->rq.db = cpu_to_be32(qp->rq.head & 0xffff);
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);
	return err;
}
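
/*
 * Called on a WQE that completed in error: report whether it had the
 * doorbell bit set (*dbd) and compute the nda_op value (*new_wqe)
 * needed to link the chain around it; zero means nothing to relink.
 */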
int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
		       int index, int *dbd, __be32 *new_wqe)
{
	struct mthca_next_seg *next;

	/*
	 * For SRQs, all WQEs generate a CQE, so we're always at the
	 * end of the doorbell chain.
	 */
	if (qp->ibqp.srq) {
		*new_wqe = 0;
		return 0;
	}

	if (is_send)
		next = get_send_wqe(qp, index);
	else
		next = get_recv_wqe(qp, index);

	*dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD));
	if (next->ee_nds & cpu_to_be32(0x3f))
		*new_wqe = (next->nda_op & cpu_to_be32(~0x3f)) |
			(next->ee_nds & cpu_to_be32(0x3f));
	else
		*new_wqe = 0;

	return 0;
}
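
/*
 * Set up the QP table: reserve the QPN range (including two special
 * QPs per port) and tell the firmware which QPNs to use for the
 * SMI/GSI special QPs via CONF_SPECIAL_QP.
 */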
int __devinit mthca_init_qp_table(struct mthca_dev *dev)
{
	int err;
	u8 status;
	int i;

	spin_lock_init(&dev->qp_table.lock);

	/*
	 * We reserve 2 extra QPs per port for the special QPs.  The
	 * special QP for port 1 has to be even, so round up.
	 */
	dev->qp_table.sqp_start = (dev->limits.reserved_qps + 1) & ~1UL;

	err = mthca_alloc_init(&dev->qp_table.alloc,
			       dev->limits.num_qps,
			       (1 << 24) - 1,
			       dev->qp_table.sqp_start +
			       MTHCA_MAX_PORTS * 2);
	if (err)
		return err;

	err = mthca_array_init(&dev->qp_table.qp,
			       dev->limits.num_qps);
	if (err) {
		mthca_alloc_cleanup(&dev->qp_table.alloc);
		return err;
	}

	for (i = 0; i < 2; ++i) {
		err = mthca_CONF_SPECIAL_QP(dev, i ? IB_QPT_GSI : IB_QPT_SMI,
					    dev->qp_table.sqp_start + i * 2,
					    &status);
		if (err)
			goto err_out;
		if (status) {
			mthca_warn(dev, "CONF_SPECIAL_QP returned "
				   "status %02x, aborting.\n",
				   status);
			err = -EINVAL;
			goto err_out;
		}
	}
	return 0;

err_out:
	for (i = 0; i < 2; ++i)
		mthca_CONF_SPECIAL_QP(dev, i, 0, &status);

	mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
	mthca_alloc_cleanup(&dev->qp_table.alloc);

	return err;
}

void __devexit mthca_cleanup_qp_table(struct mthca_dev *dev)
{
	int i;
	u8 status;

	for (i = 0; i < 2; ++i)
		mthca_CONF_SPECIAL_QP(dev, i, 0, &status);

	mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
	mthca_alloc_cleanup(&dev->qp_table.alloc);
}