qib_verbs.c

/*
 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
 * All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_mad.h>
#include <rdma/ib_user_verbs.h>
#include <linux/io.h>
#include <linux/utsname.h>
#include <linux/rculist.h>
#include <linux/mm.h>
#include <linux/random.h>

#include "qib.h"
#include "qib_common.h"

static unsigned int ib_qib_qp_table_size = 256;
module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");

unsigned int ib_qib_lkey_table_size = 16;
module_param_named(lkey_table_size, ib_qib_lkey_table_size, uint,
                   S_IRUGO);
MODULE_PARM_DESC(lkey_table_size,
                 "LKEY table size in bits (2^n, 1 <= n <= 23)");

static unsigned int ib_qib_max_pds = 0xFFFF;
module_param_named(max_pds, ib_qib_max_pds, uint, S_IRUGO);
MODULE_PARM_DESC(max_pds,
                 "Maximum number of protection domains to support");

static unsigned int ib_qib_max_ahs = 0xFFFF;
module_param_named(max_ahs, ib_qib_max_ahs, uint, S_IRUGO);
MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");

unsigned int ib_qib_max_cqes = 0x2FFFF;
module_param_named(max_cqes, ib_qib_max_cqes, uint, S_IRUGO);
MODULE_PARM_DESC(max_cqes,
                 "Maximum number of completion queue entries to support");

unsigned int ib_qib_max_cqs = 0x1FFFF;
module_param_named(max_cqs, ib_qib_max_cqs, uint, S_IRUGO);
MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");

unsigned int ib_qib_max_qp_wrs = 0x3FFF;
module_param_named(max_qp_wrs, ib_qib_max_qp_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");

unsigned int ib_qib_max_qps = 16384;
module_param_named(max_qps, ib_qib_max_qps, uint, S_IRUGO);
MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");

unsigned int ib_qib_max_sges = 0x60;
module_param_named(max_sges, ib_qib_max_sges, uint, S_IRUGO);
MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");

unsigned int ib_qib_max_mcast_grps = 16384;
module_param_named(max_mcast_grps, ib_qib_max_mcast_grps, uint, S_IRUGO);
MODULE_PARM_DESC(max_mcast_grps,
                 "Maximum number of multicast groups to support");

unsigned int ib_qib_max_mcast_qp_attached = 16;
module_param_named(max_mcast_qp_attached, ib_qib_max_mcast_qp_attached,
                   uint, S_IRUGO);
MODULE_PARM_DESC(max_mcast_qp_attached,
                 "Maximum number of attached QPs to support");

unsigned int ib_qib_max_srqs = 1024;
module_param_named(max_srqs, ib_qib_max_srqs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");

unsigned int ib_qib_max_srq_sges = 128;
module_param_named(max_srq_sges, ib_qib_max_srq_sges, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");

unsigned int ib_qib_max_srq_wrs = 0x1FFFF;
module_param_named(max_srq_wrs, ib_qib_max_srq_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");

static unsigned int ib_qib_disable_sma;
module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(disable_sma, "Disable the SMA");
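
/*
 * Example (illustrative, not part of the driver source; assumes the
 * module is loaded under its usual ib_qib name): the S_IRUGO parameters
 * above are fixed at module load time, e.g.
 *
 *     modprobe ib_qib qp_table_size=512 lkey_table_size=17
 *
 * after which each value is readable from
 * /sys/module/ib_qib/parameters/<name>.  Only disable_sma, which is
 * also marked S_IWUSR, can be changed after load.
 */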

/*
 * Note that it is OK to post send work requests in the SQE and ERR
 * states; qib_do_send() will process them and generate error
 * completions as per IB 1.2 C10-96.
 */
const int ib_qib_state_ops[IB_QPS_ERR + 1] = {
        [IB_QPS_RESET] = 0,
        [IB_QPS_INIT] = QIB_POST_RECV_OK,
        [IB_QPS_RTR] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK,
        [IB_QPS_RTS] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
            QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK |
            QIB_PROCESS_NEXT_SEND_OK,
        [IB_QPS_SQD] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
            QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK,
        [IB_QPS_SQE] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
            QIB_POST_SEND_OK | QIB_FLUSH_SEND,
        [IB_QPS_ERR] = QIB_POST_RECV_OK | QIB_FLUSH_RECV |
            QIB_POST_SEND_OK | QIB_FLUSH_SEND,
};
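
/*
 * Example (illustrative; it simply mirrors checks made later in this
 * file): callers gate work-request handling on this table, e.g.
 *
 *     if (!(ib_qib_state_ops[qp->state] & QIB_POST_SEND_OK))
 *             return -EINVAL;
 *
 * which is the same test qib_post_one_send() performs below before
 * accepting a send work request.
 */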

struct qib_ucontext {
        struct ib_ucontext ibucontext;
};

static inline struct qib_ucontext *to_iucontext(struct ib_ucontext
                                                *ibucontext)
{
        return container_of(ibucontext, struct qib_ucontext, ibucontext);
}

/*
 * Translate ib_wr_opcode into ib_wc_opcode.
 */
const enum ib_wc_opcode ib_qib_wc_opcode[] = {
        [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
        [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
        [IB_WR_SEND] = IB_WC_SEND,
        [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
        [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
        [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
        [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
};

/*
 * System image GUID.
 */
__be64 ib_qib_sys_image_guid;

/**
 * qib_copy_sge - copy data to SGE memory
 * @ss: the SGE state
 * @data: the data to copy
 * @length: the length of the data
 * @release: if true, drop the MR reference when an SGE is consumed
 */
void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length, int release)
{
        struct qib_sge *sge = &ss->sge;

        while (length) {
                u32 len = sge->length;

                if (len > length)
                        len = length;
                if (len > sge->sge_length)
                        len = sge->sge_length;
                BUG_ON(len == 0);
                memcpy(sge->vaddr, data, len);
                sge->vaddr += len;
                sge->length -= len;
                sge->sge_length -= len;
                if (sge->sge_length == 0) {
                        if (release)
                                atomic_dec(&sge->mr->refcount);
                        if (--ss->num_sge)
                                *sge = *ss->sg_list++;
                } else if (sge->length == 0 && sge->mr->lkey) {
                        if (++sge->n >= QIB_SEGSZ) {
                                if (++sge->m >= sge->mr->mapsz)
                                        break;
                                sge->n = 0;
                        }
                        sge->vaddr =
                                sge->mr->map[sge->m]->segs[sge->n].vaddr;
                        sge->length =
                                sge->mr->map[sge->m]->segs[sge->n].length;
                }
                data += len;
                length -= len;
        }
}
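
/*
 * Note on the walk above (added for clarity; not in the original
 * source): sge->sge_length is what remains of the SGE as the user
 * posted it, while sge->length is what remains of the current physical
 * segment in the MR's map.  When the physical segment runs out first,
 * the walk advances to the next map segment; when the whole SGE runs
 * out, it advances to the next entry in sg_list.
 */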

/**
 * qib_skip_sge - skip over SGE memory - XXX almost dup of prev func
 * @ss: the SGE state
 * @length: the number of bytes to skip
 * @release: if true, drop the MR reference when an SGE is consumed
 */
void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release)
{
        struct qib_sge *sge = &ss->sge;

        while (length) {
                u32 len = sge->length;

                if (len > length)
                        len = length;
                if (len > sge->sge_length)
                        len = sge->sge_length;
                BUG_ON(len == 0);
                sge->vaddr += len;
                sge->length -= len;
                sge->sge_length -= len;
                if (sge->sge_length == 0) {
                        if (release)
                                atomic_dec(&sge->mr->refcount);
                        if (--ss->num_sge)
                                *sge = *ss->sg_list++;
                } else if (sge->length == 0 && sge->mr->lkey) {
                        if (++sge->n >= QIB_SEGSZ) {
                                if (++sge->m >= sge->mr->mapsz)
                                        break;
                                sge->n = 0;
                        }
                        sge->vaddr =
                                sge->mr->map[sge->m]->segs[sge->n].vaddr;
                        sge->length =
                                sge->mr->map[sge->m]->segs[sge->n].length;
                }
                length -= len;
        }
}

/*
 * Count the number of DMA descriptors needed to send length bytes of data.
 * Don't modify the qib_sge_state to get the count.
 * Return zero if any of the segments is not aligned.
 */
static u32 qib_count_sge(struct qib_sge_state *ss, u32 length)
{
        struct qib_sge *sg_list = ss->sg_list;
        struct qib_sge sge = ss->sge;
        u8 num_sge = ss->num_sge;
        u32 ndesc = 1;  /* count the header */

        while (length) {
                u32 len = sge.length;

                if (len > length)
                        len = length;
                if (len > sge.sge_length)
                        len = sge.sge_length;
                BUG_ON(len == 0);
                if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
                    (len != length && (len & (sizeof(u32) - 1)))) {
                        ndesc = 0;
                        break;
                }
                ndesc++;
                sge.vaddr += len;
                sge.length -= len;
                sge.sge_length -= len;
                if (sge.sge_length == 0) {
                        if (--num_sge)
                                sge = *sg_list++;
                } else if (sge.length == 0 && sge.mr->lkey) {
                        if (++sge.n >= QIB_SEGSZ) {
                                if (++sge.m >= sge.mr->mapsz)
                                        break;
                                sge.n = 0;
                        }
                        sge.vaddr =
                                sge.mr->map[sge.m]->segs[sge.n].vaddr;
                        sge.length =
                                sge.mr->map[sge.m]->segs[sge.n].length;
                }
                length -= len;
        }
        return ndesc;
}
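
/*
 * Worked example for qib_count_sge() (illustrative; not in the original
 * source): for a dword-aligned SGE state with three 4096-byte segments
 * and length == 12288, the loop counts one descriptor per segment plus
 * one for the header, so it returns 4.  If any segment starts on a
 * non-dword boundary, or an interior segment has a non-dword length,
 * it returns 0 and qib_verbs_send_dma() below falls back to copying
 * the payload into a single bounce buffer.
 */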

/*
 * Copy from the SGEs to the data buffer.
 */
static void qib_copy_from_sge(void *data, struct qib_sge_state *ss, u32 length)
{
        struct qib_sge *sge = &ss->sge;

        while (length) {
                u32 len = sge->length;

                if (len > length)
                        len = length;
                if (len > sge->sge_length)
                        len = sge->sge_length;
                BUG_ON(len == 0);
                memcpy(data, sge->vaddr, len);
                sge->vaddr += len;
                sge->length -= len;
                sge->sge_length -= len;
                if (sge->sge_length == 0) {
                        if (--ss->num_sge)
                                *sge = *ss->sg_list++;
                } else if (sge->length == 0 && sge->mr->lkey) {
                        if (++sge->n >= QIB_SEGSZ) {
                                if (++sge->m >= sge->mr->mapsz)
                                        break;
                                sge->n = 0;
                        }
                        sge->vaddr =
                                sge->mr->map[sge->m]->segs[sge->n].vaddr;
                        sge->length =
                                sge->mr->map[sge->m]->segs[sge->n].length;
                }
                data += len;
                length -= len;
        }
}

/**
 * qib_post_one_send - post one RC, UC, or UD send work request
 * @qp: the QP to post on
 * @wr: the work request to send
 */
static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr)
{
        struct qib_swqe *wqe;
        u32 next;
        int i;
        int j;
        int acc;
        int ret;
        unsigned long flags;
        struct qib_lkey_table *rkt;
        struct qib_pd *pd;

        spin_lock_irqsave(&qp->s_lock, flags);

        /* Check that state is OK to post send. */
        if (unlikely(!(ib_qib_state_ops[qp->state] & QIB_POST_SEND_OK)))
                goto bail_inval;

        /* IB spec says that num_sge == 0 is OK. */
        if (wr->num_sge > qp->s_max_sge)
                goto bail_inval;

        /*
         * Don't allow RDMA reads or atomic operations on UC QPs, nor
         * undefined opcodes.
         * Make sure the buffer is large enough to hold the result for atomics.
         */
        if (wr->opcode == IB_WR_FAST_REG_MR) {
                if (qib_fast_reg_mr(qp, wr))
                        goto bail_inval;
        } else if (qp->ibqp.qp_type == IB_QPT_UC) {
                if ((unsigned) wr->opcode >= IB_WR_RDMA_READ)
                        goto bail_inval;
        } else if (qp->ibqp.qp_type != IB_QPT_RC) {
                /* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */
                if (wr->opcode != IB_WR_SEND &&
                    wr->opcode != IB_WR_SEND_WITH_IMM)
                        goto bail_inval;
                /* Check UD destination address PD */
                if (qp->ibqp.pd != wr->wr.ud.ah->pd)
                        goto bail_inval;
        } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD)
                goto bail_inval;
        else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
                 (wr->num_sge == 0 ||
                  wr->sg_list[0].length < sizeof(u64) ||
                  wr->sg_list[0].addr & (sizeof(u64) - 1)))
                goto bail_inval;
        else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic)
                goto bail_inval;

        next = qp->s_head + 1;
        if (next >= qp->s_size)
                next = 0;
        if (next == qp->s_last) {
                ret = -ENOMEM;
                goto bail;
        }

        rkt = &to_idev(qp->ibqp.device)->lk_table;
        pd = to_ipd(qp->ibqp.pd);
        wqe = get_swqe_ptr(qp, qp->s_head);
        wqe->wr = *wr;
        wqe->length = 0;
        j = 0;
        if (wr->num_sge) {
                acc = wr->opcode >= IB_WR_RDMA_READ ?
                        IB_ACCESS_LOCAL_WRITE : 0;
                for (i = 0; i < wr->num_sge; i++) {
                        u32 length = wr->sg_list[i].length;
                        int ok;

                        if (length == 0)
                                continue;
                        ok = qib_lkey_ok(rkt, pd, &wqe->sg_list[j],
                                         &wr->sg_list[i], acc);
                        if (!ok)
                                goto bail_inval_free;
                        wqe->length += length;
                        j++;
                }
                wqe->wr.num_sge = j;
        }
        if (qp->ibqp.qp_type == IB_QPT_UC ||
            qp->ibqp.qp_type == IB_QPT_RC) {
                if (wqe->length > 0x80000000U)
                        goto bail_inval_free;
        } else if (wqe->length > (dd_from_ibdev(qp->ibqp.device)->pport +
                                  qp->port_num - 1)->ibmtu)
                goto bail_inval_free;
        else
                atomic_inc(&to_iah(wr->wr.ud.ah)->refcount);
        wqe->ssn = qp->s_ssn++;
        qp->s_head = next;

        ret = 0;
        goto bail;

bail_inval_free:
        while (j) {
                struct qib_sge *sge = &wqe->sg_list[--j];

                atomic_dec(&sge->mr->refcount);
        }
bail_inval:
        ret = -EINVAL;
bail:
        spin_unlock_irqrestore(&qp->s_lock, flags);
        return ret;
}

/**
 * qib_post_send - post a send on a QP
 * @ibqp: the QP to post the send on
 * @wr: the list of work requests to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 */
static int qib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                         struct ib_send_wr **bad_wr)
{
        struct qib_qp *qp = to_iqp(ibqp);
        int err = 0;

        for (; wr; wr = wr->next) {
                err = qib_post_one_send(qp, wr);
                if (err) {
                        *bad_wr = wr;
                        goto bail;
                }
        }

        /* Try to do the send work in the caller's context. */
        qib_do_send(&qp->s_work);

bail:
        return err;
}

/**
 * qib_post_receive - post a receive on a QP
 * @ibqp: the QP to post the receive on
 * @wr: the WR to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 */
static int qib_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                            struct ib_recv_wr **bad_wr)
{
        struct qib_qp *qp = to_iqp(ibqp);
        struct qib_rwq *wq = qp->r_rq.wq;
        unsigned long flags;
        int ret;

        /* Check that state is OK to post receive. */
        if (!(ib_qib_state_ops[qp->state] & QIB_POST_RECV_OK) || !wq) {
                *bad_wr = wr;
                ret = -EINVAL;
                goto bail;
        }

        for (; wr; wr = wr->next) {
                struct qib_rwqe *wqe;
                u32 next;
                int i;

                if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
                        *bad_wr = wr;
                        ret = -EINVAL;
                        goto bail;
                }

                spin_lock_irqsave(&qp->r_rq.lock, flags);
                next = wq->head + 1;
                if (next >= qp->r_rq.size)
                        next = 0;
                if (next == wq->tail) {
                        spin_unlock_irqrestore(&qp->r_rq.lock, flags);
                        *bad_wr = wr;
                        ret = -ENOMEM;
                        goto bail;
                }

                wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
                wqe->wr_id = wr->wr_id;
                wqe->num_sge = wr->num_sge;
                for (i = 0; i < wr->num_sge; i++)
                        wqe->sg_list[i] = wr->sg_list[i];
                /* Make sure queue entry is written before the head index. */
                smp_wmb();
                wq->head = next;
                spin_unlock_irqrestore(&qp->r_rq.lock, flags);
        }
        ret = 0;

bail:
        return ret;
}

/**
 * qib_qp_rcv - process an incoming packet on a QP
 * @rcd: the context pointer
 * @hdr: the packet header
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP the packet came on
 *
 * This is called from qib_ib_rcv() to process an incoming packet
 * for the given QP.
 * Called at interrupt level.
 */
static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
                       int has_grh, void *data, u32 tlen, struct qib_qp *qp)
{
        struct qib_ibport *ibp = &rcd->ppd->ibport_data;

        spin_lock(&qp->r_lock);

        /* Check for valid receive state. */
        if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
                ibp->n_pkt_drops++;
                goto unlock;
        }

        switch (qp->ibqp.qp_type) {
        case IB_QPT_SMI:
        case IB_QPT_GSI:
                if (ib_qib_disable_sma)
                        break;
                /* FALLTHROUGH */
        case IB_QPT_UD:
                qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp);
                break;

        case IB_QPT_RC:
                qib_rc_rcv(rcd, hdr, has_grh, data, tlen, qp);
                break;

        case IB_QPT_UC:
                qib_uc_rcv(ibp, hdr, has_grh, data, tlen, qp);
                break;

        default:
                break;
        }

unlock:
        spin_unlock(&qp->r_lock);
}

/**
 * qib_ib_rcv - process an incoming packet
 * @rcd: the context pointer
 * @rhdr: the header of the packet
 * @data: the packet payload
 * @tlen: the packet length
 *
 * This is called from qib_kreceive() to process an incoming packet at
 * interrupt level. Tlen is the length of the header + data + CRC in bytes.
 */
void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
{
        struct qib_pportdata *ppd = rcd->ppd;
        struct qib_ibport *ibp = &ppd->ibport_data;
        struct qib_ib_header *hdr = rhdr;
        struct qib_other_headers *ohdr;
        struct qib_qp *qp;
        u32 qp_num;
        int lnh;
        u8 opcode;
        u16 lid;

        /* 24 == LRH+BTH+CRC */
        if (unlikely(tlen < 24))
                goto drop;

        /* Check for a valid destination LID (see ch. 7.11.1). */
        lid = be16_to_cpu(hdr->lrh[1]);
        if (lid < QIB_MULTICAST_LID_BASE) {
                lid &= ~((1 << ppd->lmc) - 1);
                if (unlikely(lid != ppd->lid))
                        goto drop;
        }

        /* Check for GRH */
        lnh = be16_to_cpu(hdr->lrh[0]) & 3;
        if (lnh == QIB_LRH_BTH)
                ohdr = &hdr->u.oth;
        else if (lnh == QIB_LRH_GRH) {
                u32 vtf;

                ohdr = &hdr->u.l.oth;
                if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
                        goto drop;
                vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
                if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
                        goto drop;
        } else
                goto drop;

        opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
        ibp->opstats[opcode & 0x7f].n_bytes += tlen;
        ibp->opstats[opcode & 0x7f].n_packets++;

        /* Get the destination QP number. */
        qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
        if (qp_num == QIB_MULTICAST_QPN) {
                struct qib_mcast *mcast;
                struct qib_mcast_qp *p;

                if (lnh != QIB_LRH_GRH)
                        goto drop;
                mcast = qib_mcast_find(ibp, &hdr->u.l.grh.dgid);
                if (mcast == NULL)
                        goto drop;
                ibp->n_multicast_rcv++;
                list_for_each_entry_rcu(p, &mcast->qp_list, list)
                        qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp);
                /*
                 * Notify qib_multicast_detach() if it is waiting for us
                 * to finish.
                 */
                if (atomic_dec_return(&mcast->refcount) <= 1)
                        wake_up(&mcast->wait);
        } else {
                if (rcd->lookaside_qp) {
                        if (rcd->lookaside_qpn != qp_num) {
                                if (atomic_dec_and_test(
                                            &rcd->lookaside_qp->refcount))
                                        wake_up(&rcd->lookaside_qp->wait);
                                rcd->lookaside_qp = NULL;
                        }
                }
                if (!rcd->lookaside_qp) {
                        qp = qib_lookup_qpn(ibp, qp_num);
                        if (!qp)
                                goto drop;
                        rcd->lookaside_qp = qp;
                        rcd->lookaside_qpn = qp_num;
                } else
                        qp = rcd->lookaside_qp;
                ibp->n_unicast_rcv++;
                qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp);
        }
        return;

drop:
        ibp->n_pkt_drops++;
}
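
/*
 * Note (illustrative; not in the original source): the BTH opcode is
 * the most-significant byte of bth[0], hence the ">> 24" above.  For
 * example, a UD Send Only packet carries opcode 0x64, so
 * opstats[0x64 & 0x7f] accumulates its bytes and packet count.
 */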

/*
 * This is called from a timer to check for QPs
 * which need kernel memory in order to send a packet.
 */
static void mem_timer(unsigned long data)
{
        struct qib_ibdev *dev = (struct qib_ibdev *) data;
        struct list_head *list = &dev->memwait;
        struct qib_qp *qp = NULL;
        unsigned long flags;

        spin_lock_irqsave(&dev->pending_lock, flags);
        if (!list_empty(list)) {
                qp = list_entry(list->next, struct qib_qp, iowait);
                list_del_init(&qp->iowait);
                atomic_inc(&qp->refcount);
                if (!list_empty(list))
                        mod_timer(&dev->mem_timer, jiffies + 1);
        }
        spin_unlock_irqrestore(&dev->pending_lock, flags);

        if (qp) {
                spin_lock_irqsave(&qp->s_lock, flags);
                if (qp->s_flags & QIB_S_WAIT_KMEM) {
                        qp->s_flags &= ~QIB_S_WAIT_KMEM;
                        qib_schedule_send(qp);
                }
                spin_unlock_irqrestore(&qp->s_lock, flags);
                if (atomic_dec_and_test(&qp->refcount))
                        wake_up(&qp->wait);
        }
}

static void update_sge(struct qib_sge_state *ss, u32 length)
{
        struct qib_sge *sge = &ss->sge;

        sge->vaddr += length;
        sge->length -= length;
        sge->sge_length -= length;
        if (sge->sge_length == 0) {
                if (--ss->num_sge)
                        *sge = *ss->sg_list++;
        } else if (sge->length == 0 && sge->mr->lkey) {
                if (++sge->n >= QIB_SEGSZ) {
                        if (++sge->m >= sge->mr->mapsz)
                                return;
                        sge->n = 0;
                }
                sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
                sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
        }
}

#ifdef __LITTLE_ENDIAN
static inline u32 get_upper_bits(u32 data, u32 shift)
{
        return data >> shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
        return data << shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
        data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
        data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
        return data;
}
#else
static inline u32 get_upper_bits(u32 data, u32 shift)
{
        return data << shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
        return data >> shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
        data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
        data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
        return data;
}
#endif
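
/*
 * Worked example (illustrative; not in the original source), taking the
 * little-endian variants: clear_upper_bytes(0x44332211, 2, 1) shifts
 * left by (4 - 2) * 8 = 16 bits (0x22110000), then right by
 * (4 - 2 - 1) * 8 = 8 bits, yielding 0x00221100.  The helper thus keeps
 * the low n bytes of data and repositions them at byte offset off, so
 * copy_io() below can OR partial words from unaligned sources into a
 * full dword before writing it to the PIO buffer.
 */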

static void copy_io(u32 __iomem *piobuf, struct qib_sge_state *ss,
                    u32 length, unsigned flush_wc)
{
        u32 extra = 0;
        u32 data = 0;
        u32 last;

        while (1) {
                u32 len = ss->sge.length;
                u32 off;

                if (len > length)
                        len = length;
                if (len > ss->sge.sge_length)
                        len = ss->sge.sge_length;
                BUG_ON(len == 0);
                /* If the source address is not aligned, try to align it. */
                off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
                if (off) {
                        u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
                                            ~(sizeof(u32) - 1));
                        u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
                        u32 y;

                        y = sizeof(u32) - off;
                        if (len > y)
                                len = y;
                        if (len + extra >= sizeof(u32)) {
                                data |= set_upper_bits(v, extra *
                                                       BITS_PER_BYTE);
                                len = sizeof(u32) - extra;
                                if (len == length) {
                                        last = data;
                                        break;
                                }
                                __raw_writel(data, piobuf);
                                piobuf++;
                                extra = 0;
                                data = 0;
                        } else {
                                /* Clear unused upper bytes */
                                data |= clear_upper_bytes(v, len, extra);
                                if (len == length) {
                                        last = data;
                                        break;
                                }
                                extra += len;
                        }
                } else if (extra) {
                        /* Source address is aligned. */
                        u32 *addr = (u32 *) ss->sge.vaddr;
                        int shift = extra * BITS_PER_BYTE;
                        int ushift = 32 - shift;
                        u32 l = len;

                        while (l >= sizeof(u32)) {
                                u32 v = *addr;

                                data |= set_upper_bits(v, shift);
                                __raw_writel(data, piobuf);
                                data = get_upper_bits(v, ushift);
                                piobuf++;
                                addr++;
                                l -= sizeof(u32);
                        }
                        /*
                         * We still have 'l' bytes (< sizeof(u32)) left
                         * over from this segment.
                         */
                        if (l) {
                                u32 v = *addr;

                                if (l + extra >= sizeof(u32)) {
                                        data |= set_upper_bits(v, shift);
                                        len -= l + extra - sizeof(u32);
                                        if (len == length) {
                                                last = data;
                                                break;
                                        }
                                        __raw_writel(data, piobuf);
                                        piobuf++;
                                        extra = 0;
                                        data = 0;
                                } else {
                                        /* Clear unused upper bytes */
                                        data |= clear_upper_bytes(v, l, extra);
                                        if (len == length) {
                                                last = data;
                                                break;
                                        }
                                        extra += l;
                                }
                        } else if (len == length) {
                                last = data;
                                break;
                        }
                } else if (len == length) {
                        u32 w;

                        /*
                         * Need to round up for the last dword in the
                         * packet.
                         */
                        w = (len + 3) >> 2;
                        qib_pio_copy(piobuf, ss->sge.vaddr, w - 1);
                        piobuf += w - 1;
                        last = ((u32 *) ss->sge.vaddr)[w - 1];
                        break;
                } else {
                        u32 w = len >> 2;

                        qib_pio_copy(piobuf, ss->sge.vaddr, w);
                        piobuf += w;
                        extra = len & (sizeof(u32) - 1);
                        if (extra) {
                                u32 v = ((u32 *) ss->sge.vaddr)[w];

                                /* Clear unused upper bytes */
                                data = clear_upper_bytes(v, extra, 0);
                        }
                }
                update_sge(ss, len);
                length -= len;
        }
        /* Update address before sending packet. */
        update_sge(ss, length);
        if (flush_wc) {
                /* must flush early everything before trigger word */
                qib_flush_wc();
                __raw_writel(last, piobuf);
                /* be sure trigger word is written */
                qib_flush_wc();
        } else
                __raw_writel(last, piobuf);
}

static struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
                                         struct qib_qp *qp, int *retp)
{
        struct qib_verbs_txreq *tx;
        unsigned long flags;

        spin_lock_irqsave(&qp->s_lock, flags);
        spin_lock(&dev->pending_lock);

        if (!list_empty(&dev->txreq_free)) {
                struct list_head *l = dev->txreq_free.next;

                list_del(l);
                tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
                *retp = 0;
        } else {
                if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK &&
                    list_empty(&qp->iowait)) {
                        dev->n_txwait++;
                        qp->s_flags |= QIB_S_WAIT_TX;
                        list_add_tail(&qp->iowait, &dev->txwait);
                }
                tx = NULL;
                qp->s_flags &= ~QIB_S_BUSY;
                *retp = -EBUSY;
        }

        spin_unlock(&dev->pending_lock);
        spin_unlock_irqrestore(&qp->s_lock, flags);

        return tx;
}

void qib_put_txreq(struct qib_verbs_txreq *tx)
{
        struct qib_ibdev *dev;
        struct qib_qp *qp;
        unsigned long flags;

        qp = tx->qp;
        dev = to_idev(qp->ibqp.device);

        if (atomic_dec_and_test(&qp->refcount))
                wake_up(&qp->wait);
        if (tx->mr) {
                atomic_dec(&tx->mr->refcount);
                tx->mr = NULL;
        }
        if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) {
                tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF;
                dma_unmap_single(&dd_from_dev(dev)->pcidev->dev,
                                 tx->txreq.addr, tx->hdr_dwords << 2,
                                 DMA_TO_DEVICE);
                kfree(tx->align_buf);
        }

        spin_lock_irqsave(&dev->pending_lock, flags);

        /* Put struct back on free list */
        list_add(&tx->txreq.list, &dev->txreq_free);

        if (!list_empty(&dev->txwait)) {
                /* Wake up first QP wanting a free struct */
                qp = list_entry(dev->txwait.next, struct qib_qp, iowait);
                list_del_init(&qp->iowait);
                atomic_inc(&qp->refcount);
                spin_unlock_irqrestore(&dev->pending_lock, flags);

                spin_lock_irqsave(&qp->s_lock, flags);
                if (qp->s_flags & QIB_S_WAIT_TX) {
                        qp->s_flags &= ~QIB_S_WAIT_TX;
                        qib_schedule_send(qp);
                }
                spin_unlock_irqrestore(&qp->s_lock, flags);

                if (atomic_dec_and_test(&qp->refcount))
                        wake_up(&qp->wait);
        } else
                spin_unlock_irqrestore(&dev->pending_lock, flags);
}

/*
 * This is called when there are send DMA descriptors that might be
 * available.
 *
 * This is called with ppd->sdma_lock held.
 */
void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
{
        struct qib_qp *qp, *nqp;
        struct qib_qp *qps[20];
        struct qib_ibdev *dev;
        unsigned i, n;

        n = 0;
        dev = &ppd->dd->verbs_dev;
        spin_lock(&dev->pending_lock);

        /* Search wait list for first QP wanting DMA descriptors. */
        list_for_each_entry_safe(qp, nqp, &dev->dmawait, iowait) {
                if (qp->port_num != ppd->port)
                        continue;
                if (n == ARRAY_SIZE(qps))
                        break;
                if (qp->s_tx->txreq.sg_count > avail)
                        break;
                avail -= qp->s_tx->txreq.sg_count;
                list_del_init(&qp->iowait);
                atomic_inc(&qp->refcount);
                qps[n++] = qp;
        }

        spin_unlock(&dev->pending_lock);

        for (i = 0; i < n; i++) {
                qp = qps[i];
                spin_lock(&qp->s_lock);
                if (qp->s_flags & QIB_S_WAIT_DMA_DESC) {
                        qp->s_flags &= ~QIB_S_WAIT_DMA_DESC;
                        qib_schedule_send(qp);
                }
                spin_unlock(&qp->s_lock);
                if (atomic_dec_and_test(&qp->refcount))
                        wake_up(&qp->wait);
        }
}

/*
 * This is called with ppd->sdma_lock held.
 */
static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
{
        struct qib_verbs_txreq *tx =
                container_of(cookie, struct qib_verbs_txreq, txreq);
        struct qib_qp *qp = tx->qp;

        spin_lock(&qp->s_lock);
        if (tx->wqe)
                qib_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
        else if (qp->ibqp.qp_type == IB_QPT_RC) {
                struct qib_ib_header *hdr;

                if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF)
                        hdr = &tx->align_buf->hdr;
                else {
                        struct qib_ibdev *dev = to_idev(qp->ibqp.device);

                        hdr = &dev->pio_hdrs[tx->hdr_inx].hdr;
                }
                qib_rc_send_complete(qp, hdr);
        }
        if (atomic_dec_and_test(&qp->s_dma_busy)) {
                if (qp->state == IB_QPS_RESET)
                        wake_up(&qp->wait_dma);
                else if (qp->s_flags & QIB_S_WAIT_DMA) {
                        qp->s_flags &= ~QIB_S_WAIT_DMA;
                        qib_schedule_send(qp);
                }
        }
        spin_unlock(&qp->s_lock);

        qib_put_txreq(tx);
}

static int wait_kmem(struct qib_ibdev *dev, struct qib_qp *qp)
{
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&qp->s_lock, flags);
        if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
                spin_lock(&dev->pending_lock);
                if (list_empty(&qp->iowait)) {
                        if (list_empty(&dev->memwait))
                                mod_timer(&dev->mem_timer, jiffies + 1);
                        qp->s_flags |= QIB_S_WAIT_KMEM;
                        list_add_tail(&qp->iowait, &dev->memwait);
                }
                spin_unlock(&dev->pending_lock);
                qp->s_flags &= ~QIB_S_BUSY;
                ret = -EBUSY;
        }
        spin_unlock_irqrestore(&qp->s_lock, flags);

        return ret;
}

static int qib_verbs_send_dma(struct qib_qp *qp, struct qib_ib_header *hdr,
                              u32 hdrwords, struct qib_sge_state *ss, u32 len,
                              u32 plen, u32 dwords)
{
        struct qib_ibdev *dev = to_idev(qp->ibqp.device);
        struct qib_devdata *dd = dd_from_dev(dev);
        struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
        struct qib_pportdata *ppd = ppd_from_ibp(ibp);
        struct qib_verbs_txreq *tx;
        struct qib_pio_header *phdr;
        u32 control;
        u32 ndesc;
        int ret;

        tx = qp->s_tx;
        if (tx) {
                qp->s_tx = NULL;
                /* resend previously constructed packet */
                ret = qib_sdma_verbs_send(ppd, tx->ss, tx->dwords, tx);
                goto bail;
        }

        tx = get_txreq(dev, qp, &ret);
        if (!tx)
                goto bail;

        control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
                                       be16_to_cpu(hdr->lrh[0]) >> 12);
        tx->qp = qp;
        atomic_inc(&qp->refcount);
        tx->wqe = qp->s_wqe;
        tx->mr = qp->s_rdma_mr;
        if (qp->s_rdma_mr)
                qp->s_rdma_mr = NULL;
        tx->txreq.callback = sdma_complete;
        if (dd->flags & QIB_HAS_SDMA_TIMEOUT)
                tx->txreq.flags = QIB_SDMA_TXREQ_F_HEADTOHOST;
        else
                tx->txreq.flags = QIB_SDMA_TXREQ_F_INTREQ;
        if (plen + 1 > dd->piosize2kmax_dwords)
                tx->txreq.flags |= QIB_SDMA_TXREQ_F_USELARGEBUF;

        if (len) {
                /*
                 * Don't try to DMA if it takes more descriptors than
                 * the queue holds.
                 */
                ndesc = qib_count_sge(ss, len);
                if (ndesc >= ppd->sdma_descq_cnt)
                        ndesc = 0;
        } else
                ndesc = 1;
        if (ndesc) {
                phdr = &dev->pio_hdrs[tx->hdr_inx];
                phdr->pbc[0] = cpu_to_le32(plen);
                phdr->pbc[1] = cpu_to_le32(control);
                memcpy(&phdr->hdr, hdr, hdrwords << 2);
                tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEDESC;
                tx->txreq.sg_count = ndesc;
                tx->txreq.addr = dev->pio_hdrs_phys +
                        tx->hdr_inx * sizeof(struct qib_pio_header);
                tx->hdr_dwords = hdrwords + 2; /* add PBC length */
                ret = qib_sdma_verbs_send(ppd, ss, dwords, tx);
                goto bail;
        }

        /* Allocate a buffer and copy the header and payload to it. */
        tx->hdr_dwords = plen + 1;
        phdr = kmalloc(tx->hdr_dwords << 2, GFP_ATOMIC);
        if (!phdr)
                goto err_tx;
        phdr->pbc[0] = cpu_to_le32(plen);
        phdr->pbc[1] = cpu_to_le32(control);
        memcpy(&phdr->hdr, hdr, hdrwords << 2);
        qib_copy_from_sge((u32 *) &phdr->hdr + hdrwords, ss, len);

        tx->txreq.addr = dma_map_single(&dd->pcidev->dev, phdr,
                                        tx->hdr_dwords << 2, DMA_TO_DEVICE);
        if (dma_mapping_error(&dd->pcidev->dev, tx->txreq.addr))
                goto map_err;
        tx->align_buf = phdr;
        tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEBUF;
        tx->txreq.sg_count = 1;
        ret = qib_sdma_verbs_send(ppd, NULL, 0, tx);
        goto unaligned;

map_err:
        kfree(phdr);
err_tx:
        qib_put_txreq(tx);
        ret = wait_kmem(dev, qp);
unaligned:
        ibp->n_unaligned++;
bail:
        return ret;
}

/*
 * If we are now in the error state, return zero to flush the
 * send work request.
 */
static int no_bufs_available(struct qib_qp *qp)
{
        struct qib_ibdev *dev = to_idev(qp->ibqp.device);
        struct qib_devdata *dd;
        unsigned long flags;
        int ret = 0;

        /*
         * Note that as soon as want_buffer() is called and
         * possibly before it returns, qib_ib_piobufavail()
         * could be called. Therefore, put QP on the I/O wait list before
         * enabling the PIO avail interrupt.
         */
        spin_lock_irqsave(&qp->s_lock, flags);
        if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
                spin_lock(&dev->pending_lock);
                if (list_empty(&qp->iowait)) {
                        dev->n_piowait++;
                        qp->s_flags |= QIB_S_WAIT_PIO;
                        list_add_tail(&qp->iowait, &dev->piowait);
                        dd = dd_from_dev(dev);
                        dd->f_wantpiobuf_intr(dd, 1);
                }
                spin_unlock(&dev->pending_lock);
                qp->s_flags &= ~QIB_S_BUSY;
                ret = -EBUSY;
        }
        spin_unlock_irqrestore(&qp->s_lock, flags);
        return ret;
}

static int qib_verbs_send_pio(struct qib_qp *qp, struct qib_ib_header *ibhdr,
                              u32 hdrwords, struct qib_sge_state *ss, u32 len,
                              u32 plen, u32 dwords)
{
        struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
        struct qib_pportdata *ppd = dd->pport + qp->port_num - 1;
        u32 *hdr = (u32 *) ibhdr;
        u32 __iomem *piobuf_orig;
        u32 __iomem *piobuf;
        u64 pbc;
        unsigned long flags;
        unsigned flush_wc;
        u32 control;
        u32 pbufn;

        control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
                                       be16_to_cpu(ibhdr->lrh[0]) >> 12);
        pbc = ((u64) control << 32) | plen;
        piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
        if (unlikely(piobuf == NULL))
                return no_bufs_available(qp);

        /*
         * Write the pbc.
         * We have to flush after the PBC for correctness on some cpus
         * or WC buffer can be written out of order.
         */
        writeq(pbc, piobuf);
        piobuf_orig = piobuf;
        piobuf += 2;

        flush_wc = dd->flags & QIB_PIO_FLUSH_WC;
        if (len == 0) {
                /*
                 * If there is just the header portion, must flush before
                 * writing last word of header for correctness, and after
                 * the last header word (trigger word).
                 */
                if (flush_wc) {
                        qib_flush_wc();
                        qib_pio_copy(piobuf, hdr, hdrwords - 1);
                        qib_flush_wc();
                        __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
                        qib_flush_wc();
                } else
                        qib_pio_copy(piobuf, hdr, hdrwords);
                goto done;
        }

        if (flush_wc)
                qib_flush_wc();
        qib_pio_copy(piobuf, hdr, hdrwords);
        piobuf += hdrwords;

        /* The common case is aligned and contained in one segment. */
        if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
                   !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
                u32 *addr = (u32 *) ss->sge.vaddr;

                /* Update address before sending packet. */
                update_sge(ss, len);
                if (flush_wc) {
                        qib_pio_copy(piobuf, addr, dwords - 1);
                        /* must flush early everything before trigger word */
                        qib_flush_wc();
                        __raw_writel(addr[dwords - 1], piobuf + dwords - 1);
                        /* be sure trigger word is written */
                        qib_flush_wc();
                } else
                        qib_pio_copy(piobuf, addr, dwords);
                goto done;
        }
        copy_io(piobuf, ss, len, flush_wc);
done:
        if (dd->flags & QIB_USE_SPCL_TRIG) {
                u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;

                qib_flush_wc();
                __raw_writel(0xaebecede, piobuf_orig + spcl_off);
        }
        qib_sendbuf_done(dd, pbufn);
        if (qp->s_rdma_mr) {
                atomic_dec(&qp->s_rdma_mr->refcount);
                qp->s_rdma_mr = NULL;
        }
        if (qp->s_wqe) {
                spin_lock_irqsave(&qp->s_lock, flags);
                qib_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
                spin_unlock_irqrestore(&qp->s_lock, flags);
        } else if (qp->ibqp.qp_type == IB_QPT_RC) {
                spin_lock_irqsave(&qp->s_lock, flags);
                qib_rc_send_complete(qp, ibhdr);
                spin_unlock_irqrestore(&qp->s_lock, flags);
        }
        return 0;
}

/**
 * qib_verbs_send - send a packet
 * @qp: the QP to send on
 * @hdr: the packet header
 * @hdrwords: the number of 32-bit words in the header
 * @ss: the SGE to send
 * @len: the length of the packet in bytes
 *
 * Return zero if packet is sent or queued OK.
 * Return non-zero and clear qp->s_flags QIB_S_BUSY otherwise.
 */
int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr,
                   u32 hdrwords, struct qib_sge_state *ss, u32 len)
{
        struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
        u32 plen;
        int ret;
        u32 dwords = (len + 3) >> 2;

        /*
         * Calculate the send buffer trigger address.
         * The +1 counts for the pbc control dword following the pbc length.
         */
        plen = hdrwords + dwords + 1;

        /*
         * VL15 packets (IB_QPT_SMI) will always use PIO, so we
         * can defer SDMA restart until link goes ACTIVE without
         * worrying about just how we got there.
         */
        if (qp->ibqp.qp_type == IB_QPT_SMI ||
            !(dd->flags & QIB_HAS_SEND_DMA))
                ret = qib_verbs_send_pio(qp, hdr, hdrwords, ss, len,
                                         plen, dwords);
        else
                ret = qib_verbs_send_dma(qp, hdr, hdrwords, ss, len,
                                         plen, dwords);

        return ret;
}
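
/*
 * Worked arithmetic (illustrative; not in the original source): for a
 * 256-byte payload, dwords = (256 + 3) >> 2 = 64; with, say, a
 * 10-dword header, plen = 10 + 64 + 1 = 75, the +1 being the PBC
 * control dword noted above.  plen is the packet length in dwords that
 * qib_verbs_send_pio() encodes into the low half of the PBC it writes
 * to the send buffer.
 */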

int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
                          u64 *rwords, u64 *spkts, u64 *rpkts,
                          u64 *xmit_wait)
{
        int ret;
        struct qib_devdata *dd = ppd->dd;

        if (!(dd->flags & QIB_PRESENT)) {
                /* no hardware, freeze, etc. */
                ret = -EINVAL;
                goto bail;
        }
        *swords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDSEND);
        *rwords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDRCV);
        *spkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTSEND);
        *rpkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTRCV);
        *xmit_wait = dd->f_portcntr(ppd, QIBPORTCNTR_SENDSTALL);

        ret = 0;

bail:
        return ret;
}

/**
 * qib_get_counters - get various chip counters
 * @ppd: the physical port of the qlogic_ib device
 * @cntrs: counters are placed here
 *
 * Return the counters needed by recv_pma_get_portcounters().
 */
int qib_get_counters(struct qib_pportdata *ppd,
                     struct qib_verbs_counters *cntrs)
{
        int ret;

        if (!(ppd->dd->flags & QIB_PRESENT)) {
                /* no hardware, freeze, etc. */
                ret = -EINVAL;
                goto bail;
        }
        cntrs->symbol_error_counter =
                ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
        cntrs->link_error_recovery_counter =
                ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKERRRECOV);
        /*
         * The link downed counter counts when the other side downs the
         * connection. We add in the number of times we downed the link
         * due to local link integrity errors to compensate.
         */
        cntrs->link_downed_counter =
                ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKDOWN);
        cntrs->port_rcv_errors =
                ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXDROPPKT) +
                ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVOVFL) +
                ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERR_RLEN) +
                ppd->dd->f_portcntr(ppd, QIBPORTCNTR_INVALIDRLEN) +
                ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLINK) +
                ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRICRC) +
                ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRVCRC) +
                ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLPCRC) +
                ppd->dd->f_portcntr(ppd, QIBPORTCNTR_BADFORMAT);
        cntrs->port_rcv_errors +=
                ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXLOCALPHYERR);
        cntrs->port_rcv_errors +=
                ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXVLERR);
        cntrs->port_rcv_remphys_errors =
                ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVEBP);
        cntrs->port_xmit_discards =
                ppd->dd->f_portcntr(ppd, QIBPORTCNTR_UNSUPVL);
        cntrs->port_xmit_data = ppd->dd->f_portcntr(ppd,
                                                    QIBPORTCNTR_WORDSEND);
        cntrs->port_rcv_data = ppd->dd->f_portcntr(ppd,
                                                   QIBPORTCNTR_WORDRCV);
        cntrs->port_xmit_packets = ppd->dd->f_portcntr(ppd,
                                                       QIBPORTCNTR_PKTSEND);
        cntrs->port_rcv_packets = ppd->dd->f_portcntr(ppd,
                                                      QIBPORTCNTR_PKTRCV);
        cntrs->local_link_integrity_errors =
                ppd->dd->f_portcntr(ppd, QIBPORTCNTR_LLI);
        cntrs->excessive_buffer_overrun_errors =
                ppd->dd->f_portcntr(ppd, QIBPORTCNTR_EXCESSBUFOVFL);
        cntrs->vl15_dropped =
                ppd->dd->f_portcntr(ppd, QIBPORTCNTR_VL15PKTDROP);

        ret = 0;

bail:
        return ret;
}

/**
 * qib_ib_piobufavail - callback when a PIO buffer is available
 * @dd: the device pointer
 *
 * This is called from qib_intr() at interrupt level when a PIO buffer is
 * available after qib_verbs_send() returned an error that no buffers were
 * available. Disable the interrupt if there are no more QPs waiting.
 */
void qib_ib_piobufavail(struct qib_devdata *dd)
{
        struct qib_ibdev *dev = &dd->verbs_dev;
        struct list_head *list;
        struct qib_qp *qps[5];
        struct qib_qp *qp;
        unsigned long flags;
        unsigned i, n;

        list = &dev->piowait;
        n = 0;

        /*
         * Note: checking that the piowait list is empty and clearing
         * the buffer available interrupt needs to be atomic or we
         * could end up with QPs on the wait list with the interrupt
         * disabled.
         */
        spin_lock_irqsave(&dev->pending_lock, flags);
        while (!list_empty(list)) {
                if (n == ARRAY_SIZE(qps))
                        goto full;
                qp = list_entry(list->next, struct qib_qp, iowait);
                list_del_init(&qp->iowait);
                atomic_inc(&qp->refcount);
                qps[n++] = qp;
        }
        dd->f_wantpiobuf_intr(dd, 0);
full:
        spin_unlock_irqrestore(&dev->pending_lock, flags);

        for (i = 0; i < n; i++) {
                qp = qps[i];

                spin_lock_irqsave(&qp->s_lock, flags);
                if (qp->s_flags & QIB_S_WAIT_PIO) {
                        qp->s_flags &= ~QIB_S_WAIT_PIO;
                        qib_schedule_send(qp);
                }
                spin_unlock_irqrestore(&qp->s_lock, flags);

                /* Notify qib_destroy_qp() if it is waiting. */
                if (atomic_dec_and_test(&qp->refcount))
                        wake_up(&qp->wait);
        }
}

static int qib_query_device(struct ib_device *ibdev,
                            struct ib_device_attr *props)
{
        struct qib_devdata *dd = dd_from_ibdev(ibdev);
        struct qib_ibdev *dev = to_idev(ibdev);

        memset(props, 0, sizeof(*props));

        props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
                IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
                IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
                IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
        props->page_size_cap = PAGE_SIZE;
        props->vendor_id =
                QIB_SRC_OUI_1 << 16 | QIB_SRC_OUI_2 << 8 | QIB_SRC_OUI_3;
        props->vendor_part_id = dd->deviceid;
        props->hw_ver = dd->minrev;
        props->sys_image_guid = ib_qib_sys_image_guid;
        props->max_mr_size = ~0ULL;
        props->max_qp = ib_qib_max_qps;
        props->max_qp_wr = ib_qib_max_qp_wrs;
        props->max_sge = ib_qib_max_sges;
        props->max_cq = ib_qib_max_cqs;
        props->max_ah = ib_qib_max_ahs;
        props->max_cqe = ib_qib_max_cqes;
        props->max_mr = dev->lk_table.max;
        props->max_fmr = dev->lk_table.max;
        props->max_map_per_fmr = 32767;
        props->max_pd = ib_qib_max_pds;
        props->max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
        props->max_qp_init_rd_atom = 255;
        /* props->max_res_rd_atom */
        props->max_srq = ib_qib_max_srqs;
        props->max_srq_wr = ib_qib_max_srq_wrs;
        props->max_srq_sge = ib_qib_max_srq_sges;
        /* props->local_ca_ack_delay */
        props->atomic_cap = IB_ATOMIC_GLOB;
        props->max_pkeys = qib_get_npkeys(dd);
        props->max_mcast_grp = ib_qib_max_mcast_grps;
        props->max_mcast_qp_attach = ib_qib_max_mcast_qp_attached;
        props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
                props->max_mcast_grp;

        return 0;
}
static int qib_query_port(struct ib_device *ibdev, u8 port,
			  struct ib_port_attr *props)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	enum ib_mtu mtu;
	u16 lid = ppd->lid;

	memset(props, 0, sizeof(*props));
	props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
	props->lmc = ppd->lmc;
	props->sm_lid = ibp->sm_lid;
	props->sm_sl = ibp->sm_sl;
	props->state = dd->f_iblink_state(ppd->lastibcstat);
	props->phys_state = dd->f_ibphys_portstate(ppd->lastibcstat);
	props->port_cap_flags = ibp->port_cap_flags;
	props->gid_tbl_len = QIB_GUIDS_PER_PORT;
	props->max_msg_sz = 0x80000000;
	props->pkey_tbl_len = qib_get_npkeys(dd);
	props->bad_pkey_cntr = ibp->pkey_violations;
	props->qkey_viol_cntr = ibp->qkey_violations;
	props->active_width = ppd->link_width_active;
	/* See rate_show() */
	props->active_speed = ppd->link_speed_active;
	props->max_vl_num = qib_num_vls(ppd->vls_supported);
	props->init_type_reply = 0;
	props->max_mtu = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
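	/*
	 * Map the hardware MTU in bytes back onto the enum ib_mtu
	 * encoding; unrecognized values fall back to 2048.
	 */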
	switch (ppd->ibmtu) {
	case 4096:
		mtu = IB_MTU_4096;
		break;
	case 2048:
		mtu = IB_MTU_2048;
		break;
	case 1024:
		mtu = IB_MTU_1024;
		break;
	case 512:
		mtu = IB_MTU_512;
		break;
	case 256:
		mtu = IB_MTU_256;
		break;
	default:
		mtu = IB_MTU_2048;
	}
	props->active_mtu = mtu;
	props->subnet_timeout = ibp->subnet_timeout;

	return 0;
}
static int qib_modify_device(struct ib_device *device,
			     int device_modify_mask,
			     struct ib_device_modify *device_modify)
{
	struct qib_devdata *dd = dd_from_ibdev(device);
	unsigned i;
	int ret;

	if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
				   IB_DEVICE_MODIFY_NODE_DESC)) {
		ret = -EOPNOTSUPP;
		goto bail;
	}
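	/*
	 * NodeDescription is a fixed 64-byte field (IBTA vol. 1), so copy
	 * the whole buffer, then notify each port's management agent of
	 * the change.
	 */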
	if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(device->node_desc, device_modify->node_desc, 64);
		for (i = 0; i < dd->num_pports; i++) {
			struct qib_ibport *ibp = &dd->pport[i].ibport_data;

			qib_node_desc_chg(ibp);
		}
	}

	if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
		ib_qib_sys_image_guid =
			cpu_to_be64(device_modify->sys_image_guid);
		for (i = 0; i < dd->num_pports; i++) {
			struct qib_ibport *ibp = &dd->pport[i].ibport_data;

			qib_sys_guid_chg(ibp);
		}
	}

	ret = 0;

bail:
	return ret;
}
static int qib_modify_port(struct ib_device *ibdev, u8 port,
			   int port_modify_mask, struct ib_port_modify *props)
{
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);

	ibp->port_cap_flags |= props->set_port_cap_mask;
	ibp->port_cap_flags &= ~props->clr_port_cap_mask;
	if (props->set_port_cap_mask || props->clr_port_cap_mask)
		qib_cap_mask_chg(ibp);
	if (port_modify_mask & IB_PORT_SHUTDOWN)
		qib_set_linkstate(ppd, QIB_IB_LINKDOWN);
	if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
		ibp->qkey_violations = 0;
	return 0;
}
static int qib_query_gid(struct ib_device *ibdev, u8 port,
			 int index, union ib_gid *gid)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	int ret = 0;

	if (!port || port > dd->num_pports)
		ret = -EINVAL;
	else {
		struct qib_ibport *ibp = to_iport(ibdev, port);
		struct qib_pportdata *ppd = ppd_from_ibp(ibp);

		gid->global.subnet_prefix = ibp->gid_prefix;
		if (index == 0)
			gid->global.interface_id = ppd->guid;
		else if (index < QIB_GUIDS_PER_PORT)
			gid->global.interface_id = ibp->guids[index - 1];
		else
			ret = -EINVAL;
	}

	return ret;
}
static struct ib_pd *qib_alloc_pd(struct ib_device *ibdev,
				  struct ib_ucontext *context,
				  struct ib_udata *udata)
{
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_pd *pd;
	struct ib_pd *ret;

	/*
	 * This is actually totally arbitrary.  Some correctness tests
	 * assume there's a maximum number of PDs that can be allocated.
	 * We don't actually have this limit, but we fail the test if
	 * we allow allocations of more than we report for this value.
	 */
	pd = kmalloc(sizeof *pd, GFP_KERNEL);
	if (!pd) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	spin_lock(&dev->n_pds_lock);
	if (dev->n_pds_allocated == ib_qib_max_pds) {
		spin_unlock(&dev->n_pds_lock);
		kfree(pd);
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	dev->n_pds_allocated++;
	spin_unlock(&dev->n_pds_lock);

	/* ib_alloc_pd() will initialize pd->ibpd. */
	pd->user = udata != NULL;

	ret = &pd->ibpd;

bail:
	return ret;
}
static int qib_dealloc_pd(struct ib_pd *ibpd)
{
	struct qib_pd *pd = to_ipd(ibpd);
	struct qib_ibdev *dev = to_idev(ibpd->device);

	spin_lock(&dev->n_pds_lock);
	dev->n_pds_allocated--;
	spin_unlock(&dev->n_pds_lock);

	kfree(pd);

	return 0;
}
int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr)
{
	/* A multicast address requires a GRH (see ch. 8.4.1). */
	if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE &&
	    ah_attr->dlid != QIB_PERMISSIVE_LID &&
	    !(ah_attr->ah_flags & IB_AH_GRH))
		goto bail;
	if ((ah_attr->ah_flags & IB_AH_GRH) &&
	    ah_attr->grh.sgid_index >= QIB_GUIDS_PER_PORT)
		goto bail;
	if (ah_attr->dlid == 0)
		goto bail;
	if (ah_attr->port_num < 1 ||
	    ah_attr->port_num > ibdev->phys_port_cnt)
		goto bail;
	if (ah_attr->static_rate != IB_RATE_PORT_CURRENT &&
	    ib_rate_to_mult(ah_attr->static_rate) < 0)
		goto bail;
	if (ah_attr->sl > 15)
		goto bail;
	return 0;

bail:
	return -EINVAL;
}
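/*
 * Illustrative sketch (not part of this driver): a minimal unicast
 * ib_ah_attr that would pass qib_check_ah() on port 1:
 *
 *	struct ib_ah_attr attr = {
 *		.dlid		= 1,
 *		.sl		= 0,
 *		.port_num	= 1,
 *		.static_rate	= IB_RATE_PORT_CURRENT,
 *	};
 *
 * A multicast dlid (>= QIB_MULTICAST_LID_BASE) would additionally need
 * IB_AH_GRH set in ah_flags and a valid grh.sgid_index.
 */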
/**
 * qib_create_ah - create an address handle
 * @pd: the protection domain
 * @ah_attr: the attributes of the AH
 *
 * This may be called from interrupt context.
 */
static struct ib_ah *qib_create_ah(struct ib_pd *pd,
				   struct ib_ah_attr *ah_attr)
{
	struct qib_ah *ah;
	struct ib_ah *ret;
	struct qib_ibdev *dev = to_idev(pd->device);
	unsigned long flags;

	if (qib_check_ah(pd->device, ah_attr)) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	ah = kmalloc(sizeof *ah, GFP_ATOMIC);
	if (!ah) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	spin_lock_irqsave(&dev->n_ahs_lock, flags);
	if (dev->n_ahs_allocated == ib_qib_max_ahs) {
		spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
		kfree(ah);
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	dev->n_ahs_allocated++;
	spin_unlock_irqrestore(&dev->n_ahs_lock, flags);

	/* ib_create_ah() will initialize ah->ibah. */
	ah->attr = *ah_attr;
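	/*
	 * The refcount tracks QPs currently holding a reference to this
	 * AH; qib_destroy_ah() returns -EBUSY while it is non-zero.
	 */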
	atomic_set(&ah->refcount, 0);

	ret = &ah->ibah;

bail:
	return ret;
}
/**
 * qib_destroy_ah - destroy an address handle
 * @ibah: the AH to destroy
 *
 * This may be called from interrupt context.
 */
static int qib_destroy_ah(struct ib_ah *ibah)
{
	struct qib_ibdev *dev = to_idev(ibah->device);
	struct qib_ah *ah = to_iah(ibah);
	unsigned long flags;

	if (atomic_read(&ah->refcount) != 0)
		return -EBUSY;

	spin_lock_irqsave(&dev->n_ahs_lock, flags);
	dev->n_ahs_allocated--;
	spin_unlock_irqrestore(&dev->n_ahs_lock, flags);

	kfree(ah);

	return 0;
}
static int qib_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
{
	struct qib_ah *ah = to_iah(ibah);

	if (qib_check_ah(ibah->device, ah_attr))
		return -EINVAL;

	ah->attr = *ah_attr;

	return 0;
}

static int qib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
{
	struct qib_ah *ah = to_iah(ibah);

	*ah_attr = ah->attr;

	return 0;
}
/**
 * qib_get_npkeys - return the size of the PKEY table for context 0
 * @dd: the qlogic_ib device
 */
unsigned qib_get_npkeys(struct qib_devdata *dd)
{
	return ARRAY_SIZE(dd->rcd[0]->pkeys);
}

/*
 * Return the indexed PKEY from the port PKEY table.
 * No need to validate rcd[ctxt]; the port is set up if we are here.
 */
unsigned qib_get_pkey(struct qib_ibport *ibp, unsigned index)
{
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = ppd->dd;
	unsigned ctxt = ppd->hw_pidx;
	unsigned ret;

	/* dd->rcd null if mini_init or some init failures */
	if (!dd->rcd || index >= ARRAY_SIZE(dd->rcd[ctxt]->pkeys))
		ret = 0;
	else
		ret = dd->rcd[ctxt]->pkeys[index];

	return ret;
}
static int qib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			  u16 *pkey)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	int ret;

	if (index >= qib_get_npkeys(dd)) {
		ret = -EINVAL;
		goto bail;
	}

	*pkey = qib_get_pkey(to_iport(ibdev, port), index);
	ret = 0;

bail:
	return ret;
}
/**
 * qib_alloc_ucontext - allocate a ucontext
 * @ibdev: the infiniband device
 * @udata: not used by the QLogic_IB driver
 */
static struct ib_ucontext *qib_alloc_ucontext(struct ib_device *ibdev,
					      struct ib_udata *udata)
{
	struct qib_ucontext *context;
	struct ib_ucontext *ret;

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	ret = &context->ibucontext;

bail:
	return ret;
}

static int qib_dealloc_ucontext(struct ib_ucontext *context)
{
	kfree(to_iucontext(context));
	return 0;
}
static void init_ibport(struct qib_pportdata *ppd)
{
	struct qib_verbs_counters cntrs;
	struct qib_ibport *ibp = &ppd->ibport_data;

	spin_lock_init(&ibp->lock);
	/* Set the prefix to the default value (see ch. 4.1.1) */
	ibp->gid_prefix = IB_DEFAULT_GID_PREFIX;
	ibp->sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
	ibp->port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP |
		IB_PORT_CLIENT_REG_SUP | IB_PORT_SL_MAP_SUP |
		IB_PORT_TRAP_SUP | IB_PORT_AUTO_MIGR_SUP |
		IB_PORT_DR_NOTICE_SUP | IB_PORT_CAP_MASK_NOTICE_SUP |
		IB_PORT_OTHER_LOCAL_CHANGES_SUP;
	if (ppd->dd->flags & QIB_HAS_LINK_LATENCY)
		ibp->port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
	ibp->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
	ibp->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
	ibp->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
	ibp->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
	ibp->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;

	/*
	 * Snapshot current HW counters to "clear" them: the z_* fields
	 * record baselines that are later subtracted from the live
	 * hardware counters when the counters are reported.
	 */
	qib_get_counters(ppd, &cntrs);
	ibp->z_symbol_error_counter = cntrs.symbol_error_counter;
	ibp->z_link_error_recovery_counter =
		cntrs.link_error_recovery_counter;
	ibp->z_link_downed_counter = cntrs.link_downed_counter;
	ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
	ibp->z_port_rcv_remphys_errors = cntrs.port_rcv_remphys_errors;
	ibp->z_port_xmit_discards = cntrs.port_xmit_discards;
	ibp->z_port_xmit_data = cntrs.port_xmit_data;
	ibp->z_port_rcv_data = cntrs.port_rcv_data;
	ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
	ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
	ibp->z_local_link_integrity_errors =
		cntrs.local_link_integrity_errors;
	ibp->z_excessive_buffer_overrun_errors =
		cntrs.excessive_buffer_overrun_errors;
	ibp->z_vl15_dropped = cntrs.vl15_dropped;
	RCU_INIT_POINTER(ibp->qp0, NULL);
	RCU_INIT_POINTER(ibp->qp1, NULL);
}
/**
 * qib_register_ib_device - register our device with the infiniband core
 * @dd: the device data structure
 *
 * Return 0 on success, a negative errno on failure.
 */
int qib_register_ib_device(struct qib_devdata *dd)
{
	struct qib_ibdev *dev = &dd->verbs_dev;
	struct ib_device *ibdev = &dev->ibdev;
	struct qib_pportdata *ppd = dd->pport;
	unsigned i, lk_tab_size;
	int ret;

	dev->qp_table_size = ib_qib_qp_table_size;
	get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd));
	dev->qp_table = kmalloc(dev->qp_table_size * sizeof *dev->qp_table,
				GFP_KERNEL);
	if (!dev->qp_table) {
		ret = -ENOMEM;
		goto err_qpt;
	}
	for (i = 0; i < dev->qp_table_size; i++)
		RCU_INIT_POINTER(dev->qp_table[i], NULL);

	for (i = 0; i < dd->num_pports; i++)
		init_ibport(ppd + i);

	/* Only need to initialize non-zero fields. */
	spin_lock_init(&dev->qpt_lock);
	spin_lock_init(&dev->n_pds_lock);
	spin_lock_init(&dev->n_ahs_lock);
	spin_lock_init(&dev->n_cqs_lock);
	spin_lock_init(&dev->n_qps_lock);
	spin_lock_init(&dev->n_srqs_lock);
	spin_lock_init(&dev->n_mcast_grps_lock);
	init_timer(&dev->mem_timer);
	dev->mem_timer.function = mem_timer;
	dev->mem_timer.data = (unsigned long) dev;

	qib_init_qpn_table(dd, &dev->qpn_table);

	/*
	 * The top ib_qib_lkey_table_size bits are used to index the
	 * table.  The lower 8 bits can be owned by the user (copied from
	 * the LKEY).  The remaining bits act as a generation number or tag.
	 */
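	/*
	 * Worked example (assuming ib_qib_lkey_table_size is at its
	 * default module-parameter value of 16): max = 1 << 16 = 65536
	 * entries, so lk_tab_size is 65536 * sizeof(struct qib_mregion *),
	 * i.e. 512 KB on a 64-bit kernel, rounded up to whole pages by
	 * get_order() below.
	 */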
	spin_lock_init(&dev->lk_table.lock);
	dev->lk_table.max = 1 << ib_qib_lkey_table_size;
	lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
	dev->lk_table.table = (struct qib_mregion **)
		__get_free_pages(GFP_KERNEL, get_order(lk_tab_size));
	if (dev->lk_table.table == NULL) {
		ret = -ENOMEM;
		goto err_lk;
	}
	memset(dev->lk_table.table, 0, lk_tab_size);
	INIT_LIST_HEAD(&dev->pending_mmaps);
	spin_lock_init(&dev->pending_lock);
	dev->mmap_offset = PAGE_SIZE;
	spin_lock_init(&dev->mmap_offset_lock);
	INIT_LIST_HEAD(&dev->piowait);
	INIT_LIST_HEAD(&dev->dmawait);
	INIT_LIST_HEAD(&dev->txwait);
	INIT_LIST_HEAD(&dev->memwait);
	INIT_LIST_HEAD(&dev->txreq_free);

	if (ppd->sdma_descq_cnt) {
		dev->pio_hdrs = dma_alloc_coherent(&dd->pcidev->dev,
						   ppd->sdma_descq_cnt *
						   sizeof(struct qib_pio_header),
						   &dev->pio_hdrs_phys,
						   GFP_KERNEL);
		if (!dev->pio_hdrs) {
			ret = -ENOMEM;
			goto err_hdrs;
		}
	}
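	/*
	 * Pre-populate the txreq free list with one request per SDMA
	 * descriptor; each remembers its index into the PIO header
	 * array allocated above.
	 */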
	for (i = 0; i < ppd->sdma_descq_cnt; i++) {
		struct qib_verbs_txreq *tx;

		tx = kzalloc(sizeof *tx, GFP_KERNEL);
		if (!tx) {
			ret = -ENOMEM;
			goto err_tx;
		}
		tx->hdr_inx = i;
		list_add(&tx->txreq.list, &dev->txreq_free);
	}

	/*
	 * The system image GUID is supposed to be the same for all
	 * IB HCAs in a single system but since there can be other
	 * device types in the system, we can't be sure this is unique.
	 */
	if (!ib_qib_sys_image_guid)
		ib_qib_sys_image_guid = ppd->guid;

	strlcpy(ibdev->name, "qib%d", IB_DEVICE_NAME_MAX);
	ibdev->owner = THIS_MODULE;
	ibdev->node_guid = ppd->guid;
	ibdev->uverbs_abi_ver = QIB_UVERBS_ABI_VERSION;
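	/*
	 * Each bit set here advertises a uverbs command this driver
	 * implements; the uverbs core rejects user-space commands whose
	 * bit is clear.
	 */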
	ibdev->uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_CREATE_AH) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_AH) |
		(1ull << IB_USER_VERBS_CMD_QUERY_AH) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_POLL_CQ) |
		(1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_POST_SEND) |
		(1ull << IB_USER_VERBS_CMD_POST_RECV) |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
	ibdev->node_type = RDMA_NODE_IB_CA;
	ibdev->phys_port_cnt = dd->num_pports;
	ibdev->num_comp_vectors = 1;
	ibdev->dma_device = &dd->pcidev->dev;
	ibdev->query_device = qib_query_device;
	ibdev->modify_device = qib_modify_device;
	ibdev->query_port = qib_query_port;
	ibdev->modify_port = qib_modify_port;
	ibdev->query_pkey = qib_query_pkey;
	ibdev->query_gid = qib_query_gid;
	ibdev->alloc_ucontext = qib_alloc_ucontext;
	ibdev->dealloc_ucontext = qib_dealloc_ucontext;
	ibdev->alloc_pd = qib_alloc_pd;
	ibdev->dealloc_pd = qib_dealloc_pd;
	ibdev->create_ah = qib_create_ah;
	ibdev->destroy_ah = qib_destroy_ah;
	ibdev->modify_ah = qib_modify_ah;
	ibdev->query_ah = qib_query_ah;
	ibdev->create_srq = qib_create_srq;
	ibdev->modify_srq = qib_modify_srq;
	ibdev->query_srq = qib_query_srq;
	ibdev->destroy_srq = qib_destroy_srq;
	ibdev->create_qp = qib_create_qp;
	ibdev->modify_qp = qib_modify_qp;
	ibdev->query_qp = qib_query_qp;
	ibdev->destroy_qp = qib_destroy_qp;
	ibdev->post_send = qib_post_send;
	ibdev->post_recv = qib_post_receive;
	ibdev->post_srq_recv = qib_post_srq_receive;
	ibdev->create_cq = qib_create_cq;
	ibdev->destroy_cq = qib_destroy_cq;
	ibdev->resize_cq = qib_resize_cq;
	ibdev->poll_cq = qib_poll_cq;
	ibdev->req_notify_cq = qib_req_notify_cq;
	ibdev->get_dma_mr = qib_get_dma_mr;
	ibdev->reg_phys_mr = qib_reg_phys_mr;
	ibdev->reg_user_mr = qib_reg_user_mr;
	ibdev->dereg_mr = qib_dereg_mr;
	ibdev->alloc_fast_reg_mr = qib_alloc_fast_reg_mr;
	ibdev->alloc_fast_reg_page_list = qib_alloc_fast_reg_page_list;
	ibdev->free_fast_reg_page_list = qib_free_fast_reg_page_list;
	ibdev->alloc_fmr = qib_alloc_fmr;
	ibdev->map_phys_fmr = qib_map_phys_fmr;
	ibdev->unmap_fmr = qib_unmap_fmr;
	ibdev->dealloc_fmr = qib_dealloc_fmr;
	ibdev->attach_mcast = qib_multicast_attach;
	ibdev->detach_mcast = qib_multicast_detach;
	ibdev->process_mad = qib_process_mad;
	ibdev->mmap = qib_mmap;
	ibdev->dma_ops = &qib_dma_mapping_ops;

	snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
		 QIB_IDSTR " %s", init_utsname()->nodename);

	ret = ib_register_device(ibdev, qib_create_port_files);
	if (ret)
		goto err_reg;

	ret = qib_create_agents(dev);
	if (ret)
		goto err_agents;

	if (qib_verbs_register_sysfs(dd))
		goto err_class;

	goto bail;
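	/*
	 * Error unwind: the labels below run in reverse order of setup,
	 * so each failure point frees only what was already allocated.
	 */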
err_class:
	qib_free_agents(dev);
err_agents:
	ib_unregister_device(ibdev);
err_reg:
err_tx:
	while (!list_empty(&dev->txreq_free)) {
		struct list_head *l = dev->txreq_free.next;
		struct qib_verbs_txreq *tx;

		list_del(l);
		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
		kfree(tx);
	}
	if (ppd->sdma_descq_cnt)
		dma_free_coherent(&dd->pcidev->dev,
				  ppd->sdma_descq_cnt *
					sizeof(struct qib_pio_header),
				  dev->pio_hdrs, dev->pio_hdrs_phys);
err_hdrs:
	free_pages((unsigned long) dev->lk_table.table, get_order(lk_tab_size));
err_lk:
	kfree(dev->qp_table);
err_qpt:
	qib_dev_err(dd, "cannot register verbs: %d!\n", -ret);
bail:
	return ret;
}
void qib_unregister_ib_device(struct qib_devdata *dd)
{
	struct qib_ibdev *dev = &dd->verbs_dev;
	struct ib_device *ibdev = &dev->ibdev;
	u32 qps_inuse;
	unsigned lk_tab_size;

	qib_verbs_unregister_sysfs(dd);

	qib_free_agents(dev);

	ib_unregister_device(ibdev);

	if (!list_empty(&dev->piowait))
		qib_dev_err(dd, "piowait list not empty!\n");
	if (!list_empty(&dev->dmawait))
		qib_dev_err(dd, "dmawait list not empty!\n");
	if (!list_empty(&dev->txwait))
		qib_dev_err(dd, "txwait list not empty!\n");
	if (!list_empty(&dev->memwait))
		qib_dev_err(dd, "memwait list not empty!\n");
	if (dev->dma_mr)
		qib_dev_err(dd, "DMA MR not NULL!\n");

	qps_inuse = qib_free_all_qps(dd);
	if (qps_inuse)
		qib_dev_err(dd, "QP memory leak! %u still in use\n",
			    qps_inuse);

	del_timer_sync(&dev->mem_timer);
	qib_free_qpn_table(&dev->qpn_table);

	while (!list_empty(&dev->txreq_free)) {
		struct list_head *l = dev->txreq_free.next;
		struct qib_verbs_txreq *tx;

		list_del(l);
		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
		kfree(tx);
	}
	if (dd->pport->sdma_descq_cnt)
		dma_free_coherent(&dd->pcidev->dev,
				  dd->pport->sdma_descq_cnt *
					sizeof(struct qib_pio_header),
				  dev->pio_hdrs, dev->pio_hdrs_phys);
	lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
	free_pages((unsigned long) dev->lk_table.table,
		   get_order(lk_tab_size));
	kfree(dev->qp_table);
}