qib_verbs.c

/*
 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
 * All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_mad.h>
#include <rdma/ib_user_verbs.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/rculist.h>
#include <linux/mm.h>
#include <linux/random.h>

#include "qib.h"
#include "qib_common.h"

static unsigned int ib_qib_qp_table_size = 256;
module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");

unsigned int ib_qib_lkey_table_size = 16;
module_param_named(lkey_table_size, ib_qib_lkey_table_size, uint,
		   S_IRUGO);
MODULE_PARM_DESC(lkey_table_size,
		 "LKEY table size in bits (2^n, 1 <= n <= 23)");

static unsigned int ib_qib_max_pds = 0xFFFF;
module_param_named(max_pds, ib_qib_max_pds, uint, S_IRUGO);
MODULE_PARM_DESC(max_pds,
		 "Maximum number of protection domains to support");

static unsigned int ib_qib_max_ahs = 0xFFFF;
module_param_named(max_ahs, ib_qib_max_ahs, uint, S_IRUGO);
MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");

unsigned int ib_qib_max_cqes = 0x2FFFF;
module_param_named(max_cqes, ib_qib_max_cqes, uint, S_IRUGO);
MODULE_PARM_DESC(max_cqes,
		 "Maximum number of completion queue entries to support");

unsigned int ib_qib_max_cqs = 0x1FFFF;
module_param_named(max_cqs, ib_qib_max_cqs, uint, S_IRUGO);
MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");

unsigned int ib_qib_max_qp_wrs = 0x3FFF;
module_param_named(max_qp_wrs, ib_qib_max_qp_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");

unsigned int ib_qib_max_qps = 16384;
module_param_named(max_qps, ib_qib_max_qps, uint, S_IRUGO);
MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");

unsigned int ib_qib_max_sges = 0x60;
module_param_named(max_sges, ib_qib_max_sges, uint, S_IRUGO);
MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");

unsigned int ib_qib_max_mcast_grps = 16384;
module_param_named(max_mcast_grps, ib_qib_max_mcast_grps, uint, S_IRUGO);
MODULE_PARM_DESC(max_mcast_grps,
		 "Maximum number of multicast groups to support");

unsigned int ib_qib_max_mcast_qp_attached = 16;
module_param_named(max_mcast_qp_attached, ib_qib_max_mcast_qp_attached,
		   uint, S_IRUGO);
MODULE_PARM_DESC(max_mcast_qp_attached,
		 "Maximum number of attached QPs to support");

unsigned int ib_qib_max_srqs = 1024;
module_param_named(max_srqs, ib_qib_max_srqs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");

unsigned int ib_qib_max_srq_sges = 128;
module_param_named(max_srq_sges, ib_qib_max_srq_sges, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");

unsigned int ib_qib_max_srq_wrs = 0x1FFFF;
module_param_named(max_srq_wrs, ib_qib_max_srq_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");

static unsigned int ib_qib_disable_sma;
module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(disable_sma, "Disable the SMA");

/*
 * Note that it is OK to post send work requests in the SQE and ERR
 * states; qib_do_send() will process them and generate error
 * completions as per IB 1.2 C10-96.
 */
const int ib_qib_state_ops[IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = 0,
	[IB_QPS_INIT] = QIB_POST_RECV_OK,
	[IB_QPS_RTR] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK,
	[IB_QPS_RTS] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
	    QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK |
	    QIB_PROCESS_NEXT_SEND_OK,
	[IB_QPS_SQD] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
	    QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK,
	[IB_QPS_SQE] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
	    QIB_POST_SEND_OK | QIB_FLUSH_SEND,
	[IB_QPS_ERR] = QIB_POST_RECV_OK | QIB_FLUSH_RECV |
	    QIB_POST_SEND_OK | QIB_FLUSH_SEND,
};

struct qib_ucontext {
	struct ib_ucontext ibucontext;
};

static inline struct qib_ucontext *to_iucontext(struct ib_ucontext
						*ibucontext)
{
	return container_of(ibucontext, struct qib_ucontext, ibucontext);
}

/*
 * Translate ib_wr_opcode into ib_wc_opcode.
 */
const enum ib_wc_opcode ib_qib_wc_opcode[] = {
	[IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
	[IB_WR_SEND] = IB_WC_SEND,
	[IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
	[IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
};

/*
 * System image GUID.
 */
__be64 ib_qib_sys_image_guid;

/**
 * qib_copy_sge - copy data to SGE memory
 * @ss: the SGE state
 * @data: the data to copy
 * @length: the length of the data
 * @release: if non-zero, drop the MR reference as each SGE is consumed
 */
void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length, int release)
{
	struct qib_sge *sge = &ss->sge;

	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		memcpy(sge->vaddr, data, len);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (release)
				atomic_dec(&sge->mr->refcount);
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= QIB_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		data += len;
		length -= len;
	}
}

/**
 * qib_skip_sge - skip over SGE memory - XXX almost dup of prev func
 * @ss: the SGE state
 * @length: the number of bytes to skip
 * @release: if non-zero, drop the MR reference as each SGE is consumed
 */
void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release)
{
	struct qib_sge *sge = &ss->sge;

	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (release)
				atomic_dec(&sge->mr->refcount);
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= QIB_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		length -= len;
	}
}

/*
 * Count the number of DMA descriptors needed to send length bytes of data.
 * Don't modify the qib_sge_state to get the count.
 * Return zero if any of the segments is not aligned.
 */
static u32 qib_count_sge(struct qib_sge_state *ss, u32 length)
{
	struct qib_sge *sg_list = ss->sg_list;
	struct qib_sge sge = ss->sge;
	u8 num_sge = ss->num_sge;
	u32 ndesc = 1;	/* count the header */

	while (length) {
		u32 len = sge.length;

		if (len > length)
			len = length;
		if (len > sge.sge_length)
			len = sge.sge_length;
		BUG_ON(len == 0);
		if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
		    (len != length && (len & (sizeof(u32) - 1)))) {
			ndesc = 0;
			break;
		}
		ndesc++;
		sge.vaddr += len;
		sge.length -= len;
		sge.sge_length -= len;
		if (sge.sge_length == 0) {
			if (--num_sge)
				sge = *sg_list++;
		} else if (sge.length == 0 && sge.mr->lkey) {
			if (++sge.n >= QIB_SEGSZ) {
				if (++sge.m >= sge.mr->mapsz)
					break;
				sge.n = 0;
			}
			sge.vaddr =
				sge.mr->map[sge.m]->segs[sge.n].vaddr;
			sge.length =
				sge.mr->map[sge.m]->segs[sge.n].length;
		}
		length -= len;
	}
	return ndesc;
}

/*
 * Copy from the SGEs to the data buffer.
 */
static void qib_copy_from_sge(void *data, struct qib_sge_state *ss, u32 length)
{
	struct qib_sge *sge = &ss->sge;

	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		memcpy(data, sge->vaddr, len);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= QIB_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		data += len;
		length -= len;
	}
}

/**
 * qib_post_one_send - post one RC, UC, or UD send work request
 * @qp: the QP to post on
 * @wr: the work request to send
 */
static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr)
{
	struct qib_swqe *wqe;
	u32 next;
	int i;
	int j;
	int acc;
	int ret;
	unsigned long flags;
	struct qib_lkey_table *rkt;
	struct qib_pd *pd;

	spin_lock_irqsave(&qp->s_lock, flags);

	/* Check that state is OK to post send. */
	if (unlikely(!(ib_qib_state_ops[qp->state] & QIB_POST_SEND_OK)))
		goto bail_inval;

	/* IB spec says that num_sge == 0 is OK. */
	if (wr->num_sge > qp->s_max_sge)
		goto bail_inval;

	/*
	 * Don't allow RDMA reads or atomic operations on UC or
	 * undefined operations.
	 * Make sure buffer is large enough to hold the result for atomics.
	 */
	if (wr->opcode == IB_WR_FAST_REG_MR) {
		if (qib_fast_reg_mr(qp, wr))
			goto bail_inval;
	} else if (qp->ibqp.qp_type == IB_QPT_UC) {
		if ((unsigned) wr->opcode >= IB_WR_RDMA_READ)
			goto bail_inval;
	} else if (qp->ibqp.qp_type != IB_QPT_RC) {
		/* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */
		if (wr->opcode != IB_WR_SEND &&
		    wr->opcode != IB_WR_SEND_WITH_IMM)
			goto bail_inval;
		/* Check UD destination address PD */
		if (qp->ibqp.pd != wr->wr.ud.ah->pd)
			goto bail_inval;
	} else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD)
		goto bail_inval;
	else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
		 (wr->num_sge == 0 ||
		  wr->sg_list[0].length < sizeof(u64) ||
		  wr->sg_list[0].addr & (sizeof(u64) - 1)))
		goto bail_inval;
	else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic)
		goto bail_inval;

	next = qp->s_head + 1;
	if (next >= qp->s_size)
		next = 0;
	if (next == qp->s_last) {
		ret = -ENOMEM;
		goto bail;
	}

	rkt = &to_idev(qp->ibqp.device)->lk_table;
	pd = to_ipd(qp->ibqp.pd);
	wqe = get_swqe_ptr(qp, qp->s_head);
	wqe->wr = *wr;
	wqe->length = 0;
	j = 0;
	if (wr->num_sge) {
		acc = wr->opcode >= IB_WR_RDMA_READ ?
			IB_ACCESS_LOCAL_WRITE : 0;
		for (i = 0; i < wr->num_sge; i++) {
			u32 length = wr->sg_list[i].length;
			int ok;

			if (length == 0)
				continue;
			ok = qib_lkey_ok(rkt, pd, &wqe->sg_list[j],
					 &wr->sg_list[i], acc);
			if (!ok)
				goto bail_inval_free;
			wqe->length += length;
			j++;
		}
		wqe->wr.num_sge = j;
	}
	if (qp->ibqp.qp_type == IB_QPT_UC ||
	    qp->ibqp.qp_type == IB_QPT_RC) {
		if (wqe->length > 0x80000000U)
			goto bail_inval_free;
	} else if (wqe->length > (dd_from_ibdev(qp->ibqp.device)->pport +
				  qp->port_num - 1)->ibmtu)
		goto bail_inval_free;
	else
		atomic_inc(&to_iah(wr->wr.ud.ah)->refcount);
	wqe->ssn = qp->s_ssn++;
	qp->s_head = next;

	ret = 0;
	goto bail;

bail_inval_free:
	while (j) {
		struct qib_sge *sge = &wqe->sg_list[--j];

		atomic_dec(&sge->mr->refcount);
	}
bail_inval:
	ret = -EINVAL;
bail:
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return ret;
}

/**
 * qib_post_send - post a send on a QP
 * @ibqp: the QP to post the send on
 * @wr: the list of work requests to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 */
static int qib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			 struct ib_send_wr **bad_wr)
{
	struct qib_qp *qp = to_iqp(ibqp);
	int err = 0;

	for (; wr; wr = wr->next) {
		err = qib_post_one_send(qp, wr);
		if (err) {
			*bad_wr = wr;
			goto bail;
		}
	}

	/* Try to do the send work in the caller's context. */
	qib_do_send(&qp->s_work);

bail:
	return err;
}

/**
 * qib_post_receive - post a receive on a QP
 * @ibqp: the QP to post the receive on
 * @wr: the WR to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 */
static int qib_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			    struct ib_recv_wr **bad_wr)
{
	struct qib_qp *qp = to_iqp(ibqp);
	struct qib_rwq *wq = qp->r_rq.wq;
	unsigned long flags;
	int ret;

	/* Check that state is OK to post receive. */
	if (!(ib_qib_state_ops[qp->state] & QIB_POST_RECV_OK) || !wq) {
		*bad_wr = wr;
		ret = -EINVAL;
		goto bail;
	}

	for (; wr; wr = wr->next) {
		struct qib_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
			*bad_wr = wr;
			ret = -EINVAL;
			goto bail;
		}

		spin_lock_irqsave(&qp->r_rq.lock, flags);
		next = wq->head + 1;
		if (next >= qp->r_rq.size)
			next = 0;
		if (next == wq->tail) {
			spin_unlock_irqrestore(&qp->r_rq.lock, flags);
			*bad_wr = wr;
			ret = -ENOMEM;
			goto bail;
		}

		wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
		wqe->wr_id = wr->wr_id;
		wqe->num_sge = wr->num_sge;
		for (i = 0; i < wr->num_sge; i++)
			wqe->sg_list[i] = wr->sg_list[i];
		/* Make sure queue entry is written before the head index. */
		smp_wmb();
		wq->head = next;
		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
	}
	ret = 0;

bail:
	return ret;
}

/**
 * qib_qp_rcv - process an incoming packet on a QP
 * @rcd: the context pointer
 * @hdr: the packet header
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP the packet came on
 *
 * This is called from qib_ib_rcv() to process an incoming packet
 * for the given QP.
 * Called at interrupt level.
 */
static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
		       int has_grh, void *data, u32 tlen, struct qib_qp *qp)
{
	struct qib_ibport *ibp = &rcd->ppd->ibport_data;

	spin_lock(&qp->r_lock);

	/* Check for valid receive state. */
	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
		ibp->n_pkt_drops++;
		goto unlock;
	}

	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (ib_qib_disable_sma)
			break;
		/* FALLTHROUGH */
	case IB_QPT_UD:
		qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp);
		break;

	case IB_QPT_RC:
		qib_rc_rcv(rcd, hdr, has_grh, data, tlen, qp);
		break;

	case IB_QPT_UC:
		qib_uc_rcv(ibp, hdr, has_grh, data, tlen, qp);
		break;

	default:
		break;
	}

unlock:
	spin_unlock(&qp->r_lock);
}

/**
 * qib_ib_rcv - process an incoming packet
 * @rcd: the context pointer
 * @rhdr: the header of the packet
 * @data: the packet payload
 * @tlen: the packet length
 *
 * This is called from qib_kreceive() to process an incoming packet at
 * interrupt level. Tlen is the length of the header + data + CRC in bytes.
 */
void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
{
	struct qib_pportdata *ppd = rcd->ppd;
	struct qib_ibport *ibp = &ppd->ibport_data;
	struct qib_ib_header *hdr = rhdr;
	struct qib_other_headers *ohdr;
	struct qib_qp *qp;
	u32 qp_num;
	int lnh;
	u8 opcode;
	u16 lid;

	/* 24 == LRH+BTH+CRC */
	if (unlikely(tlen < 24))
		goto drop;

	/* Check for a valid destination LID (see ch. 7.11.1). */
	lid = be16_to_cpu(hdr->lrh[1]);
	if (lid < QIB_MULTICAST_LID_BASE) {
		lid &= ~((1 << ppd->lmc) - 1);
		if (unlikely(lid != ppd->lid))
			goto drop;
	}

	/* Check for GRH */
	lnh = be16_to_cpu(hdr->lrh[0]) & 3;
	if (lnh == QIB_LRH_BTH)
		ohdr = &hdr->u.oth;
	else if (lnh == QIB_LRH_GRH) {
		u32 vtf;

		ohdr = &hdr->u.l.oth;
		if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
			goto drop;
		vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
		if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
			goto drop;
	} else
		goto drop;

	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	ibp->opstats[opcode & 0x7f].n_bytes += tlen;
	ibp->opstats[opcode & 0x7f].n_packets++;

	/* Get the destination QP number. */
	qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
	if (qp_num == QIB_MULTICAST_QPN) {
		struct qib_mcast *mcast;
		struct qib_mcast_qp *p;

		if (lnh != QIB_LRH_GRH)
			goto drop;
		mcast = qib_mcast_find(ibp, &hdr->u.l.grh.dgid);
		if (mcast == NULL)
			goto drop;
		ibp->n_multicast_rcv++;
		list_for_each_entry_rcu(p, &mcast->qp_list, list)
			qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp);
		/*
		 * Notify qib_multicast_detach() if it is waiting for us
		 * to finish.
		 */
		if (atomic_dec_return(&mcast->refcount) <= 1)
			wake_up(&mcast->wait);
	} else {
		if (rcd->lookaside_qp) {
			if (rcd->lookaside_qpn != qp_num) {
				if (atomic_dec_and_test(
					    &rcd->lookaside_qp->refcount))
					wake_up(
						&rcd->lookaside_qp->wait);
				rcd->lookaside_qp = NULL;
			}
		}
		if (!rcd->lookaside_qp) {
			qp = qib_lookup_qpn(ibp, qp_num);
			if (!qp)
				goto drop;
			rcd->lookaside_qp = qp;
			rcd->lookaside_qpn = qp_num;
		} else
			qp = rcd->lookaside_qp;
		ibp->n_unicast_rcv++;
		qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp);
	}
	return;

drop:
	ibp->n_pkt_drops++;
}

/*
 * This is called from a timer to check for QPs
 * which need kernel memory in order to send a packet.
 */
static void mem_timer(unsigned long data)
{
	struct qib_ibdev *dev = (struct qib_ibdev *) data;
	struct list_head *list = &dev->memwait;
	struct qib_qp *qp = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->pending_lock, flags);
	if (!list_empty(list)) {
		qp = list_entry(list->next, struct qib_qp, iowait);
		list_del_init(&qp->iowait);
		atomic_inc(&qp->refcount);
		if (!list_empty(list))
			mod_timer(&dev->mem_timer, jiffies + 1);
	}
	spin_unlock_irqrestore(&dev->pending_lock, flags);

	if (qp) {
		spin_lock_irqsave(&qp->s_lock, flags);
		if (qp->s_flags & QIB_S_WAIT_KMEM) {
			qp->s_flags &= ~QIB_S_WAIT_KMEM;
			qib_schedule_send(qp);
		}
		spin_unlock_irqrestore(&qp->s_lock, flags);
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}

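/*
 * Advance the current SGE by length bytes, moving on to the next SGE or
 * the next mapped segment of the MR as each one is consumed.  Unlike
 * qib_skip_sge(), this never drops MR references.
 */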
static void update_sge(struct qib_sge_state *ss, u32 length)
{
	struct qib_sge *sge = &ss->sge;

	sge->vaddr += length;
	sge->length -= length;
	sge->sge_length -= length;
	if (sge->sge_length == 0) {
		if (--ss->num_sge)
			*sge = *ss->sg_list++;
	} else if (sge->length == 0 && sge->mr->lkey) {
		if (++sge->n >= QIB_SEGSZ) {
			if (++sge->m >= sge->mr->mapsz)
				return;
			sge->n = 0;
		}
		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
	}
}

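/*
 * Helpers used by copy_io() to assemble 32-bit PIO words from source data
 * that is not word aligned.  "Upper" refers to the bytes that logically
 * follow the bytes already accumulated, so the shift direction differs
 * between little- and big-endian hosts.
 */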
#ifdef __LITTLE_ENDIAN
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#else
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#endif

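/*
 * Copy the payload described by ss into a PIO buffer one 32-bit word at a
 * time, accumulating partial words when the SGEs are not word aligned.
 * The final word is held back and written as the trigger word, with write
 * combining flushed around it when flush_wc is set.
 */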
static void copy_io(u32 __iomem *piobuf, struct qib_sge_state *ss,
		    u32 length, unsigned flush_wc)
{
	u32 extra = 0;
	u32 data = 0;
	u32 last;

	while (1) {
		u32 len = ss->sge.length;
		u32 off;

		if (len > length)
			len = length;
		if (len > ss->sge.sge_length)
			len = ss->sge.sge_length;
		BUG_ON(len == 0);
		/* If the source address is not aligned, try to align it. */
		off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
		if (off) {
			u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
					    ~(sizeof(u32) - 1));
			u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
			u32 y;

			y = sizeof(u32) - off;
			if (len > y)
				len = y;
			if (len + extra >= sizeof(u32)) {
				data |= set_upper_bits(v, extra *
						       BITS_PER_BYTE);
				len = sizeof(u32) - extra;
				if (len == length) {
					last = data;
					break;
				}
				__raw_writel(data, piobuf);
				piobuf++;
				extra = 0;
				data = 0;
			} else {
				/* Clear unused upper bytes */
				data |= clear_upper_bytes(v, len, extra);
				if (len == length) {
					last = data;
					break;
				}
				extra += len;
			}
		} else if (extra) {
			/* Source address is aligned. */
			u32 *addr = (u32 *) ss->sge.vaddr;
			int shift = extra * BITS_PER_BYTE;
			int ushift = 32 - shift;
			u32 l = len;

			while (l >= sizeof(u32)) {
				u32 v = *addr;

				data |= set_upper_bits(v, shift);
				__raw_writel(data, piobuf);
				data = get_upper_bits(v, ushift);
				piobuf++;
				addr++;
				l -= sizeof(u32);
			}
			/*
			 * We still have 'extra' number of bytes leftover.
			 */
			if (l) {
				u32 v = *addr;

				if (l + extra >= sizeof(u32)) {
					data |= set_upper_bits(v, shift);
					len -= l + extra - sizeof(u32);
					if (len == length) {
						last = data;
						break;
					}
					__raw_writel(data, piobuf);
					piobuf++;
					extra = 0;
					data = 0;
				} else {
					/* Clear unused upper bytes */
					data |= clear_upper_bytes(v, l, extra);
					if (len == length) {
						last = data;
						break;
					}
					extra += l;
				}
			} else if (len == length) {
				last = data;
				break;
			}
		} else if (len == length) {
			u32 w;

			/*
			 * Need to round up for the last dword in the
			 * packet.
			 */
			w = (len + 3) >> 2;
			qib_pio_copy(piobuf, ss->sge.vaddr, w - 1);
			piobuf += w - 1;
			last = ((u32 *) ss->sge.vaddr)[w - 1];
			break;
		} else {
			u32 w = len >> 2;

			qib_pio_copy(piobuf, ss->sge.vaddr, w);
			piobuf += w;
			extra = len & (sizeof(u32) - 1);
			if (extra) {
				u32 v = ((u32 *) ss->sge.vaddr)[w];

				/* Clear unused upper bytes */
				data = clear_upper_bytes(v, extra, 0);
			}
		}
		update_sge(ss, len);
		length -= len;
	}
	/* Update address before sending packet. */
	update_sge(ss, length);
	if (flush_wc) {
		/* must flush early everything before trigger word */
		qib_flush_wc();
		__raw_writel(last, piobuf);
		/* be sure trigger word is written */
		qib_flush_wc();
	} else
		__raw_writel(last, piobuf);
}

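/*
 * Slow path of get_txreq(): retake the free list lock under qp->s_lock so
 * the QP can be queued on dev->txwait if no tx descriptor has become
 * available, in which case ERR_PTR(-EBUSY) is returned.
 */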
static noinline struct qib_verbs_txreq *__get_txreq(struct qib_ibdev *dev,
						    struct qib_qp *qp)
{
	struct qib_verbs_txreq *tx;
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	spin_lock(&dev->pending_lock);

	if (!list_empty(&dev->txreq_free)) {
		struct list_head *l = dev->txreq_free.next;

		list_del(l);
		spin_unlock(&dev->pending_lock);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
	} else {
		if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK &&
		    list_empty(&qp->iowait)) {
			dev->n_txwait++;
			qp->s_flags |= QIB_S_WAIT_TX;
			list_add_tail(&qp->iowait, &dev->txwait);
		}
		qp->s_flags &= ~QIB_S_BUSY;
		spin_unlock(&dev->pending_lock);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		tx = ERR_PTR(-EBUSY);
	}
	return tx;
}

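/*
 * Get a free verbs tx descriptor from dev->txreq_free.  The common case
 * takes only dev->pending_lock; __get_txreq() handles the empty-list case.
 */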
static inline struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
						struct qib_qp *qp)
{
	struct qib_verbs_txreq *tx;
	unsigned long flags;

	spin_lock_irqsave(&dev->pending_lock, flags);
	/* assume the list is non-empty */
	if (likely(!list_empty(&dev->txreq_free))) {
		struct list_head *l = dev->txreq_free.next;

		list_del(l);
		spin_unlock_irqrestore(&dev->pending_lock, flags);
		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
	} else {
		/* call slow path to get the extra lock */
		spin_unlock_irqrestore(&dev->pending_lock, flags);
		tx = __get_txreq(dev, qp);
	}
	return tx;
}

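/*
 * Release a verbs tx descriptor: drop the QP and MR references, unmap and
 * free any bounce buffer, return the descriptor to dev->txreq_free, and
 * wake the first QP waiting on dev->txwait, if any.
 */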
void qib_put_txreq(struct qib_verbs_txreq *tx)
{
	struct qib_ibdev *dev;
	struct qib_qp *qp;
	unsigned long flags;

	qp = tx->qp;
	dev = to_idev(qp->ibqp.device);

	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
	if (tx->mr) {
		atomic_dec(&tx->mr->refcount);
		tx->mr = NULL;
	}
	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) {
		tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF;
		dma_unmap_single(&dd_from_dev(dev)->pcidev->dev,
				 tx->txreq.addr, tx->hdr_dwords << 2,
				 DMA_TO_DEVICE);
		kfree(tx->align_buf);
	}

	spin_lock_irqsave(&dev->pending_lock, flags);

	/* Put struct back on free list */
	list_add(&tx->txreq.list, &dev->txreq_free);

	if (!list_empty(&dev->txwait)) {
		/* Wake up first QP wanting a free struct */
		qp = list_entry(dev->txwait.next, struct qib_qp, iowait);
		list_del_init(&qp->iowait);
		atomic_inc(&qp->refcount);
		spin_unlock_irqrestore(&dev->pending_lock, flags);

		spin_lock_irqsave(&qp->s_lock, flags);
		if (qp->s_flags & QIB_S_WAIT_TX) {
			qp->s_flags &= ~QIB_S_WAIT_TX;
			qib_schedule_send(qp);
		}
		spin_unlock_irqrestore(&qp->s_lock, flags);

		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	} else
		spin_unlock_irqrestore(&dev->pending_lock, flags);
}

/*
 * This is called when there are send DMA descriptors that might be
 * available.
 *
 * This is called with ppd->sdma_lock held.
 */
void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
{
	struct qib_qp *qp, *nqp;
	struct qib_qp *qps[20];
	struct qib_ibdev *dev;
	unsigned i, n;

	n = 0;
	dev = &ppd->dd->verbs_dev;
	spin_lock(&dev->pending_lock);

	/* Search wait list for first QP wanting DMA descriptors. */
	list_for_each_entry_safe(qp, nqp, &dev->dmawait, iowait) {
		if (qp->port_num != ppd->port)
			continue;
		if (n == ARRAY_SIZE(qps))
			break;
		if (qp->s_tx->txreq.sg_count > avail)
			break;
		avail -= qp->s_tx->txreq.sg_count;
		list_del_init(&qp->iowait);
		atomic_inc(&qp->refcount);
		qps[n++] = qp;
	}

	spin_unlock(&dev->pending_lock);

	for (i = 0; i < n; i++) {
		qp = qps[i];
		spin_lock(&qp->s_lock);
		if (qp->s_flags & QIB_S_WAIT_DMA_DESC) {
			qp->s_flags &= ~QIB_S_WAIT_DMA_DESC;
			qib_schedule_send(qp);
		}
		spin_unlock(&qp->s_lock);
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}

/*
 * This is called with ppd->sdma_lock held.
 */
static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
{
	struct qib_verbs_txreq *tx =
		container_of(cookie, struct qib_verbs_txreq, txreq);
	struct qib_qp *qp = tx->qp;

	spin_lock(&qp->s_lock);
	if (tx->wqe)
		qib_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
	else if (qp->ibqp.qp_type == IB_QPT_RC) {
		struct qib_ib_header *hdr;

		if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF)
			hdr = &tx->align_buf->hdr;
		else {
			struct qib_ibdev *dev = to_idev(qp->ibqp.device);

			hdr = &dev->pio_hdrs[tx->hdr_inx].hdr;
		}
		qib_rc_send_complete(qp, hdr);
	}
	if (atomic_dec_and_test(&qp->s_dma_busy)) {
		if (qp->state == IB_QPS_RESET)
			wake_up(&qp->wait_dma);
		else if (qp->s_flags & QIB_S_WAIT_DMA) {
			qp->s_flags &= ~QIB_S_WAIT_DMA;
			qib_schedule_send(qp);
		}
	}
	spin_unlock(&qp->s_lock);

	qib_put_txreq(tx);
}

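/*
 * Queue the QP on dev->memwait until kernel memory becomes available,
 * arming mem_timer() if this is the first waiter.  Returns -EBUSY if the
 * QP was queued, or 0 if the QP is no longer in a state that can send.
 */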
static int wait_kmem(struct qib_ibdev *dev, struct qib_qp *qp)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
		spin_lock(&dev->pending_lock);
		if (list_empty(&qp->iowait)) {
			if (list_empty(&dev->memwait))
				mod_timer(&dev->mem_timer, jiffies + 1);
			qp->s_flags |= QIB_S_WAIT_KMEM;
			list_add_tail(&qp->iowait, &dev->memwait);
		}
		spin_unlock(&dev->pending_lock);
		qp->s_flags &= ~QIB_S_BUSY;
		ret = -EBUSY;
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);

	return ret;
}

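/*
 * Send a packet using the send DMA engine.  The header is built in a
 * pre-mapped PIO header slot when the payload can be described directly by
 * the SGEs; otherwise header and payload are copied into a temporary
 * buffer which is DMA mapped for the transfer.
 */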
static int qib_verbs_send_dma(struct qib_qp *qp, struct qib_ib_header *hdr,
			      u32 hdrwords, struct qib_sge_state *ss, u32 len,
			      u32 plen, u32 dwords)
{
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_verbs_txreq *tx;
	struct qib_pio_header *phdr;
	u32 control;
	u32 ndesc;
	int ret;

	tx = qp->s_tx;
	if (tx) {
		qp->s_tx = NULL;
		/* resend previously constructed packet */
		ret = qib_sdma_verbs_send(ppd, tx->ss, tx->dwords, tx);
		goto bail;
	}

	tx = get_txreq(dev, qp);
	if (IS_ERR(tx))
		goto bail_tx;

	control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
				       be16_to_cpu(hdr->lrh[0]) >> 12);
	tx->qp = qp;
	atomic_inc(&qp->refcount);
	tx->wqe = qp->s_wqe;
	tx->mr = qp->s_rdma_mr;
	if (qp->s_rdma_mr)
		qp->s_rdma_mr = NULL;
	tx->txreq.callback = sdma_complete;
	if (dd->flags & QIB_HAS_SDMA_TIMEOUT)
		tx->txreq.flags = QIB_SDMA_TXREQ_F_HEADTOHOST;
	else
		tx->txreq.flags = QIB_SDMA_TXREQ_F_INTREQ;
	if (plen + 1 > dd->piosize2kmax_dwords)
		tx->txreq.flags |= QIB_SDMA_TXREQ_F_USELARGEBUF;

	if (len) {
		/*
		 * Don't try to DMA if it takes more descriptors than
		 * the queue holds.
		 */
		ndesc = qib_count_sge(ss, len);
		if (ndesc >= ppd->sdma_descq_cnt)
			ndesc = 0;
	} else
		ndesc = 1;
	if (ndesc) {
		phdr = &dev->pio_hdrs[tx->hdr_inx];
		phdr->pbc[0] = cpu_to_le32(plen);
		phdr->pbc[1] = cpu_to_le32(control);
		memcpy(&phdr->hdr, hdr, hdrwords << 2);
		tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEDESC;
		tx->txreq.sg_count = ndesc;
		tx->txreq.addr = dev->pio_hdrs_phys +
			tx->hdr_inx * sizeof(struct qib_pio_header);
		tx->hdr_dwords = hdrwords + 2; /* add PBC length */
		ret = qib_sdma_verbs_send(ppd, ss, dwords, tx);
		goto bail;
	}

	/* Allocate a buffer and copy the header and payload to it. */
	tx->hdr_dwords = plen + 1;
	phdr = kmalloc(tx->hdr_dwords << 2, GFP_ATOMIC);
	if (!phdr)
		goto err_tx;
	phdr->pbc[0] = cpu_to_le32(plen);
	phdr->pbc[1] = cpu_to_le32(control);
	memcpy(&phdr->hdr, hdr, hdrwords << 2);
	qib_copy_from_sge((u32 *) &phdr->hdr + hdrwords, ss, len);

	tx->txreq.addr = dma_map_single(&dd->pcidev->dev, phdr,
					tx->hdr_dwords << 2, DMA_TO_DEVICE);
	if (dma_mapping_error(&dd->pcidev->dev, tx->txreq.addr))
		goto map_err;
	tx->align_buf = phdr;
	tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEBUF;
	tx->txreq.sg_count = 1;
	ret = qib_sdma_verbs_send(ppd, NULL, 0, tx);
	goto unaligned;

map_err:
	kfree(phdr);
err_tx:
	qib_put_txreq(tx);
	ret = wait_kmem(dev, qp);
unaligned:
	ibp->n_unaligned++;
bail:
	return ret;
bail_tx:
	ret = PTR_ERR(tx);
	goto bail;
}

/*
 * If we are now in the error state, return zero to flush the
 * send work request.
 */
static int no_bufs_available(struct qib_qp *qp)
{
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct qib_devdata *dd;
	unsigned long flags;
	int ret = 0;

	/*
	 * Note that as soon as want_buffer() is called and
	 * possibly before it returns, qib_ib_piobufavail()
	 * could be called. Therefore, put QP on the I/O wait list before
	 * enabling the PIO avail interrupt.
	 */
	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
		spin_lock(&dev->pending_lock);
		if (list_empty(&qp->iowait)) {
			dev->n_piowait++;
			qp->s_flags |= QIB_S_WAIT_PIO;
			list_add_tail(&qp->iowait, &dev->piowait);
			dd = dd_from_dev(dev);
			dd->f_wantpiobuf_intr(dd, 1);
		}
		spin_unlock(&dev->pending_lock);
		qp->s_flags &= ~QIB_S_BUSY;
		ret = -EBUSY;
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return ret;
}

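/*
 * Send a packet by programmed I/O: write the PBC, then copy the header and
 * payload into the PIO buffer, flushing write combining around the trigger
 * word where the hardware requires it.
 */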
static int qib_verbs_send_pio(struct qib_qp *qp, struct qib_ib_header *ibhdr,
			      u32 hdrwords, struct qib_sge_state *ss, u32 len,
			      u32 plen, u32 dwords)
{
	struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct qib_pportdata *ppd = dd->pport + qp->port_num - 1;
	u32 *hdr = (u32 *) ibhdr;
	u32 __iomem *piobuf_orig;
	u32 __iomem *piobuf;
	u64 pbc;
	unsigned long flags;
	unsigned flush_wc;
	u32 control;
	u32 pbufn;

	control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
				       be16_to_cpu(ibhdr->lrh[0]) >> 12);
	pbc = ((u64) control << 32) | plen;
	piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
	if (unlikely(piobuf == NULL))
		return no_bufs_available(qp);

	/*
	 * Write the pbc.
	 * We have to flush after the PBC for correctness on some cpus
	 * or WC buffer can be written out of order.
	 */
	writeq(pbc, piobuf);
	piobuf_orig = piobuf;
	piobuf += 2;

	flush_wc = dd->flags & QIB_PIO_FLUSH_WC;
	if (len == 0) {
		/*
		 * If there is just the header portion, must flush before
		 * writing last word of header for correctness, and after
		 * the last header word (trigger word).
		 */
		if (flush_wc) {
			qib_flush_wc();
			qib_pio_copy(piobuf, hdr, hdrwords - 1);
			qib_flush_wc();
			__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
			qib_flush_wc();
		} else
			qib_pio_copy(piobuf, hdr, hdrwords);
		goto done;
	}

	if (flush_wc)
		qib_flush_wc();
	qib_pio_copy(piobuf, hdr, hdrwords);
	piobuf += hdrwords;

	/* The common case is aligned and contained in one segment. */
	if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
		   !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
		u32 *addr = (u32 *) ss->sge.vaddr;

		/* Update address before sending packet. */
		update_sge(ss, len);
		if (flush_wc) {
			qib_pio_copy(piobuf, addr, dwords - 1);
			/* must flush early everything before trigger word */
			qib_flush_wc();
			__raw_writel(addr[dwords - 1], piobuf + dwords - 1);
			/* be sure trigger word is written */
			qib_flush_wc();
		} else
			qib_pio_copy(piobuf, addr, dwords);
		goto done;
	}
	copy_io(piobuf, ss, len, flush_wc);
done:
	if (dd->flags & QIB_USE_SPCL_TRIG) {
		u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;

		qib_flush_wc();
		__raw_writel(0xaebecede, piobuf_orig + spcl_off);
	}
	qib_sendbuf_done(dd, pbufn);
	if (qp->s_rdma_mr) {
		atomic_dec(&qp->s_rdma_mr->refcount);
		qp->s_rdma_mr = NULL;
	}
	if (qp->s_wqe) {
		spin_lock_irqsave(&qp->s_lock, flags);
		qib_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	} else if (qp->ibqp.qp_type == IB_QPT_RC) {
		spin_lock_irqsave(&qp->s_lock, flags);
		qib_rc_send_complete(qp, ibhdr);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	}
	return 0;
}

/**
 * qib_verbs_send - send a packet
 * @qp: the QP to send on
 * @hdr: the packet header
 * @hdrwords: the number of 32-bit words in the header
 * @ss: the SGE to send
 * @len: the length of the packet in bytes
 *
 * Return zero if packet is sent or queued OK.
 * Return non-zero and clear qp->s_flags QIB_S_BUSY otherwise.
 */
int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr,
		   u32 hdrwords, struct qib_sge_state *ss, u32 len)
{
	struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	u32 plen;
	int ret;
	u32 dwords = (len + 3) >> 2;

	/*
	 * Calculate the send buffer trigger address.
	 * The +1 counts for the pbc control dword following the pbc length.
	 */
	plen = hdrwords + dwords + 1;

	/*
	 * VL15 packets (IB_QPT_SMI) will always use PIO, so we
	 * can defer SDMA restart until link goes ACTIVE without
	 * worrying about just how we got there.
	 */
	if (qp->ibqp.qp_type == IB_QPT_SMI ||
	    !(dd->flags & QIB_HAS_SEND_DMA))
		ret = qib_verbs_send_pio(qp, hdr, hdrwords, ss, len,
					 plen, dwords);
	else
		ret = qib_verbs_send_dma(qp, hdr, hdrwords, ss, len,
					 plen, dwords);
	return ret;
}

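/*
 * Snapshot the per-port send/receive word and packet counters, plus the
 * transmit-wait counter.  Returns -EINVAL if the hardware is not present.
 */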
int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
			  u64 *rwords, u64 *spkts, u64 *rpkts,
			  u64 *xmit_wait)
{
	int ret;
	struct qib_devdata *dd = ppd->dd;

	if (!(dd->flags & QIB_PRESENT)) {
		/* no hardware, freeze, etc. */
		ret = -EINVAL;
		goto bail;
	}
	*swords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDSEND);
	*rwords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDRCV);
	*spkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTSEND);
	*rpkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTRCV);
	*xmit_wait = dd->f_portcntr(ppd, QIBPORTCNTR_SENDSTALL);

	ret = 0;

bail:
	return ret;
}

/**
 * qib_get_counters - get various chip counters
 * @ppd: the physical port data
 * @cntrs: counters are placed here
 *
 * Return the counters needed by recv_pma_get_portcounters().
 */
int qib_get_counters(struct qib_pportdata *ppd,
		     struct qib_verbs_counters *cntrs)
{
	int ret;

	if (!(ppd->dd->flags & QIB_PRESENT)) {
		/* no hardware, freeze, etc. */
		ret = -EINVAL;
		goto bail;
	}
	cntrs->symbol_error_counter =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
	cntrs->link_error_recovery_counter =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKERRRECOV);
	/*
	 * The link downed counter counts when the other side downs the
	 * connection.  We add in the number of times we downed the link
	 * due to local link integrity errors to compensate.
	 */
	cntrs->link_downed_counter =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKDOWN);
	cntrs->port_rcv_errors =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXDROPPKT) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVOVFL) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERR_RLEN) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_INVALIDRLEN) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLINK) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRICRC) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRVCRC) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLPCRC) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_BADFORMAT);
	cntrs->port_rcv_errors +=
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXLOCALPHYERR);
	cntrs->port_rcv_errors +=
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXVLERR);
	cntrs->port_rcv_remphys_errors =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVEBP);
	cntrs->port_xmit_discards =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_UNSUPVL);
	cntrs->port_xmit_data = ppd->dd->f_portcntr(ppd,
						    QIBPORTCNTR_WORDSEND);
	cntrs->port_rcv_data = ppd->dd->f_portcntr(ppd,
						   QIBPORTCNTR_WORDRCV);
	cntrs->port_xmit_packets = ppd->dd->f_portcntr(ppd,
						       QIBPORTCNTR_PKTSEND);
	cntrs->port_rcv_packets = ppd->dd->f_portcntr(ppd,
						      QIBPORTCNTR_PKTRCV);
	cntrs->local_link_integrity_errors =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_LLI);
	cntrs->excessive_buffer_overrun_errors =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_EXCESSBUFOVFL);
	cntrs->vl15_dropped =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_VL15PKTDROP);

	ret = 0;

bail:
	return ret;
}

/**
 * qib_ib_piobufavail - callback when a PIO buffer is available
 * @dd: the device pointer
 *
 * This is called from qib_intr() at interrupt level when a PIO buffer is
 * available after qib_verbs_send() returned an error that no buffers were
 * available. Disable the interrupt if there are no more QPs waiting.
 */
void qib_ib_piobufavail(struct qib_devdata *dd)
{
	struct qib_ibdev *dev = &dd->verbs_dev;
	struct list_head *list;
	struct qib_qp *qps[5];
	struct qib_qp *qp;
	unsigned long flags;
	unsigned i, n;

	list = &dev->piowait;
	n = 0;

	/*
	 * Note: checking that the piowait list is empty and clearing
	 * the buffer available interrupt needs to be atomic or we
	 * could end up with QPs on the wait list with the interrupt
	 * disabled.
	 */
	spin_lock_irqsave(&dev->pending_lock, flags);
	while (!list_empty(list)) {
		if (n == ARRAY_SIZE(qps))
			goto full;
		qp = list_entry(list->next, struct qib_qp, iowait);
		list_del_init(&qp->iowait);
		atomic_inc(&qp->refcount);
		qps[n++] = qp;
	}
	dd->f_wantpiobuf_intr(dd, 0);
full:
	spin_unlock_irqrestore(&dev->pending_lock, flags);

	for (i = 0; i < n; i++) {
		qp = qps[i];

		spin_lock_irqsave(&qp->s_lock, flags);
		if (qp->s_flags & QIB_S_WAIT_PIO) {
			qp->s_flags &= ~QIB_S_WAIT_PIO;
			qib_schedule_send(qp);
		}
		spin_unlock_irqrestore(&qp->s_lock, flags);

		/* Notify qib_destroy_qp() if it is waiting. */
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}

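/*
 * ib_device query_device: report the device capabilities, built from the
 * module parameter limits and the per-device hardware information.
 */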

static int qib_query_device(struct ib_device *ibdev,
			    struct ib_device_attr *props)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	struct qib_ibdev *dev = to_idev(ibdev);

	memset(props, 0, sizeof(*props));

	props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
		IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
		IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
		IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
	props->page_size_cap = PAGE_SIZE;
	props->vendor_id =
		QIB_SRC_OUI_1 << 16 | QIB_SRC_OUI_2 << 8 | QIB_SRC_OUI_3;
	props->vendor_part_id = dd->deviceid;
	props->hw_ver = dd->minrev;
	props->sys_image_guid = ib_qib_sys_image_guid;
	props->max_mr_size = ~0ULL;
	props->max_qp = ib_qib_max_qps;
	props->max_qp_wr = ib_qib_max_qp_wrs;
	props->max_sge = ib_qib_max_sges;
	props->max_cq = ib_qib_max_cqs;
	props->max_ah = ib_qib_max_ahs;
	props->max_cqe = ib_qib_max_cqes;
	props->max_mr = dev->lk_table.max;
	props->max_fmr = dev->lk_table.max;
	props->max_map_per_fmr = 32767;
	props->max_pd = ib_qib_max_pds;
	props->max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
	props->max_qp_init_rd_atom = 255;
	/* props->max_res_rd_atom */
	props->max_srq = ib_qib_max_srqs;
	props->max_srq_wr = ib_qib_max_srq_wrs;
	props->max_srq_sge = ib_qib_max_srq_sges;
	/* props->local_ca_ack_delay */
	props->atomic_cap = IB_ATOMIC_GLOB;
	props->max_pkeys = qib_get_npkeys(dd);
	props->max_mcast_grp = ib_qib_max_mcast_grps;
	props->max_mcast_qp_attach = ib_qib_max_mcast_qp_attached;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
		props->max_mcast_grp;

	return 0;
}
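
/*
 * Illustrative sketch (not part of this file's code): kernel ULPs of this
 * vintage reach the qib_query_device() hook above through the verbs core,
 * e.g.:
 *
 *	struct ib_device_attr attr;
 *	int err = ib_query_device(ibdev, &attr);
 *
 *	if (!err)
 *		pr_info("max_qp=%d max_sge=%d\n", attr.max_qp, attr.max_sge);
 */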

static int qib_query_port(struct ib_device *ibdev, u8 port,
			  struct ib_port_attr *props)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	enum ib_mtu mtu;
	u16 lid = ppd->lid;

	memset(props, 0, sizeof(*props));
	props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
	props->lmc = ppd->lmc;
	props->sm_lid = ibp->sm_lid;
	props->sm_sl = ibp->sm_sl;
	props->state = dd->f_iblink_state(ppd->lastibcstat);
	props->phys_state = dd->f_ibphys_portstate(ppd->lastibcstat);
	props->port_cap_flags = ibp->port_cap_flags;
	props->gid_tbl_len = QIB_GUIDS_PER_PORT;
	props->max_msg_sz = 0x80000000;
	props->pkey_tbl_len = qib_get_npkeys(dd);
	props->bad_pkey_cntr = ibp->pkey_violations;
	props->qkey_viol_cntr = ibp->qkey_violations;
	props->active_width = ppd->link_width_active;
	/* See rate_show() */
	props->active_speed = ppd->link_speed_active;
	props->max_vl_num = qib_num_vls(ppd->vls_supported);
	props->init_type_reply = 0;
	props->max_mtu = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
	switch (ppd->ibmtu) {
	case 4096:
		mtu = IB_MTU_4096;
		break;
	case 2048:
		mtu = IB_MTU_2048;
		break;
	case 1024:
		mtu = IB_MTU_1024;
		break;
	case 512:
		mtu = IB_MTU_512;
		break;
	case 256:
		mtu = IB_MTU_256;
		break;
	default:
		mtu = IB_MTU_2048;
	}
	props->active_mtu = mtu;
	props->subnet_timeout = ibp->subnet_timeout;

	return 0;
}
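
/*
 * Illustrative note (not part of this file's code): the switch above maps
 * the byte-size MTU kept in ppd->ibmtu onto the IB_MTU_* enum reported to
 * the core; the core's ib_mtu_enum_to_int() helper performs the reverse
 * mapping, e.g. ib_mtu_enum_to_int(IB_MTU_2048) == 2048.
 */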

static int qib_modify_device(struct ib_device *device,
			     int device_modify_mask,
			     struct ib_device_modify *device_modify)
{
	struct qib_devdata *dd = dd_from_ibdev(device);
	unsigned i;
	int ret;

	if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
				   IB_DEVICE_MODIFY_NODE_DESC)) {
		ret = -EOPNOTSUPP;
		goto bail;
	}

	if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(device->node_desc, device_modify->node_desc, 64);
		for (i = 0; i < dd->num_pports; i++) {
			struct qib_ibport *ibp = &dd->pport[i].ibport_data;

			qib_node_desc_chg(ibp);
		}
	}

	if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
		ib_qib_sys_image_guid =
			cpu_to_be64(device_modify->sys_image_guid);
		for (i = 0; i < dd->num_pports; i++) {
			struct qib_ibport *ibp = &dd->pport[i].ibport_data;

			qib_sys_guid_chg(ibp);
		}
	}

	ret = 0;

bail:
	return ret;
}

static int qib_modify_port(struct ib_device *ibdev, u8 port,
			   int port_modify_mask, struct ib_port_modify *props)
{
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);

	ibp->port_cap_flags |= props->set_port_cap_mask;
	ibp->port_cap_flags &= ~props->clr_port_cap_mask;
	if (props->set_port_cap_mask || props->clr_port_cap_mask)
		qib_cap_mask_chg(ibp);
	if (port_modify_mask & IB_PORT_SHUTDOWN)
		qib_set_linkstate(ppd, QIB_IB_LINKDOWN);
	if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
		ibp->qkey_violations = 0;
	return 0;
}

static int qib_query_gid(struct ib_device *ibdev, u8 port,
			 int index, union ib_gid *gid)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	int ret = 0;

	if (!port || port > dd->num_pports)
		ret = -EINVAL;
	else {
		struct qib_ibport *ibp = to_iport(ibdev, port);
		struct qib_pportdata *ppd = ppd_from_ibp(ibp);

		gid->global.subnet_prefix = ibp->gid_prefix;
		if (index == 0)
			gid->global.interface_id = ppd->guid;
		else if (index < QIB_GUIDS_PER_PORT)
			gid->global.interface_id = ibp->guids[index - 1];
		else
			ret = -EINVAL;
	}

	return ret;
}
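
/*
 * Illustrative sketch (not part of this file's code): a GID reported by
 * qib_query_gid() is just the 64-bit subnet prefix followed by a 64-bit
 * interface ID (the port GUID for index 0, an alternate GUID otherwise),
 * so a caller going through the verbs core of this era could do:
 *
 *	union ib_gid gid;
 *
 *	if (!ib_query_gid(ibdev, port, 0, &gid))
 *		pr_info("gid %llx:%llx\n",
 *			be64_to_cpu(gid.global.subnet_prefix),
 *			be64_to_cpu(gid.global.interface_id));
 */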

static struct ib_pd *qib_alloc_pd(struct ib_device *ibdev,
				  struct ib_ucontext *context,
				  struct ib_udata *udata)
{
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_pd *pd;
	struct ib_pd *ret;

	/*
	 * This is actually totally arbitrary.  Some correctness tests
	 * assume there's a maximum number of PDs that can be allocated.
	 * We don't actually have this limit, but we fail the test if
	 * we allow allocations of more than we report for this value.
	 */
	pd = kmalloc(sizeof *pd, GFP_KERNEL);
	if (!pd) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	spin_lock(&dev->n_pds_lock);
	if (dev->n_pds_allocated == ib_qib_max_pds) {
		spin_unlock(&dev->n_pds_lock);
		kfree(pd);
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	dev->n_pds_allocated++;
	spin_unlock(&dev->n_pds_lock);

	/* ib_alloc_pd() will initialize pd->ibpd. */
	pd->user = udata != NULL;

	ret = &pd->ibpd;

bail:
	return ret;
}

static int qib_dealloc_pd(struct ib_pd *ibpd)
{
	struct qib_pd *pd = to_ipd(ibpd);
	struct qib_ibdev *dev = to_idev(ibpd->device);

	spin_lock(&dev->n_pds_lock);
	dev->n_pds_allocated--;
	spin_unlock(&dev->n_pds_lock);

	kfree(pd);

	return 0;
}

int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr)
{
	/* A multicast address requires a GRH (see ch. 8.4.1). */
	if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE &&
	    ah_attr->dlid != QIB_PERMISSIVE_LID &&
	    !(ah_attr->ah_flags & IB_AH_GRH))
		goto bail;
	if ((ah_attr->ah_flags & IB_AH_GRH) &&
	    ah_attr->grh.sgid_index >= QIB_GUIDS_PER_PORT)
		goto bail;
	if (ah_attr->dlid == 0)
		goto bail;
	if (ah_attr->port_num < 1 ||
	    ah_attr->port_num > ibdev->phys_port_cnt)
		goto bail;
	if (ah_attr->static_rate != IB_RATE_PORT_CURRENT &&
	    ib_rate_to_mult(ah_attr->static_rate) < 0)
		goto bail;
	if (ah_attr->sl > 15)
		goto bail;
	return 0;

bail:
	return -EINVAL;
}
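
/*
 * Illustrative sketch (not part of this file's code): a minimal ib_ah_attr
 * that passes qib_check_ah() needs a non-zero unicast DLID (or a GRH with a
 * valid sgid_index if the DLID is multicast), a port number within
 * phys_port_cnt, an SL in 0..15, and a usable static rate.  The names
 * remote_lid and port below are placeholders:
 *
 *	struct ib_ah_attr attr = {
 *		.dlid		= remote_lid,
 *		.sl		= 0,
 *		.port_num	= port,
 *		.static_rate	= IB_RATE_PORT_CURRENT,
 *	};
 */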

/**
 * qib_create_ah - create an address handle
 * @pd: the protection domain
 * @ah_attr: the attributes of the AH
 *
 * This may be called from interrupt context.
 */
static struct ib_ah *qib_create_ah(struct ib_pd *pd,
				   struct ib_ah_attr *ah_attr)
{
	struct qib_ah *ah;
	struct ib_ah *ret;
	struct qib_ibdev *dev = to_idev(pd->device);
	unsigned long flags;

	if (qib_check_ah(pd->device, ah_attr)) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	ah = kmalloc(sizeof *ah, GFP_ATOMIC);
	if (!ah) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	spin_lock_irqsave(&dev->n_ahs_lock, flags);
	if (dev->n_ahs_allocated == ib_qib_max_ahs) {
		spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
		kfree(ah);
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	dev->n_ahs_allocated++;
	spin_unlock_irqrestore(&dev->n_ahs_lock, flags);

	/* ib_create_ah() will initialize ah->ibah. */
	ah->attr = *ah_attr;
	atomic_set(&ah->refcount, 0);

	ret = &ah->ibah;

bail:
	return ret;
}
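
/*
 * Illustrative sketch (not part of this file's code): kernel consumers do
 * not call qib_create_ah() directly; they go through the verbs core, which
 * invokes this ->create_ah hook, e.g.:
 *
 *	struct ib_ah *ah = ib_create_ah(pd, &attr);
 *
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *
 * Because the hook may run in interrupt context (as the comment above
 * notes), the AH is allocated with GFP_ATOMIC and the n_ahs counter is
 * protected by an irqsave spinlock.
 */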

/**
 * qib_destroy_ah - destroy an address handle
 * @ibah: the AH to destroy
 *
 * This may be called from interrupt context.
 */
static int qib_destroy_ah(struct ib_ah *ibah)
{
	struct qib_ibdev *dev = to_idev(ibah->device);
	struct qib_ah *ah = to_iah(ibah);
	unsigned long flags;

	if (atomic_read(&ah->refcount) != 0)
		return -EBUSY;

	spin_lock_irqsave(&dev->n_ahs_lock, flags);
	dev->n_ahs_allocated--;
	spin_unlock_irqrestore(&dev->n_ahs_lock, flags);

	kfree(ah);

	return 0;
}

static int qib_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
{
	struct qib_ah *ah = to_iah(ibah);

	if (qib_check_ah(ibah->device, ah_attr))
		return -EINVAL;

	ah->attr = *ah_attr;

	return 0;
}

static int qib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
{
	struct qib_ah *ah = to_iah(ibah);

	*ah_attr = ah->attr;

	return 0;
}

/**
 * qib_get_npkeys - return the size of the PKEY table for context 0
 * @dd: the qlogic_ib device
 */
unsigned qib_get_npkeys(struct qib_devdata *dd)
{
	return ARRAY_SIZE(dd->rcd[0]->pkeys);
}

/*
 * Return the indexed PKEY from the port PKEY table.
 * No need to validate rcd[ctxt]; the port is set up if we are here.
 */
unsigned qib_get_pkey(struct qib_ibport *ibp, unsigned index)
{
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = ppd->dd;
	unsigned ctxt = ppd->hw_pidx;
	unsigned ret;

	/* dd->rcd is null if mini_init or some init failures */
	if (!dd->rcd || index >= ARRAY_SIZE(dd->rcd[ctxt]->pkeys))
		ret = 0;
	else
		ret = dd->rcd[ctxt]->pkeys[index];

	return ret;
}

static int qib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			  u16 *pkey)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	int ret;

	if (index >= qib_get_npkeys(dd)) {
		ret = -EINVAL;
		goto bail;
	}

	*pkey = qib_get_pkey(to_iport(ibdev, port), index);
	ret = 0;

bail:
	return ret;
}

/**
 * qib_alloc_ucontext - allocate a ucontext
 * @ibdev: the infiniband device
 * @udata: not used by the QLogic_IB driver
 */
static struct ib_ucontext *qib_alloc_ucontext(struct ib_device *ibdev,
					      struct ib_udata *udata)
{
	struct qib_ucontext *context;
	struct ib_ucontext *ret;

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	ret = &context->ibucontext;

bail:
	return ret;
}

static int qib_dealloc_ucontext(struct ib_ucontext *context)
{
	kfree(to_iucontext(context));
	return 0;
}

static void init_ibport(struct qib_pportdata *ppd)
{
	struct qib_verbs_counters cntrs;
	struct qib_ibport *ibp = &ppd->ibport_data;

	spin_lock_init(&ibp->lock);
	/* Set the prefix to the default value (see ch. 4.1.1) */
	ibp->gid_prefix = IB_DEFAULT_GID_PREFIX;
	ibp->sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
	ibp->port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP |
		IB_PORT_CLIENT_REG_SUP | IB_PORT_SL_MAP_SUP |
		IB_PORT_TRAP_SUP | IB_PORT_AUTO_MIGR_SUP |
		IB_PORT_DR_NOTICE_SUP | IB_PORT_CAP_MASK_NOTICE_SUP |
		IB_PORT_OTHER_LOCAL_CHANGES_SUP;
	if (ppd->dd->flags & QIB_HAS_LINK_LATENCY)
		ibp->port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
	ibp->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
	ibp->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
	ibp->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
	ibp->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
	ibp->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;

	/* Snapshot current HW counters to "clear" them. */
	qib_get_counters(ppd, &cntrs);
	ibp->z_symbol_error_counter = cntrs.symbol_error_counter;
	ibp->z_link_error_recovery_counter =
		cntrs.link_error_recovery_counter;
	ibp->z_link_downed_counter = cntrs.link_downed_counter;
	ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
	ibp->z_port_rcv_remphys_errors = cntrs.port_rcv_remphys_errors;
	ibp->z_port_xmit_discards = cntrs.port_xmit_discards;
	ibp->z_port_xmit_data = cntrs.port_xmit_data;
	ibp->z_port_rcv_data = cntrs.port_rcv_data;
	ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
	ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
	ibp->z_local_link_integrity_errors =
		cntrs.local_link_integrity_errors;
	ibp->z_excessive_buffer_overrun_errors =
		cntrs.excessive_buffer_overrun_errors;
	ibp->z_vl15_dropped = cntrs.vl15_dropped;
	RCU_INIT_POINTER(ibp->qp0, NULL);
	RCU_INIT_POINTER(ibp->qp1, NULL);
}
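
/*
 * Illustrative note (not part of this file's code): the z_* fields captured
 * in init_ibport() act as a software zero point, since the hardware
 * counters cannot be cleared directly.  A reader such as the PMA code then
 * reports the delta, roughly:
 *
 *	qib_get_counters(ppd, &cntrs);
 *	symbol_errors = cntrs.symbol_error_counter -
 *			ibp->z_symbol_error_counter;
 *
 * "Clearing" a counter amounts to re-snapshotting the current hardware
 * value into the corresponding z_* baseline.
 */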

/**
 * qib_register_ib_device - register our device with the infiniband core
 * @dd: the device data structure
 * Return 0 on success, a negative errno on failure.
 */
int qib_register_ib_device(struct qib_devdata *dd)
{
	struct qib_ibdev *dev = &dd->verbs_dev;
	struct ib_device *ibdev = &dev->ibdev;
	struct qib_pportdata *ppd = dd->pport;
	unsigned i, lk_tab_size;
	int ret;

	dev->qp_table_size = ib_qib_qp_table_size;
	get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd));
	dev->qp_table = kmalloc(dev->qp_table_size * sizeof *dev->qp_table,
				GFP_KERNEL);
	if (!dev->qp_table) {
		ret = -ENOMEM;
		goto err_qpt;
	}
	for (i = 0; i < dev->qp_table_size; i++)
		RCU_INIT_POINTER(dev->qp_table[i], NULL);

	for (i = 0; i < dd->num_pports; i++)
		init_ibport(ppd + i);

	/* Only need to initialize non-zero fields. */
	spin_lock_init(&dev->qpt_lock);
	spin_lock_init(&dev->n_pds_lock);
	spin_lock_init(&dev->n_ahs_lock);
	spin_lock_init(&dev->n_cqs_lock);
	spin_lock_init(&dev->n_qps_lock);
	spin_lock_init(&dev->n_srqs_lock);
	spin_lock_init(&dev->n_mcast_grps_lock);
	init_timer(&dev->mem_timer);
	dev->mem_timer.function = mem_timer;
	dev->mem_timer.data = (unsigned long) dev;

	qib_init_qpn_table(dd, &dev->qpn_table);

	/*
	 * The top ib_qib_lkey_table_size bits are used to index the
	 * table.  The lower 8 bits can be owned by the user (copied from
	 * the LKEY).  The remaining bits act as a generation number or tag.
	 */
	spin_lock_init(&dev->lk_table.lock);
	dev->lk_table.max = 1 << ib_qib_lkey_table_size;
	lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
	dev->lk_table.table = (struct qib_mregion **)
		__get_free_pages(GFP_KERNEL, get_order(lk_tab_size));
	if (dev->lk_table.table == NULL) {
		ret = -ENOMEM;
		goto err_lk;
	}
	memset(dev->lk_table.table, 0, lk_tab_size);
	INIT_LIST_HEAD(&dev->pending_mmaps);
	spin_lock_init(&dev->pending_lock);
	dev->mmap_offset = PAGE_SIZE;
	spin_lock_init(&dev->mmap_offset_lock);
	INIT_LIST_HEAD(&dev->piowait);
	INIT_LIST_HEAD(&dev->dmawait);
	INIT_LIST_HEAD(&dev->txwait);
	INIT_LIST_HEAD(&dev->memwait);
	INIT_LIST_HEAD(&dev->txreq_free);

	if (ppd->sdma_descq_cnt) {
		dev->pio_hdrs = dma_alloc_coherent(&dd->pcidev->dev,
						   ppd->sdma_descq_cnt *
						   sizeof(struct qib_pio_header),
						   &dev->pio_hdrs_phys,
						   GFP_KERNEL);
		if (!dev->pio_hdrs) {
			ret = -ENOMEM;
			goto err_hdrs;
		}
	}

	for (i = 0; i < ppd->sdma_descq_cnt; i++) {
		struct qib_verbs_txreq *tx;

		tx = kzalloc(sizeof *tx, GFP_KERNEL);
		if (!tx) {
			ret = -ENOMEM;
			goto err_tx;
		}
		tx->hdr_inx = i;
		list_add(&tx->txreq.list, &dev->txreq_free);
	}

	/*
	 * The system image GUID is supposed to be the same for all
	 * IB HCAs in a single system but since there can be other
	 * device types in the system, we can't be sure this is unique.
	 */
	if (!ib_qib_sys_image_guid)
		ib_qib_sys_image_guid = ppd->guid;

	strlcpy(ibdev->name, "qib%d", IB_DEVICE_NAME_MAX);
	ibdev->owner = THIS_MODULE;
	ibdev->node_guid = ppd->guid;
	ibdev->uverbs_abi_ver = QIB_UVERBS_ABI_VERSION;
	ibdev->uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_CREATE_AH) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_AH) |
		(1ull << IB_USER_VERBS_CMD_QUERY_AH) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_POLL_CQ) |
		(1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_POST_SEND) |
		(1ull << IB_USER_VERBS_CMD_POST_RECV) |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
	ibdev->node_type = RDMA_NODE_IB_CA;
	ibdev->phys_port_cnt = dd->num_pports;
	ibdev->num_comp_vectors = 1;
	ibdev->dma_device = &dd->pcidev->dev;
	ibdev->query_device = qib_query_device;
	ibdev->modify_device = qib_modify_device;
	ibdev->query_port = qib_query_port;
	ibdev->modify_port = qib_modify_port;
	ibdev->query_pkey = qib_query_pkey;
	ibdev->query_gid = qib_query_gid;
	ibdev->alloc_ucontext = qib_alloc_ucontext;
	ibdev->dealloc_ucontext = qib_dealloc_ucontext;
	ibdev->alloc_pd = qib_alloc_pd;
	ibdev->dealloc_pd = qib_dealloc_pd;
	ibdev->create_ah = qib_create_ah;
	ibdev->destroy_ah = qib_destroy_ah;
	ibdev->modify_ah = qib_modify_ah;
	ibdev->query_ah = qib_query_ah;
	ibdev->create_srq = qib_create_srq;
	ibdev->modify_srq = qib_modify_srq;
	ibdev->query_srq = qib_query_srq;
	ibdev->destroy_srq = qib_destroy_srq;
	ibdev->create_qp = qib_create_qp;
	ibdev->modify_qp = qib_modify_qp;
	ibdev->query_qp = qib_query_qp;
	ibdev->destroy_qp = qib_destroy_qp;
	ibdev->post_send = qib_post_send;
	ibdev->post_recv = qib_post_receive;
	ibdev->post_srq_recv = qib_post_srq_receive;
	ibdev->create_cq = qib_create_cq;
	ibdev->destroy_cq = qib_destroy_cq;
	ibdev->resize_cq = qib_resize_cq;
	ibdev->poll_cq = qib_poll_cq;
	ibdev->req_notify_cq = qib_req_notify_cq;
	ibdev->get_dma_mr = qib_get_dma_mr;
	ibdev->reg_phys_mr = qib_reg_phys_mr;
	ibdev->reg_user_mr = qib_reg_user_mr;
	ibdev->dereg_mr = qib_dereg_mr;
	ibdev->alloc_fast_reg_mr = qib_alloc_fast_reg_mr;
	ibdev->alloc_fast_reg_page_list = qib_alloc_fast_reg_page_list;
	ibdev->free_fast_reg_page_list = qib_free_fast_reg_page_list;
	ibdev->alloc_fmr = qib_alloc_fmr;
	ibdev->map_phys_fmr = qib_map_phys_fmr;
	ibdev->unmap_fmr = qib_unmap_fmr;
	ibdev->dealloc_fmr = qib_dealloc_fmr;
	ibdev->attach_mcast = qib_multicast_attach;
	ibdev->detach_mcast = qib_multicast_detach;
	ibdev->process_mad = qib_process_mad;
	ibdev->mmap = qib_mmap;
	ibdev->dma_ops = &qib_dma_mapping_ops;

	snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
		 QIB_IDSTR " %s", init_utsname()->nodename);

	ret = ib_register_device(ibdev, qib_create_port_files);
	if (ret)
		goto err_reg;

	ret = qib_create_agents(dev);
	if (ret)
		goto err_agents;

	if (qib_verbs_register_sysfs(dd))
		goto err_class;

	goto bail;

err_class:
	qib_free_agents(dev);
err_agents:
	ib_unregister_device(ibdev);
err_reg:
err_tx:
	while (!list_empty(&dev->txreq_free)) {
		struct list_head *l = dev->txreq_free.next;
		struct qib_verbs_txreq *tx;

		list_del(l);
		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
		kfree(tx);
	}
	if (ppd->sdma_descq_cnt)
		dma_free_coherent(&dd->pcidev->dev,
				  ppd->sdma_descq_cnt *
					sizeof(struct qib_pio_header),
				  dev->pio_hdrs, dev->pio_hdrs_phys);
err_hdrs:
	free_pages((unsigned long) dev->lk_table.table, get_order(lk_tab_size));
err_lk:
	kfree(dev->qp_table);
err_qpt:
	qib_dev_err(dd, "cannot register verbs: %d!\n", -ret);
bail:
	return ret;
}
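
/*
 * Illustrative note (not part of this file's code): the error labels in
 * qib_register_ib_device() unwind in reverse order of setup, so a failure
 * at any step releases only what was already allocated:
 *
 *	err_class		-> qib_free_agents()
 *	err_agents		-> ib_unregister_device()
 *	err_reg / err_tx	-> free txreqs and the PIO header buffer
 *	err_hdrs		-> free the lkey table pages
 *	err_lk			-> kfree the QP table
 *
 * qib_unregister_ib_device() below performs the same teardown for a device
 * that registered successfully.
 */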

void qib_unregister_ib_device(struct qib_devdata *dd)
{
	struct qib_ibdev *dev = &dd->verbs_dev;
	struct ib_device *ibdev = &dev->ibdev;
	u32 qps_inuse;
	unsigned lk_tab_size;

	qib_verbs_unregister_sysfs(dd);

	qib_free_agents(dev);

	ib_unregister_device(ibdev);

	if (!list_empty(&dev->piowait))
		qib_dev_err(dd, "piowait list not empty!\n");
	if (!list_empty(&dev->dmawait))
		qib_dev_err(dd, "dmawait list not empty!\n");
	if (!list_empty(&dev->txwait))
		qib_dev_err(dd, "txwait list not empty!\n");
	if (!list_empty(&dev->memwait))
		qib_dev_err(dd, "memwait list not empty!\n");
	if (dev->dma_mr)
		qib_dev_err(dd, "DMA MR not NULL!\n");

	qps_inuse = qib_free_all_qps(dd);
	if (qps_inuse)
		qib_dev_err(dd, "QP memory leak! %u still in use\n",
			    qps_inuse);

	del_timer_sync(&dev->mem_timer);
	qib_free_qpn_table(&dev->qpn_table);

	while (!list_empty(&dev->txreq_free)) {
		struct list_head *l = dev->txreq_free.next;
		struct qib_verbs_txreq *tx;

		list_del(l);
		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
		kfree(tx);
	}
	if (dd->pport->sdma_descq_cnt)
		dma_free_coherent(&dd->pcidev->dev,
				  dd->pport->sdma_descq_cnt *
					sizeof(struct qib_pio_header),
				  dev->pio_hdrs, dev->pio_hdrs_phys);
	lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
	free_pages((unsigned long) dev->lk_table.table,
		   get_order(lk_tab_size));
	kfree(dev->qp_table);
}