/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/debugfs.h>
#include <linux/vmalloc.h>

#include <rdma/ib_verbs.h>

#include "iw_cxgb4.h"

#define DRV_VERSION "0.1"

MODULE_AUTHOR("Steve Wise");
MODULE_DESCRIPTION("Chelsio T4/T5 RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
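
/*
 * Module parameters: on T5 devices, doorbell flow control and doorbell
 * coalescing are left disabled unless these are set (see c4iw_alloc()).
 */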
static int allow_db_fc_on_t5;
module_param(allow_db_fc_on_t5, int, 0644);
MODULE_PARM_DESC(allow_db_fc_on_t5,
		 "Allow DB Flow Control on T5 (default = 0)");

static int allow_db_coalescing_on_t5;
module_param(allow_db_coalescing_on_t5, int, 0644);
MODULE_PARM_DESC(allow_db_coalescing_on_t5,
		 "Allow DB Coalescing on T5 (default = 0)");
struct uld_ctx {
	struct list_head entry;
	struct cxgb4_lld_info lldi;
	struct c4iw_dev *dev;
};

static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);

static struct dentry *c4iw_debugfs_root;

struct c4iw_debugfs_data {
	struct c4iw_dev *devp;
	char *buf;
	int bufsize;
	int pos;
};
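
/*
 * The "qps", "stags" and "eps" debugfs files below are all built the same
 * way: on open, walk the relevant idr once to count the entries and size a
 * snapshot buffer, then walk it again dumping one formatted line per entry.
 * Reads are then served from that snapshot via debugfs_read().
 */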
static int count_idrs(int id, void *p, void *data)
{
	int *countp = data;

	*countp = *countp + 1;
	return 0;
}

static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct c4iw_debugfs_data *d = file->private_data;

	return simple_read_from_buffer(buf, count, ppos, d->buf, d->pos);
}

static int dump_qp(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;
	struct c4iw_debugfs_data *qpd = data;
	int space;
	int cc;

	if (id != qp->wq.sq.qid)
		return 0;

	space = qpd->bufsize - qpd->pos - 1;
	if (space == 0)
		return 1;

	if (qp->ep) {
		if (qp->ep->com.local_addr.ss_family == AF_INET) {
			struct sockaddr_in *lsin = (struct sockaddr_in *)
				&qp->ep->com.local_addr;
			struct sockaddr_in *rsin = (struct sockaddr_in *)
				&qp->ep->com.remote_addr;

			cc = snprintf(qpd->buf + qpd->pos, space,
				      "rc qp sq id %u rq id %u state %u "
				      "onchip %u ep tid %u state %u "
				      "%pI4:%u->%pI4:%u\n",
				      qp->wq.sq.qid, qp->wq.rq.qid,
				      (int)qp->attr.state,
				      qp->wq.sq.flags & T4_SQ_ONCHIP,
				      qp->ep->hwtid, (int)qp->ep->com.state,
				      &lsin->sin_addr, ntohs(lsin->sin_port),
				      &rsin->sin_addr, ntohs(rsin->sin_port));
		} else {
			struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
				&qp->ep->com.local_addr;
			struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *)
				&qp->ep->com.remote_addr;

			cc = snprintf(qpd->buf + qpd->pos, space,
				      "rc qp sq id %u rq id %u state %u "
				      "onchip %u ep tid %u state %u "
				      "%pI6:%u->%pI6:%u\n",
				      qp->wq.sq.qid, qp->wq.rq.qid,
				      (int)qp->attr.state,
				      qp->wq.sq.flags & T4_SQ_ONCHIP,
				      qp->ep->hwtid, (int)qp->ep->com.state,
				      &lsin6->sin6_addr,
				      ntohs(lsin6->sin6_port),
				      &rsin6->sin6_addr,
				      ntohs(rsin6->sin6_port));
		}
	} else
		cc = snprintf(qpd->buf + qpd->pos, space,
			      "qp sq id %u rq id %u state %u onchip %u\n",
			      qp->wq.sq.qid, qp->wq.rq.qid,
			      (int)qp->attr.state,
			      qp->wq.sq.flags & T4_SQ_ONCHIP);
	if (cc < space)
		qpd->pos += cc;
	return 0;
}

static int qp_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd = file->private_data;

	if (!qpd) {
		printk(KERN_INFO "%s null qpd?\n", __func__);
		return 0;
	}
	vfree(qpd->buf);
	kfree(qpd);
	return 0;
}

static int qp_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd;
	int ret = 0;
	int count = 1;

	qpd = kmalloc(sizeof *qpd, GFP_KERNEL);
	if (!qpd) {
		ret = -ENOMEM;
		goto out;
	}
	qpd->devp = inode->i_private;
	qpd->pos = 0;

	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, count_idrs, &count);
	spin_unlock_irq(&qpd->devp->lock);

	qpd->bufsize = count * 128;
	qpd->buf = vmalloc(qpd->bufsize);
	if (!qpd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, dump_qp, qpd);
	spin_unlock_irq(&qpd->devp->lock);

	qpd->buf[qpd->pos++] = 0;
	file->private_data = qpd;
	goto out;
err1:
	kfree(qpd);
out:
	return ret;
}

static const struct file_operations qp_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = qp_open,
	.release = qp_release,
	.read = debugfs_read,
	.llseek = default_llseek,
};
static int dump_stag(int id, void *p, void *data)
{
	struct c4iw_debugfs_data *stagd = data;
	int space;
	int cc;

	space = stagd->bufsize - stagd->pos - 1;
	if (space == 0)
		return 1;

	cc = snprintf(stagd->buf + stagd->pos, space, "0x%x\n", id << 8);
	if (cc < space)
		stagd->pos += cc;
	return 0;
}

static int stag_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd = file->private_data;

	if (!stagd) {
		printk(KERN_INFO "%s null stagd?\n", __func__);
		return 0;
	}
	kfree(stagd->buf);
	kfree(stagd);
	return 0;
}

static int stag_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd;
	int ret = 0;
	int count = 1;

	stagd = kmalloc(sizeof *stagd, GFP_KERNEL);
	if (!stagd) {
		ret = -ENOMEM;
		goto out;
	}
	stagd->devp = inode->i_private;
	stagd->pos = 0;

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, count_idrs, &count);
	spin_unlock_irq(&stagd->devp->lock);

	stagd->bufsize = count * sizeof("0x12345678\n");
	stagd->buf = kmalloc(stagd->bufsize, GFP_KERNEL);
	if (!stagd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, dump_stag, stagd);
	spin_unlock_irq(&stagd->devp->lock);

	stagd->buf[stagd->pos++] = 0;
	file->private_data = stagd;
	goto out;
err1:
	kfree(stagd);
out:
	return ret;
}

static const struct file_operations stag_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = stag_open,
	.release = stag_release,
	.read = debugfs_read,
	.llseek = default_llseek,
};
static char *db_state_str[] = {"NORMAL", "FLOW_CONTROL", "RECOVERY"};

static int stats_show(struct seq_file *seq, void *v)
{
	struct c4iw_dev *dev = seq->private;

	seq_printf(seq, " Object: %10s %10s %10s %10s\n", "Total", "Current",
		   "Max", "Fail");
	seq_printf(seq, " PDID: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.pd.total, dev->rdev.stats.pd.cur,
		   dev->rdev.stats.pd.max, dev->rdev.stats.pd.fail);
	seq_printf(seq, " QID: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.qid.total, dev->rdev.stats.qid.cur,
		   dev->rdev.stats.qid.max, dev->rdev.stats.qid.fail);
	seq_printf(seq, " TPTMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.stag.total, dev->rdev.stats.stag.cur,
		   dev->rdev.stats.stag.max, dev->rdev.stats.stag.fail);
	seq_printf(seq, " PBLMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.pbl.total, dev->rdev.stats.pbl.cur,
		   dev->rdev.stats.pbl.max, dev->rdev.stats.pbl.fail);
	seq_printf(seq, " RQTMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.rqt.total, dev->rdev.stats.rqt.cur,
		   dev->rdev.stats.rqt.max, dev->rdev.stats.rqt.fail);
	seq_printf(seq, " OCQPMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.ocqp.total, dev->rdev.stats.ocqp.cur,
		   dev->rdev.stats.ocqp.max, dev->rdev.stats.ocqp.fail);
	seq_printf(seq, " DB FULL: %10llu\n", dev->rdev.stats.db_full);
	seq_printf(seq, " DB EMPTY: %10llu\n", dev->rdev.stats.db_empty);
	seq_printf(seq, " DB DROP: %10llu\n", dev->rdev.stats.db_drop);
	seq_printf(seq, " DB State: %s Transitions %llu\n",
		   db_state_str[dev->db_state],
		   dev->rdev.stats.db_state_transitions);
	seq_printf(seq, "TCAM_FULL: %10llu\n", dev->rdev.stats.tcam_full);
	seq_printf(seq, "ACT_OFLD_CONN_FAILS: %10llu\n",
		   dev->rdev.stats.act_ofld_conn_fails);
	seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n",
		   dev->rdev.stats.pas_ofld_conn_fails);
	return 0;
}

static int stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, stats_show, inode->i_private);
}
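
/*
 * Writing anything to the "stats" file resets the accumulated max/fail and
 * doorbell/TCAM error counters.
 */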
static ssize_t stats_clear(struct file *file, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private;

	mutex_lock(&dev->rdev.stats.lock);
	dev->rdev.stats.pd.max = 0;
	dev->rdev.stats.pd.fail = 0;
	dev->rdev.stats.qid.max = 0;
	dev->rdev.stats.qid.fail = 0;
	dev->rdev.stats.stag.max = 0;
	dev->rdev.stats.stag.fail = 0;
	dev->rdev.stats.pbl.max = 0;
	dev->rdev.stats.pbl.fail = 0;
	dev->rdev.stats.rqt.max = 0;
	dev->rdev.stats.rqt.fail = 0;
	dev->rdev.stats.ocqp.max = 0;
	dev->rdev.stats.ocqp.fail = 0;
	dev->rdev.stats.db_full = 0;
	dev->rdev.stats.db_empty = 0;
	dev->rdev.stats.db_drop = 0;
	dev->rdev.stats.db_state_transitions = 0;
	dev->rdev.stats.tcam_full = 0;
	dev->rdev.stats.act_ofld_conn_fails = 0;
	dev->rdev.stats.pas_ofld_conn_fails = 0;
	mutex_unlock(&dev->rdev.stats.lock);
	return count;
}

static const struct file_operations stats_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = stats_open,
	.release = single_release,
	.read = seq_read,
	.llseek = seq_lseek,
	.write = stats_clear,
};
static int dump_ep(int id, void *p, void *data)
{
	struct c4iw_ep *ep = p;
	struct c4iw_debugfs_data *epd = data;
	int space;
	int cc;

	space = epd->bufsize - epd->pos - 1;
	if (space == 0)
		return 1;

	if (ep->com.local_addr.ss_family == AF_INET) {
		struct sockaddr_in *lsin = (struct sockaddr_in *)
			&ep->com.local_addr;
		struct sockaddr_in *rsin = (struct sockaddr_in *)
			&ep->com.remote_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
			      "history 0x%lx hwtid %d atid %d "
			      "%pI4:%d <-> %pI4:%d\n",
			      ep, ep->com.cm_id, ep->com.qp,
			      (int)ep->com.state, ep->com.flags,
			      ep->com.history, ep->hwtid, ep->atid,
			      &lsin->sin_addr, ntohs(lsin->sin_port),
			      &rsin->sin_addr, ntohs(rsin->sin_port));
	} else {
		struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
			&ep->com.local_addr;
		struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *)
			&ep->com.remote_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
			      "history 0x%lx hwtid %d atid %d "
			      "%pI6:%d <-> %pI6:%d\n",
			      ep, ep->com.cm_id, ep->com.qp,
			      (int)ep->com.state, ep->com.flags,
			      ep->com.history, ep->hwtid, ep->atid,
			      &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
			      &rsin6->sin6_addr, ntohs(rsin6->sin6_port));
	}
	if (cc < space)
		epd->pos += cc;
	return 0;
}

static int dump_listen_ep(int id, void *p, void *data)
{
	struct c4iw_listen_ep *ep = p;
	struct c4iw_debugfs_data *epd = data;
	int space;
	int cc;

	space = epd->bufsize - epd->pos - 1;
	if (space == 0)
		return 1;

	if (ep->com.local_addr.ss_family == AF_INET) {
		struct sockaddr_in *lsin = (struct sockaddr_in *)
			&ep->com.local_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p state %d flags 0x%lx stid %d "
			      "backlog %d %pI4:%d\n",
			      ep, ep->com.cm_id, (int)ep->com.state,
			      ep->com.flags, ep->stid, ep->backlog,
			      &lsin->sin_addr, ntohs(lsin->sin_port));
	} else {
		struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
			&ep->com.local_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p state %d flags 0x%lx stid %d "
			      "backlog %d %pI6:%d\n",
			      ep, ep->com.cm_id, (int)ep->com.state,
			      ep->com.flags, ep->stid, ep->backlog,
			      &lsin6->sin6_addr, ntohs(lsin6->sin6_port));
	}
	if (cc < space)
		epd->pos += cc;
	return 0;
}
static int ep_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *epd = file->private_data;

	if (!epd) {
		pr_info("%s null epd?\n", __func__);
		return 0;
	}
	vfree(epd->buf);
	kfree(epd);
	return 0;
}
static int ep_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *epd;
	int ret = 0;
	int count = 1;

	epd = kmalloc(sizeof(*epd), GFP_KERNEL);
	if (!epd) {
		ret = -ENOMEM;
		goto out;
	}
	epd->devp = inode->i_private;
	epd->pos = 0;

	spin_lock_irq(&epd->devp->lock);
	idr_for_each(&epd->devp->hwtid_idr, count_idrs, &count);
	idr_for_each(&epd->devp->atid_idr, count_idrs, &count);
	idr_for_each(&epd->devp->stid_idr, count_idrs, &count);
	spin_unlock_irq(&epd->devp->lock);

	epd->bufsize = count * 160;
	epd->buf = vmalloc(epd->bufsize);
	if (!epd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&epd->devp->lock);
	idr_for_each(&epd->devp->hwtid_idr, dump_ep, epd);
	idr_for_each(&epd->devp->atid_idr, dump_ep, epd);
	idr_for_each(&epd->devp->stid_idr, dump_listen_ep, epd);
	spin_unlock_irq(&epd->devp->lock);

	file->private_data = epd;
	goto out;
err1:
	kfree(epd);
out:
	return ret;
}

static const struct file_operations ep_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = ep_open,
	.release = ep_release,
	.read = debugfs_read,
};
static int setup_debugfs(struct c4iw_dev *devp)
{
	struct dentry *de;

	if (!devp->debugfs_root)
		return -1;

	de = debugfs_create_file("qps", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &qp_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;

	de = debugfs_create_file("stags", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &stag_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;

	de = debugfs_create_file("stats", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &stats_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;

	de = debugfs_create_file("eps", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &ep_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;

	return 0;
}
void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
			       struct c4iw_dev_ucontext *uctx)
{
	struct list_head *pos, *nxt;
	struct c4iw_qid_list *entry;

	mutex_lock(&uctx->lock);
	list_for_each_safe(pos, nxt, &uctx->qpids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		if (!(entry->qid & rdev->qpmask)) {
			c4iw_put_resource(&rdev->resource.qid_table,
					  entry->qid);
			mutex_lock(&rdev->stats.lock);
			rdev->stats.qid.cur -= rdev->qpmask + 1;
			mutex_unlock(&rdev->stats.lock);
		}
		kfree(entry);
	}

	/* free any leftover cached cq qids as well */
	list_for_each_safe(pos, nxt, &uctx->cqids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		kfree(entry);
	}
	mutex_unlock(&uctx->lock);
}
void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
			    struct c4iw_dev_ucontext *uctx)
{
	INIT_LIST_HEAD(&uctx->qpids);
	INIT_LIST_HEAD(&uctx->cqids);
	mutex_init(&uctx->lock);
}
/* Caller takes care of locking if needed */
static int c4iw_rdev_open(struct c4iw_rdev *rdev)
{
	int err;

	c4iw_init_dev_ucontext(rdev, &rdev->uctx);

	/*
	 * qpshift is the number of bits to shift the qpid left in order
	 * to get the correct address of the doorbell for that qp.
	 */
	rdev->qpshift = PAGE_SHIFT - ilog2(rdev->lldi.udb_density);
	rdev->qpmask = rdev->lldi.udb_density - 1;
	rdev->cqshift = PAGE_SHIFT - ilog2(rdev->lldi.ucq_density);
	rdev->cqmask = rdev->lldi.ucq_density - 1;
	PDBG("%s dev %s stag start 0x%0x size 0x%0x num stags %d "
	     "pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x "
	     "qp qid start %u size %u cq qid start %u size %u\n",
	     __func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
	     rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
	     rdev->lldi.vr->pbl.start,
	     rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
	     rdev->lldi.vr->rq.size,
	     rdev->lldi.vr->qp.start,
	     rdev->lldi.vr->qp.size,
	     rdev->lldi.vr->cq.start,
	     rdev->lldi.vr->cq.size);
	PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p qpshift %lu "
	     "qpmask 0x%x cqshift %lu cqmask 0x%x\n",
	     (unsigned)pci_resource_len(rdev->lldi.pdev, 2),
	     (void *)(unsigned long)pci_resource_start(rdev->lldi.pdev, 2),
	     rdev->lldi.db_reg,
	     rdev->lldi.gts_reg,
	     rdev->qpshift, rdev->qpmask,
	     rdev->cqshift, rdev->cqmask);

	if (c4iw_num_stags(rdev) == 0) {
		err = -EINVAL;
		goto err1;
	}

	rdev->stats.pd.total = T4_MAX_NUM_PD;
	rdev->stats.stag.total = rdev->lldi.vr->stag.size;
	rdev->stats.pbl.total = rdev->lldi.vr->pbl.size;
	rdev->stats.rqt.total = rdev->lldi.vr->rq.size;
	rdev->stats.ocqp.total = rdev->lldi.vr->ocq.size;
	rdev->stats.qid.total = rdev->lldi.vr->qp.size;

	err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing resources\n", err);
		goto err1;
	}
	err = c4iw_pblpool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing pbl pool\n", err);
		goto err2;
	}
	err = c4iw_rqtpool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing rqt pool\n", err);
		goto err3;
	}
	err = c4iw_ocqp_pool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing ocqp pool\n", err);
		goto err4;
	}
	return 0;
err4:
	c4iw_rqtpool_destroy(rdev);
err3:
	c4iw_pblpool_destroy(rdev);
err2:
	c4iw_destroy_resource(&rdev->resource);
err1:
	return err;
}
static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
	c4iw_pblpool_destroy(rdev);
	c4iw_rqtpool_destroy(rdev);
	c4iw_destroy_resource(&rdev->resource);
}

static void c4iw_dealloc(struct uld_ctx *ctx)
{
	c4iw_rdev_close(&ctx->dev->rdev);
	idr_destroy(&ctx->dev->cqidr);
	idr_destroy(&ctx->dev->qpidr);
	idr_destroy(&ctx->dev->mmidr);
	idr_destroy(&ctx->dev->hwtid_idr);
	idr_destroy(&ctx->dev->stid_idr);
	idr_destroy(&ctx->dev->atid_idr);
	iounmap(ctx->dev->rdev.oc_mw_kva);
	ib_dealloc_device(&ctx->dev->ibdev);
	ctx->dev = NULL;
}

static void c4iw_remove(struct uld_ctx *ctx)
{
	PDBG("%s c4iw_dev %p\n", __func__, ctx->dev);
	c4iw_unregister_device(ctx->dev);
	c4iw_dealloc(ctx);
}

static int rdma_supported(const struct cxgb4_lld_info *infop)
{
	return infop->vr->stag.size > 0 && infop->vr->pbl.size > 0 &&
	       infop->vr->rq.size > 0 && infop->vr->qp.size > 0 &&
	       infop->vr->cq.size > 0;
}
static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
{
	struct c4iw_dev *devp;
	int ret;

	if (!rdma_supported(infop)) {
		printk(KERN_INFO MOD "%s: RDMA not supported on this device.\n",
		       pci_name(infop->pdev));
		return ERR_PTR(-ENOSYS);
	}
	if (!ocqp_supported(infop))
		pr_info("%s: On-Chip Queues not supported on this device.\n",
			pci_name(infop->pdev));

	if (!is_t4(infop->adapter_type)) {
		if (!allow_db_fc_on_t5) {
			db_fc_threshold = 100000;
			pr_info("DB Flow Control Disabled.\n");
		}

		if (!allow_db_coalescing_on_t5) {
			db_coalescing_threshold = -1;
			pr_info("DB Coalescing Disabled.\n");
		}
	}

	devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
	if (!devp) {
		printk(KERN_ERR MOD "Cannot allocate ib device\n");
		return ERR_PTR(-ENOMEM);
	}
	devp->rdev.lldi = *infop;

	devp->rdev.oc_mw_pa = pci_resource_start(devp->rdev.lldi.pdev, 2) +
		(pci_resource_len(devp->rdev.lldi.pdev, 2) -
		 roundup_pow_of_two(devp->rdev.lldi.vr->ocq.size));
	devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
					  devp->rdev.lldi.vr->ocq.size);

	PDBG(KERN_INFO MOD "ocq memory: "
	     "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
	     devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
	     devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);

	ret = c4iw_rdev_open(&devp->rdev);
	if (ret) {
		printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret);
		ib_dealloc_device(&devp->ibdev);
		return ERR_PTR(ret);
	}

	idr_init(&devp->cqidr);
	idr_init(&devp->qpidr);
	idr_init(&devp->mmidr);
	idr_init(&devp->hwtid_idr);
	idr_init(&devp->stid_idr);
	idr_init(&devp->atid_idr);
	spin_lock_init(&devp->lock);
	mutex_init(&devp->rdev.stats.lock);
	mutex_init(&devp->db_mutex);

	if (c4iw_debugfs_root) {
		devp->debugfs_root = debugfs_create_dir(
					pci_name(devp->rdev.lldi.pdev),
					c4iw_debugfs_root);
		setup_debugfs(devp);
	}
	return devp;
}
static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
{
	struct uld_ctx *ctx;
	static int vers_printed;
	int i;

	if (!vers_printed++)
		pr_info("Chelsio T4/T5 RDMA Driver - version %s\n",
			DRV_VERSION);

	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
	if (!ctx) {
		ctx = ERR_PTR(-ENOMEM);
		goto out;
	}
	ctx->lldi = *infop;

	PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
	     __func__, pci_name(ctx->lldi.pdev),
	     ctx->lldi.nchan, ctx->lldi.nrxq,
	     ctx->lldi.ntxq, ctx->lldi.nports);

	mutex_lock(&dev_mutex);
	list_add_tail(&ctx->entry, &uld_ctx_list);
	mutex_unlock(&dev_mutex);

	for (i = 0; i < ctx->lldi.nrxq; i++)
		PDBG("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]);
out:
	return ctx;
}
static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
						 const __be64 *rsp,
						 u32 pktshift)
{
	struct sk_buff *skb;

	/*
	 * Allocate space for cpl_pass_accept_req which will be synthesized by
	 * driver. Once the driver synthesizes the request the skb will go
	 * through the regular cpl_pass_accept_req processing.
	 * The math here assumes sizeof cpl_pass_accept_req >= sizeof
	 * cpl_rx_pkt.
	 */
	skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) +
			sizeof(struct rss_header) - pktshift, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	__skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) +
		  sizeof(struct rss_header) - pktshift);

	/*
	 * This skb will contain:
	 *   rss_header from the rspq descriptor (1 flit)
	 *   cpl_rx_pkt struct from the rspq descriptor (2 flits)
	 *   space for the difference between the size of an
	 *     rx_pkt and pass_accept_req cpl (1 flit)
	 *   the packet data from the gl
	 */
	skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_pass_accept_req) +
				sizeof(struct rss_header));
	skb_copy_to_linear_data_offset(skb, sizeof(struct rss_header) +
				       sizeof(struct cpl_pass_accept_req),
				       gl->va + pktshift,
				       gl->tot_len - pktshift);
	return skb;
}
static inline int recv_rx_pkt(struct c4iw_dev *dev, const struct pkt_gl *gl,
			      const __be64 *rsp)
{
	unsigned int opcode = *(u8 *)rsp;
	struct sk_buff *skb;

	if (opcode != CPL_RX_PKT)
		goto out;

	skb = copy_gl_to_skb_pkt(gl, rsp, dev->rdev.lldi.sge_pktshift);
	if (skb == NULL)
		goto out;

	if (c4iw_handlers[opcode] == NULL) {
		pr_info("%s no handler opcode 0x%x...\n", __func__,
			opcode);
		kfree_skb(skb);
		goto out;
	}
	c4iw_handlers[opcode](dev, skb);
	return 1;
out:
	return 0;
}
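
/*
 * ULD ingress handler.  Three cases: a CPL message delivered inline in the
 * response descriptor (gl == NULL) or carried in a free-list packet, both of
 * which are copied into an skb and dispatched to the CPL handler registered
 * for their opcode, and an asynchronous notification (gl == CXGB4_MSG_AN),
 * which is routed to c4iw_ev_handler().
 */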
static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
			       const struct pkt_gl *gl)
{
	struct uld_ctx *ctx = handle;
	struct c4iw_dev *dev = ctx->dev;
	struct sk_buff *skb;
	u8 opcode;

	if (gl == NULL) {
		/* omit RSS and rsp_ctrl at end of descriptor */
		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;

		skb = alloc_skb(256, GFP_ATOMIC);
		if (!skb)
			goto nomem;
		__skb_put(skb, len);
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else if (gl == CXGB4_MSG_AN) {
		const struct rsp_ctrl *rc = (void *)rsp;
		u32 qid = be32_to_cpu(rc->pldbuflen_qid);

		c4iw_ev_handler(dev, qid);
		return 0;
	} else if (unlikely(*(u8 *)rsp != *(u8 *)gl->va)) {
		if (recv_rx_pkt(dev, gl, rsp))
			return 0;

		pr_info("%s: unexpected FL contents at %p, " \
			"RSS %#llx, FL %#llx, len %u\n",
			pci_name(ctx->lldi.pdev), gl->va,
			(unsigned long long)be64_to_cpu(*rsp),
			(unsigned long long)be64_to_cpu(
				*(__force __be64 *)gl->va),
			gl->tot_len);

		return 0;
	} else {
		skb = cxgb4_pktgl_to_skb(gl, 128, 128);
		if (unlikely(!skb))
			goto nomem;
	}

	opcode = *(u8 *)rsp;
	if (c4iw_handlers[opcode])
		c4iw_handlers[opcode](dev, skb);
	else
		pr_info("%s no handler opcode 0x%x...\n", __func__,
			opcode);

	return 0;
nomem:
	return -1;
}
static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
{
	struct uld_ctx *ctx = handle;

	PDBG("%s new_state %u\n", __func__, new_state);
	switch (new_state) {
	case CXGB4_STATE_UP:
		printk(KERN_INFO MOD "%s: Up\n", pci_name(ctx->lldi.pdev));
		if (!ctx->dev) {
			int ret;

			ctx->dev = c4iw_alloc(&ctx->lldi);
			if (IS_ERR(ctx->dev)) {
				printk(KERN_ERR MOD
				       "%s: initialization failed: %ld\n",
				       pci_name(ctx->lldi.pdev),
				       PTR_ERR(ctx->dev));
				ctx->dev = NULL;
				break;
			}
			ret = c4iw_register_device(ctx->dev);
			if (ret) {
				printk(KERN_ERR MOD
				       "%s: RDMA registration failed: %d\n",
				       pci_name(ctx->lldi.pdev), ret);
				c4iw_dealloc(ctx);
			}
		}
		break;
	case CXGB4_STATE_DOWN:
		printk(KERN_INFO MOD "%s: Down\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	case CXGB4_STATE_START_RECOVERY:
		printk(KERN_INFO MOD "%s: Fatal Error\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev) {
			struct ib_event event;

			ctx->dev->rdev.flags |= T4_FATAL_ERROR;
			memset(&event, 0, sizeof event);
			event.event = IB_EVENT_DEVICE_FATAL;
			event.device = &ctx->dev->ibdev;
			ib_dispatch_event(&event);
			c4iw_remove(ctx);
		}
		break;
	case CXGB4_STATE_DETACH:
		printk(KERN_INFO MOD "%s: Detach\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	}
	return 0;
}
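
/*
 * Doorbell overflow handling: the device moves between the NORMAL,
 * FLOW_CONTROL and RECOVERY db_state values in response to the DB_FULL,
 * DB_EMPTY and DB_DROP control messages from the LLD (see
 * c4iw_uld_control() below).
 */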
static int disable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_disable_wq_db(&qp->wq);
	return 0;
}

static void stop_queues(struct uld_ctx *ctx)
{
	spin_lock_irq(&ctx->dev->lock);
	if (ctx->dev->db_state == NORMAL) {
		ctx->dev->rdev.stats.db_state_transitions++;
		ctx->dev->db_state = FLOW_CONTROL;
		idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
	}
	spin_unlock_irq(&ctx->dev->lock);
}

static int enable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_enable_wq_db(&qp->wq);
	return 0;
}

static void resume_queues(struct uld_ctx *ctx)
{
	spin_lock_irq(&ctx->dev->lock);
	if (ctx->dev->qpcnt <= db_fc_threshold &&
	    ctx->dev->db_state == FLOW_CONTROL) {
		ctx->dev->db_state = NORMAL;
		ctx->dev->rdev.stats.db_state_transitions++;
		idr_for_each(&ctx->dev->qpidr, enable_qp_db, NULL);
	}
	spin_unlock_irq(&ctx->dev->lock);
}
struct qp_list {
	unsigned idx;
	struct c4iw_qp **qps;
};

static int add_and_ref_qp(int id, void *p, void *data)
{
	struct qp_list *qp_listp = data;
	struct c4iw_qp *qp = p;

	c4iw_qp_add_ref(&qp->ibqp);
	qp_listp->qps[qp_listp->idx++] = qp;
	return 0;
}

static int count_qps(int id, void *p, void *data)
{
	unsigned *countp = data;

	(*countp)++;
	return 0;
}

static void deref_qps(struct qp_list qp_list)
{
	int idx;

	for (idx = 0; idx < qp_list.idx; idx++)
		c4iw_qp_rem_ref(&qp_list.qps[idx]->ibqp);
}

static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
{
	int idx;
	int ret;

	for (idx = 0; idx < qp_list->idx; idx++) {
		struct c4iw_qp *qp = qp_list->qps[idx];

		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
					  qp->wq.sq.qid,
					  t4_sq_host_wq_pidx(&qp->wq),
					  t4_sq_wq_size(&qp->wq));
		if (ret) {
			printk(KERN_ERR MOD "%s: Fatal error - "
			       "DB overflow recovery failed - "
			       "error syncing SQ qid %u\n",
			       pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
			return;
		}

		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
					  qp->wq.rq.qid,
					  t4_rq_host_wq_pidx(&qp->wq),
					  t4_rq_wq_size(&qp->wq));
		if (ret) {
			printk(KERN_ERR MOD "%s: Fatal error - "
			       "DB overflow recovery failed - "
			       "error syncing RQ qid %u\n",
			       pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
			return;
		}

		/* Wait for the dbfifo to drain */
		while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(usecs_to_jiffies(10));
		}
	}
}
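
/*
 * Full DB_DROP recovery: quiesce all QP doorbells, flush the SGE EQ cache,
 * resync each QP's SQ/RQ producer index with hardware, then re-enable
 * doorbells (or stay in FLOW_CONTROL if too many QPs are active).
 */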
static void recover_queues(struct uld_ctx *ctx)
{
	int count = 0;
	struct qp_list qp_list;
	int ret;

	/* lock out kernel db ringers */
	mutex_lock(&ctx->dev->db_mutex);

	/* put all queues in to recovery mode */
	spin_lock_irq(&ctx->dev->lock);
	ctx->dev->db_state = RECOVERY;
	ctx->dev->rdev.stats.db_state_transitions++;
	idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
	spin_unlock_irq(&ctx->dev->lock);

	/* slow everybody down */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(usecs_to_jiffies(1000));

	/* Wait for the dbfifo to completely drain. */
	while (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1) > 0) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(10));
	}

	/* flush the SGE contexts */
	ret = cxgb4_flush_eq_cache(ctx->dev->rdev.lldi.ports[0]);
	if (ret) {
		printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
		       pci_name(ctx->lldi.pdev));
		goto out;
	}

	/* Count active queues so we can build a list of queues to recover */
	spin_lock_irq(&ctx->dev->lock);
	idr_for_each(&ctx->dev->qpidr, count_qps, &count);

	qp_list.qps = kzalloc(count * sizeof *qp_list.qps, GFP_ATOMIC);
	if (!qp_list.qps) {
		printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
		       pci_name(ctx->lldi.pdev));
		spin_unlock_irq(&ctx->dev->lock);
		goto out;
	}
	qp_list.idx = 0;

	/* add and ref each qp so it doesn't get freed */
	idr_for_each(&ctx->dev->qpidr, add_and_ref_qp, &qp_list);
	spin_unlock_irq(&ctx->dev->lock);

	/* now traverse the list in a safe context to recover the db state */
	recover_lost_dbs(ctx, &qp_list);

	/* we're almost done! deref the qps and clean up */
	deref_qps(qp_list);
	kfree(qp_list.qps);

	/* Wait for the dbfifo to completely drain again */
	while (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1) > 0) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(10));
	}

	/* resume the queues */
	spin_lock_irq(&ctx->dev->lock);
	if (ctx->dev->qpcnt > db_fc_threshold)
		ctx->dev->db_state = FLOW_CONTROL;
	else {
		ctx->dev->db_state = NORMAL;
		idr_for_each(&ctx->dev->qpidr, enable_qp_db, NULL);
	}
	ctx->dev->rdev.stats.db_state_transitions++;
	spin_unlock_irq(&ctx->dev->lock);
out:
	/* start up kernel db ringers again */
	mutex_unlock(&ctx->dev->db_mutex);
}
static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
{
	struct uld_ctx *ctx = handle;

	switch (control) {
	case CXGB4_CONTROL_DB_FULL:
		stop_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_full++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	case CXGB4_CONTROL_DB_EMPTY:
		resume_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_empty++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	case CXGB4_CONTROL_DB_DROP:
		recover_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_drop++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	default:
		printk(KERN_WARNING MOD "%s: unknown control cmd %u\n",
		       pci_name(ctx->lldi.pdev), control);
		break;
	}
	return 0;
}

static struct cxgb4_uld_info c4iw_uld_info = {
	.name = DRV_NAME,
	.add = c4iw_uld_add,
	.rx_handler = c4iw_uld_rx_handler,
	.state_change = c4iw_uld_state_change,
	.control = c4iw_uld_control,
};
static int __init c4iw_init_module(void)
{
	int err;

	err = c4iw_cm_init();
	if (err)
		return err;

	c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
	if (!c4iw_debugfs_root)
		printk(KERN_WARNING MOD
		       "could not create debugfs entry, continuing\n");

	cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);

	return 0;
}

static void __exit c4iw_exit_module(void)
{
	struct uld_ctx *ctx, *tmp;

	mutex_lock(&dev_mutex);
	list_for_each_entry_safe(ctx, tmp, &uld_ctx_list, entry) {
		if (ctx->dev)
			c4iw_remove(ctx);
		kfree(ctx);
	}
	mutex_unlock(&dev_mutex);
	cxgb4_unregister_uld(CXGB4_ULD_RDMA);
	c4iw_cm_term();
	debugfs_remove_recursive(c4iw_debugfs_root);
}

module_init(c4iw_init_module);
module_exit(c4iw_exit_module);