device.c

/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/debugfs.h>
#include <linux/vmalloc.h>

#include <rdma/ib_verbs.h>

#include "iw_cxgb4.h"

#define DRV_VERSION "0.1"

MODULE_AUTHOR("Steve Wise");
MODULE_DESCRIPTION("Chelsio T4/T5 RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

static int allow_db_fc_on_t5;
module_param(allow_db_fc_on_t5, int, 0644);
MODULE_PARM_DESC(allow_db_fc_on_t5,
		 "Allow DB Flow Control on T5 (default = 0)");

static int allow_db_coalescing_on_t5;
module_param(allow_db_coalescing_on_t5, int, 0644);
MODULE_PARM_DESC(allow_db_coalescing_on_t5,
		 "Allow DB Coalescing on T5 (default = 0)");
struct uld_ctx {
	struct list_head entry;
	struct cxgb4_lld_info lldi;
	struct c4iw_dev *dev;
};

static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);

static struct dentry *c4iw_debugfs_root;

struct c4iw_debugfs_data {
	struct c4iw_dev *devp;
	char *buf;
	int bufsize;
	int pos;
};
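
/*
 * Each debugfs dump file below follows the same pattern: on open, walk
 * the relevant idr under the device lock to count the objects, size a
 * buffer from that count, then walk the idr again to format one line
 * per object into the buffer.  Reads are then served from the prebuilt
 * buffer by debugfs_read().
 */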
static int count_idrs(int id, void *p, void *data)
{
	int *countp = data;

	*countp = *countp + 1;
	return 0;
}

static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct c4iw_debugfs_data *d = file->private_data;

	return simple_read_from_buffer(buf, count, ppos, d->buf, d->pos);
}
static int dump_qp(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;
	struct c4iw_debugfs_data *qpd = data;
	int space;
	int cc;

	if (id != qp->wq.sq.qid)
		return 0;

	space = qpd->bufsize - qpd->pos - 1;
	if (space == 0)
		return 1;

	if (qp->ep)
		cc = snprintf(qpd->buf + qpd->pos, space,
			      "qp sq id %u rq id %u state %u onchip %u "
			      "ep tid %u state %u %pI4:%u->%pI4:%u\n",
			      qp->wq.sq.qid, qp->wq.rq.qid, (int)qp->attr.state,
			      qp->wq.sq.flags & T4_SQ_ONCHIP,
			      qp->ep->hwtid, (int)qp->ep->com.state,
			      &qp->ep->com.local_addr.sin_addr.s_addr,
			      ntohs(qp->ep->com.local_addr.sin_port),
			      &qp->ep->com.remote_addr.sin_addr.s_addr,
			      ntohs(qp->ep->com.remote_addr.sin_port));
	else
		cc = snprintf(qpd->buf + qpd->pos, space,
			      "qp sq id %u rq id %u state %u onchip %u\n",
			      qp->wq.sq.qid, qp->wq.rq.qid,
			      (int)qp->attr.state,
			      qp->wq.sq.flags & T4_SQ_ONCHIP);
	if (cc < space)
		qpd->pos += cc;
	return 0;
}
static int qp_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd = file->private_data;
	if (!qpd) {
		printk(KERN_INFO "%s null qpd?\n", __func__);
		return 0;
	}
	vfree(qpd->buf);
	kfree(qpd);
	return 0;
}

static int qp_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd;
	int ret = 0;
	int count = 1;

	qpd = kmalloc(sizeof *qpd, GFP_KERNEL);
	if (!qpd) {
		ret = -ENOMEM;
		goto out;
	}
	qpd->devp = inode->i_private;
	qpd->pos = 0;

	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, count_idrs, &count);
	spin_unlock_irq(&qpd->devp->lock);

	qpd->bufsize = count * 128;
	qpd->buf = vmalloc(qpd->bufsize);
	if (!qpd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, dump_qp, qpd);
	spin_unlock_irq(&qpd->devp->lock);

	qpd->buf[qpd->pos++] = 0;
	file->private_data = qpd;
	goto out;
err1:
	kfree(qpd);
out:
	return ret;
}

static const struct file_operations qp_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = qp_open,
	.release = qp_release,
	.read = debugfs_read,
	.llseek = default_llseek,
};
static int dump_stag(int id, void *p, void *data)
{
	struct c4iw_debugfs_data *stagd = data;
	int space;
	int cc;

	space = stagd->bufsize - stagd->pos - 1;
	if (space == 0)
		return 1;

	cc = snprintf(stagd->buf + stagd->pos, space, "0x%x\n", id << 8);
	if (cc < space)
		stagd->pos += cc;
	return 0;
}

static int stag_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd = file->private_data;
	if (!stagd) {
		printk(KERN_INFO "%s null stagd?\n", __func__);
		return 0;
	}
	kfree(stagd->buf);
	kfree(stagd);
	return 0;
}

static int stag_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd;
	int ret = 0;
	int count = 1;

	stagd = kmalloc(sizeof *stagd, GFP_KERNEL);
	if (!stagd) {
		ret = -ENOMEM;
		goto out;
	}
	stagd->devp = inode->i_private;
	stagd->pos = 0;

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, count_idrs, &count);
	spin_unlock_irq(&stagd->devp->lock);

	stagd->bufsize = count * sizeof("0x12345678\n");
	stagd->buf = kmalloc(stagd->bufsize, GFP_KERNEL);
	if (!stagd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, dump_stag, stagd);
	spin_unlock_irq(&stagd->devp->lock);

	stagd->buf[stagd->pos++] = 0;
	file->private_data = stagd;
	goto out;
err1:
	kfree(stagd);
out:
	return ret;
}

static const struct file_operations stag_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = stag_open,
	.release = stag_release,
	.read = debugfs_read,
	.llseek = default_llseek,
};
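
/*
 * "stats" reports per-device resource usage (total/current/max/failed
 * allocations) plus doorbell and offload-connection counters.  Writing
 * anything to the file resets the max/fail values and the event
 * counters (see stats_clear()).
 */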
static char *db_state_str[] = {"NORMAL", "FLOW_CONTROL", "RECOVERY"};

static int stats_show(struct seq_file *seq, void *v)
{
	struct c4iw_dev *dev = seq->private;

	seq_printf(seq, "   Object: %10s %10s %10s %10s\n", "Total", "Current",
		   "Max", "Fail");
	seq_printf(seq, "     PDID: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.pd.total, dev->rdev.stats.pd.cur,
		   dev->rdev.stats.pd.max, dev->rdev.stats.pd.fail);
	seq_printf(seq, "      QID: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.qid.total, dev->rdev.stats.qid.cur,
		   dev->rdev.stats.qid.max, dev->rdev.stats.qid.fail);
	seq_printf(seq, "   TPTMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.stag.total, dev->rdev.stats.stag.cur,
		   dev->rdev.stats.stag.max, dev->rdev.stats.stag.fail);
	seq_printf(seq, "   PBLMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.pbl.total, dev->rdev.stats.pbl.cur,
		   dev->rdev.stats.pbl.max, dev->rdev.stats.pbl.fail);
	seq_printf(seq, "   RQTMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.rqt.total, dev->rdev.stats.rqt.cur,
		   dev->rdev.stats.rqt.max, dev->rdev.stats.rqt.fail);
	seq_printf(seq, "  OCQPMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.ocqp.total, dev->rdev.stats.ocqp.cur,
		   dev->rdev.stats.ocqp.max, dev->rdev.stats.ocqp.fail);
	seq_printf(seq, "  DB FULL: %10llu\n", dev->rdev.stats.db_full);
	seq_printf(seq, " DB EMPTY: %10llu\n", dev->rdev.stats.db_empty);
	seq_printf(seq, "  DB DROP: %10llu\n", dev->rdev.stats.db_drop);
	seq_printf(seq, " DB State: %s Transitions %llu\n",
		   db_state_str[dev->db_state],
		   dev->rdev.stats.db_state_transitions);
	seq_printf(seq, "TCAM_FULL: %10llu\n", dev->rdev.stats.tcam_full);
	seq_printf(seq, "ACT_OFLD_CONN_FAILS: %10llu\n",
		   dev->rdev.stats.act_ofld_conn_fails);
	seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n",
		   dev->rdev.stats.pas_ofld_conn_fails);
	return 0;
}

static int stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, stats_show, inode->i_private);
}

static ssize_t stats_clear(struct file *file, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private;

	mutex_lock(&dev->rdev.stats.lock);
	dev->rdev.stats.pd.max = 0;
	dev->rdev.stats.pd.fail = 0;
	dev->rdev.stats.qid.max = 0;
	dev->rdev.stats.qid.fail = 0;
	dev->rdev.stats.stag.max = 0;
	dev->rdev.stats.stag.fail = 0;
	dev->rdev.stats.pbl.max = 0;
	dev->rdev.stats.pbl.fail = 0;
	dev->rdev.stats.rqt.max = 0;
	dev->rdev.stats.rqt.fail = 0;
	dev->rdev.stats.ocqp.max = 0;
	dev->rdev.stats.ocqp.fail = 0;
	dev->rdev.stats.db_full = 0;
	dev->rdev.stats.db_empty = 0;
	dev->rdev.stats.db_drop = 0;
	dev->rdev.stats.db_state_transitions = 0;
	dev->rdev.stats.tcam_full = 0;
	dev->rdev.stats.act_ofld_conn_fails = 0;
	dev->rdev.stats.pas_ofld_conn_fails = 0;
	mutex_unlock(&dev->rdev.stats.lock);
	return count;
}

static const struct file_operations stats_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = stats_open,
	.release = single_release,
	.read = seq_read,
	.llseek = seq_lseek,
	.write = stats_clear,
};
static int dump_ep(int id, void *p, void *data)
{
	struct c4iw_ep *ep = p;
	struct c4iw_debugfs_data *epd = data;
	int space;
	int cc;

	space = epd->bufsize - epd->pos - 1;
	if (space == 0)
		return 1;

	cc = snprintf(epd->buf + epd->pos, space,
		      "ep %p cm_id %p qp %p state %d flags 0x%lx history 0x%lx "
		      "hwtid %d atid %d %pI4:%d <-> %pI4:%d\n",
		      ep, ep->com.cm_id, ep->com.qp, (int)ep->com.state,
		      ep->com.flags, ep->com.history, ep->hwtid, ep->atid,
		      &ep->com.local_addr.sin_addr.s_addr,
		      ntohs(ep->com.local_addr.sin_port),
		      &ep->com.remote_addr.sin_addr.s_addr,
		      ntohs(ep->com.remote_addr.sin_port));
	if (cc < space)
		epd->pos += cc;
	return 0;
}

static int dump_listen_ep(int id, void *p, void *data)
{
	struct c4iw_listen_ep *ep = p;
	struct c4iw_debugfs_data *epd = data;
	int space;
	int cc;

	space = epd->bufsize - epd->pos - 1;
	if (space == 0)
		return 1;

	cc = snprintf(epd->buf + epd->pos, space,
		      "ep %p cm_id %p state %d flags 0x%lx stid %d backlog %d "
		      "%pI4:%d\n", ep, ep->com.cm_id, (int)ep->com.state,
		      ep->com.flags, ep->stid, ep->backlog,
		      &ep->com.local_addr.sin_addr.s_addr,
		      ntohs(ep->com.local_addr.sin_port));
	if (cc < space)
		epd->pos += cc;
	return 0;
}

static int ep_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *epd = file->private_data;
	if (!epd) {
		pr_info("%s null epd?\n", __func__);
		return 0;
	}
	vfree(epd->buf);
	kfree(epd);
	return 0;
}

static int ep_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *epd;
	int ret = 0;
	int count = 1;

	epd = kmalloc(sizeof(*epd), GFP_KERNEL);
	if (!epd) {
		ret = -ENOMEM;
		goto out;
	}
	epd->devp = inode->i_private;
	epd->pos = 0;

	spin_lock_irq(&epd->devp->lock);
	idr_for_each(&epd->devp->hwtid_idr, count_idrs, &count);
	idr_for_each(&epd->devp->atid_idr, count_idrs, &count);
	idr_for_each(&epd->devp->stid_idr, count_idrs, &count);
	spin_unlock_irq(&epd->devp->lock);

	epd->bufsize = count * 160;
	epd->buf = vmalloc(epd->bufsize);
	if (!epd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&epd->devp->lock);
	idr_for_each(&epd->devp->hwtid_idr, dump_ep, epd);
	idr_for_each(&epd->devp->atid_idr, dump_ep, epd);
	idr_for_each(&epd->devp->stid_idr, dump_listen_ep, epd);
	spin_unlock_irq(&epd->devp->lock);

	file->private_data = epd;
	goto out;
err1:
	kfree(epd);
out:
	return ret;
}

static const struct file_operations ep_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = ep_open,
	.release = ep_release,
	.read = debugfs_read,
};
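
/*
 * Note: each debugfs file below gets its i_size forced to 4096.  debugfs
 * files are created with a size of 0 and the contents here are only
 * generated at open time, so presumably this gives userspace tools that
 * look at the file size something nonzero to work with.
 */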
static int setup_debugfs(struct c4iw_dev *devp)
{
	struct dentry *de;

	if (!devp->debugfs_root)
		return -1;

	de = debugfs_create_file("qps", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &qp_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;

	de = debugfs_create_file("stags", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &stag_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;

	de = debugfs_create_file("stats", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &stats_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;

	de = debugfs_create_file("eps", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &ep_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;

	return 0;
}
void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
			       struct c4iw_dev_ucontext *uctx)
{
	struct list_head *pos, *nxt;
	struct c4iw_qid_list *entry;

	mutex_lock(&uctx->lock);
	list_for_each_safe(pos, nxt, &uctx->qpids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		if (!(entry->qid & rdev->qpmask)) {
			c4iw_put_resource(&rdev->resource.qid_table,
					  entry->qid);
			mutex_lock(&rdev->stats.lock);
			rdev->stats.qid.cur -= rdev->qpmask + 1;
			mutex_unlock(&rdev->stats.lock);
		}
		kfree(entry);
	}

	list_for_each_safe(pos, nxt, &uctx->cqids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		kfree(entry);
	}
	mutex_unlock(&uctx->lock);
}

void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
			    struct c4iw_dev_ucontext *uctx)
{
	INIT_LIST_HEAD(&uctx->qpids);
	INIT_LIST_HEAD(&uctx->cqids);
	mutex_init(&uctx->lock);
}
/* Caller takes care of locking if needed */
static int c4iw_rdev_open(struct c4iw_rdev *rdev)
{
	int err;

	c4iw_init_dev_ucontext(rdev, &rdev->uctx);

	/*
	 * qpshift is the number of bits to shift the qpid left in order
	 * to get the correct address of the doorbell for that qp.
	 */
	rdev->qpshift = PAGE_SHIFT - ilog2(rdev->lldi.udb_density);
	rdev->qpmask = rdev->lldi.udb_density - 1;
	rdev->cqshift = PAGE_SHIFT - ilog2(rdev->lldi.ucq_density);
	rdev->cqmask = rdev->lldi.ucq_density - 1;

	PDBG("%s dev %s stag start 0x%0x size 0x%0x num stags %d "
	     "pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x "
	     "qp qid start %u size %u cq qid start %u size %u\n",
	     __func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
	     rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
	     rdev->lldi.vr->pbl.start,
	     rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
	     rdev->lldi.vr->rq.size,
	     rdev->lldi.vr->qp.start,
	     rdev->lldi.vr->qp.size,
	     rdev->lldi.vr->cq.start,
	     rdev->lldi.vr->cq.size);
	PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p qpshift %lu "
	     "qpmask 0x%x cqshift %lu cqmask 0x%x\n",
	     (unsigned)pci_resource_len(rdev->lldi.pdev, 2),
	     (void *)(unsigned long)pci_resource_start(rdev->lldi.pdev, 2),
	     rdev->lldi.db_reg,
	     rdev->lldi.gts_reg,
	     rdev->qpshift, rdev->qpmask,
	     rdev->cqshift, rdev->cqmask);

	if (c4iw_num_stags(rdev) == 0) {
		err = -EINVAL;
		goto err1;
	}

	rdev->stats.pd.total = T4_MAX_NUM_PD;
	rdev->stats.stag.total = rdev->lldi.vr->stag.size;
	rdev->stats.pbl.total = rdev->lldi.vr->pbl.size;
	rdev->stats.rqt.total = rdev->lldi.vr->rq.size;
	rdev->stats.ocqp.total = rdev->lldi.vr->ocq.size;
	rdev->stats.qid.total = rdev->lldi.vr->qp.size;

	err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing resources\n", err);
		goto err1;
	}
	err = c4iw_pblpool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing pbl pool\n", err);
		goto err2;
	}
	err = c4iw_rqtpool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing rqt pool\n", err);
		goto err3;
	}
	err = c4iw_ocqp_pool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing ocqp pool\n", err);
		goto err4;
	}
	return 0;
err4:
	c4iw_rqtpool_destroy(rdev);
err3:
	c4iw_pblpool_destroy(rdev);
err2:
	c4iw_destroy_resource(&rdev->resource);
err1:
	return err;
}
static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
	c4iw_pblpool_destroy(rdev);
	c4iw_rqtpool_destroy(rdev);
	c4iw_destroy_resource(&rdev->resource);
}

static void c4iw_dealloc(struct uld_ctx *ctx)
{
	c4iw_rdev_close(&ctx->dev->rdev);
	idr_destroy(&ctx->dev->cqidr);
	idr_destroy(&ctx->dev->qpidr);
	idr_destroy(&ctx->dev->mmidr);
	idr_destroy(&ctx->dev->hwtid_idr);
	idr_destroy(&ctx->dev->stid_idr);
	idr_destroy(&ctx->dev->atid_idr);
	iounmap(ctx->dev->rdev.oc_mw_kva);
	ib_dealloc_device(&ctx->dev->ibdev);
	ctx->dev = NULL;
}

static void c4iw_remove(struct uld_ctx *ctx)
{
	PDBG("%s c4iw_dev %p\n", __func__, ctx->dev);
	c4iw_unregister_device(ctx->dev);
	c4iw_dealloc(ctx);
}

static int rdma_supported(const struct cxgb4_lld_info *infop)
{
	return infop->vr->stag.size > 0 && infop->vr->pbl.size > 0 &&
	       infop->vr->rq.size > 0 && infop->vr->qp.size > 0 &&
	       infop->vr->cq.size > 0;
}
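
/*
 * On T5 adapters the T4-style doorbell workarounds are off by default:
 * c4iw_alloc() below raises db_fc_threshold so flow control never kicks
 * in, and sets db_coalescing_threshold to -1.  The allow_db_fc_on_t5
 * and allow_db_coalescing_on_t5 module parameters restore the old
 * behavior (presumably useful only for experimentation, since T5 deals
 * with doorbell overflow on its own).
 */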
static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
{
	struct c4iw_dev *devp;
	int ret;

	if (!rdma_supported(infop)) {
		printk(KERN_INFO MOD "%s: RDMA not supported on this device.\n",
		       pci_name(infop->pdev));
		return ERR_PTR(-ENOSYS);
	}
	if (!ocqp_supported(infop))
		pr_info("%s: On-Chip Queues not supported on this device.\n",
			pci_name(infop->pdev));

	if (!is_t4(infop->adapter_type)) {
		if (!allow_db_fc_on_t5) {
			db_fc_threshold = 100000;
			pr_info("DB Flow Control Disabled.\n");
		}

		if (!allow_db_coalescing_on_t5) {
			db_coalescing_threshold = -1;
			pr_info("DB Coalescing Disabled.\n");
		}
	}

	devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
	if (!devp) {
		printk(KERN_ERR MOD "Cannot allocate ib device\n");
		return ERR_PTR(-ENOMEM);
	}
	devp->rdev.lldi = *infop;

	devp->rdev.oc_mw_pa = pci_resource_start(devp->rdev.lldi.pdev, 2) +
		(pci_resource_len(devp->rdev.lldi.pdev, 2) -
		 roundup_pow_of_two(devp->rdev.lldi.vr->ocq.size));
	devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
					  devp->rdev.lldi.vr->ocq.size);

	PDBG(KERN_INFO MOD "ocq memory: "
	     "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
	     devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
	     devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);

	ret = c4iw_rdev_open(&devp->rdev);
	if (ret) {
		printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret);
		ib_dealloc_device(&devp->ibdev);
		return ERR_PTR(ret);
	}

	idr_init(&devp->cqidr);
	idr_init(&devp->qpidr);
	idr_init(&devp->mmidr);
	idr_init(&devp->hwtid_idr);
	idr_init(&devp->stid_idr);
	idr_init(&devp->atid_idr);
	spin_lock_init(&devp->lock);
	mutex_init(&devp->rdev.stats.lock);
	mutex_init(&devp->db_mutex);

	if (c4iw_debugfs_root) {
		devp->debugfs_root = debugfs_create_dir(
					pci_name(devp->rdev.lldi.pdev),
					c4iw_debugfs_root);
		setup_debugfs(devp);
	}
	return devp;
}
static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
{
	struct uld_ctx *ctx;
	static int vers_printed;
	int i;

	if (!vers_printed++)
		pr_info("Chelsio T4/T5 RDMA Driver - version %s\n",
			DRV_VERSION);

	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
	if (!ctx) {
		ctx = ERR_PTR(-ENOMEM);
		goto out;
	}
	ctx->lldi = *infop;

	PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
	     __func__, pci_name(ctx->lldi.pdev),
	     ctx->lldi.nchan, ctx->lldi.nrxq,
	     ctx->lldi.ntxq, ctx->lldi.nports);

	mutex_lock(&dev_mutex);
	list_add_tail(&ctx->entry, &uld_ctx_list);
	mutex_unlock(&dev_mutex);

	for (i = 0; i < ctx->lldi.nrxq; i++)
		PDBG("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]);
out:
	return ctx;
}
static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
						 const __be64 *rsp,
						 u32 pktshift)
{
	struct sk_buff *skb;

	/*
	 * Allocate space for cpl_pass_accept_req which will be synthesized by
	 * driver.  Once the driver synthesizes the request the skb will go
	 * through the regular cpl_pass_accept_req processing.
	 * The math here assumes sizeof cpl_pass_accept_req >= sizeof
	 * cpl_rx_pkt.
	 */
	skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) +
			sizeof(struct rss_header) - pktshift, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	__skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) +
		  sizeof(struct rss_header) - pktshift);

	/*
	 * This skb will contain:
	 *   rss_header from the rspq descriptor (1 flit)
	 *   cpl_rx_pkt struct from the rspq descriptor (2 flits)
	 *   space for the difference between the size of an
	 *      rx_pkt and pass_accept_req cpl (1 flit)
	 *   the packet data from the gl
	 */
	skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_pass_accept_req) +
				sizeof(struct rss_header));
	skb_copy_to_linear_data_offset(skb, sizeof(struct rss_header) +
				       sizeof(struct cpl_pass_accept_req),
				       gl->va + pktshift,
				       gl->tot_len - pktshift);
	return skb;
}

static inline int recv_rx_pkt(struct c4iw_dev *dev, const struct pkt_gl *gl,
			      const __be64 *rsp)
{
	unsigned int opcode = *(u8 *)rsp;
	struct sk_buff *skb;

	if (opcode != CPL_RX_PKT)
		goto out;

	skb = copy_gl_to_skb_pkt(gl, rsp, dev->rdev.lldi.sge_pktshift);
	if (skb == NULL)
		goto out;

	if (c4iw_handlers[opcode] == NULL) {
		pr_info("%s no handler opcode 0x%x...\n", __func__,
			opcode);
		kfree_skb(skb);
		goto out;
	}
	c4iw_handlers[opcode](dev, skb);
	return 1;
out:
	return 0;
}
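
/*
 * Work arrives from the LLD in one of three forms: a bare 64-byte
 * response descriptor (gl == NULL), an asynchronous notification
 * (gl == CXGB4_MSG_AN) carrying the CQ qid to service, or a packet
 * gather list whose contents are copied into an skb.  Everything but
 * the async notifications is dispatched through c4iw_handlers[] by
 * CPL opcode.
 */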
static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
			       const struct pkt_gl *gl)
{
	struct uld_ctx *ctx = handle;
	struct c4iw_dev *dev = ctx->dev;
	struct sk_buff *skb;
	u8 opcode;

	if (gl == NULL) {
		/* omit RSS and rsp_ctrl at end of descriptor */
		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;

		skb = alloc_skb(256, GFP_ATOMIC);
		if (!skb)
			goto nomem;
		__skb_put(skb, len);
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else if (gl == CXGB4_MSG_AN) {
		const struct rsp_ctrl *rc = (void *)rsp;
		u32 qid = be32_to_cpu(rc->pldbuflen_qid);

		c4iw_ev_handler(dev, qid);
		return 0;
	} else if (unlikely(*(u8 *)rsp != *(u8 *)gl->va)) {
		if (recv_rx_pkt(dev, gl, rsp))
			return 0;

		pr_info("%s: unexpected FL contents at %p, "
			"RSS %#llx, FL %#llx, len %u\n",
			pci_name(ctx->lldi.pdev), gl->va,
			(unsigned long long)be64_to_cpu(*rsp),
			(unsigned long long)be64_to_cpu(
				*(__force __be64 *)gl->va),
			gl->tot_len);

		return 0;
	} else {
		skb = cxgb4_pktgl_to_skb(gl, 128, 128);
		if (unlikely(!skb))
			goto nomem;
	}

	opcode = *(u8 *)rsp;
	if (c4iw_handlers[opcode])
		c4iw_handlers[opcode](dev, skb);
	else
		pr_info("%s no handler opcode 0x%x...\n", __func__,
			opcode);

	return 0;
nomem:
	return -1;
}
static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
{
	struct uld_ctx *ctx = handle;

	PDBG("%s new_state %u\n", __func__, new_state);
	switch (new_state) {
	case CXGB4_STATE_UP:
		printk(KERN_INFO MOD "%s: Up\n", pci_name(ctx->lldi.pdev));
		if (!ctx->dev) {
			int ret;

			ctx->dev = c4iw_alloc(&ctx->lldi);
			if (IS_ERR(ctx->dev)) {
				printk(KERN_ERR MOD
				       "%s: initialization failed: %ld\n",
				       pci_name(ctx->lldi.pdev),
				       PTR_ERR(ctx->dev));
				ctx->dev = NULL;
				break;
			}
			ret = c4iw_register_device(ctx->dev);
			if (ret) {
				printk(KERN_ERR MOD
				       "%s: RDMA registration failed: %d\n",
				       pci_name(ctx->lldi.pdev), ret);
				c4iw_dealloc(ctx);
			}
		}
		break;
	case CXGB4_STATE_DOWN:
		printk(KERN_INFO MOD "%s: Down\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	case CXGB4_STATE_START_RECOVERY:
		printk(KERN_INFO MOD "%s: Fatal Error\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev) {
			struct ib_event event;

			ctx->dev->rdev.flags |= T4_FATAL_ERROR;
			memset(&event, 0, sizeof event);
			event.event = IB_EVENT_DEVICE_FATAL;
			event.device = &ctx->dev->ibdev;
			ib_dispatch_event(&event);
			c4iw_remove(ctx);
		}
		break;
	case CXGB4_STATE_DETACH:
		printk(KERN_INFO MOD "%s: Detach\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	}
	return 0;
}
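
/*
 * Doorbell flow-control state machine.  A DB_FULL notification disables
 * all kernel QP doorbells and moves the device from NORMAL to
 * FLOW_CONTROL; DB_EMPTY re-enables them and returns to NORMAL once the
 * QP count is at or below db_fc_threshold.  DB_DROP means the hardware
 * discarded doorbell writes, which requires the full recovery sequence
 * in recover_queues() below.
 */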
static int disable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_disable_wq_db(&qp->wq);
	return 0;
}

static void stop_queues(struct uld_ctx *ctx)
{
	spin_lock_irq(&ctx->dev->lock);
	if (ctx->dev->db_state == NORMAL) {
		ctx->dev->rdev.stats.db_state_transitions++;
		ctx->dev->db_state = FLOW_CONTROL;
		idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
	}
	spin_unlock_irq(&ctx->dev->lock);
}

static int enable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_enable_wq_db(&qp->wq);
	return 0;
}

static void resume_queues(struct uld_ctx *ctx)
{
	spin_lock_irq(&ctx->dev->lock);
	if (ctx->dev->qpcnt <= db_fc_threshold &&
	    ctx->dev->db_state == FLOW_CONTROL) {
		ctx->dev->db_state = NORMAL;
		ctx->dev->rdev.stats.db_state_transitions++;
		idr_for_each(&ctx->dev->qpidr, enable_qp_db, NULL);
	}
	spin_unlock_irq(&ctx->dev->lock);
}

struct qp_list {
	unsigned idx;
	struct c4iw_qp **qps;
};
static int add_and_ref_qp(int id, void *p, void *data)
{
	struct qp_list *qp_listp = data;
	struct c4iw_qp *qp = p;

	c4iw_qp_add_ref(&qp->ibqp);
	qp_listp->qps[qp_listp->idx++] = qp;
	return 0;
}

static int count_qps(int id, void *p, void *data)
{
	unsigned *countp = data;

	(*countp)++;
	return 0;
}

static void deref_qps(struct qp_list qp_list)
{
	int idx;

	for (idx = 0; idx < qp_list.idx; idx++)
		c4iw_qp_rem_ref(&qp_list.qps[idx]->ibqp);
}

static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
{
	int idx;
	int ret;

	for (idx = 0; idx < qp_list->idx; idx++) {
		struct c4iw_qp *qp = qp_list->qps[idx];

		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
					  qp->wq.sq.qid,
					  t4_sq_host_wq_pidx(&qp->wq),
					  t4_sq_wq_size(&qp->wq));
		if (ret) {
			printk(KERN_ERR MOD "%s: Fatal error - "
			       "DB overflow recovery failed - "
			       "error syncing SQ qid %u\n",
			       pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
			return;
		}

		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
					  qp->wq.rq.qid,
					  t4_rq_host_wq_pidx(&qp->wq),
					  t4_rq_wq_size(&qp->wq));
		if (ret) {
			printk(KERN_ERR MOD "%s: Fatal error - "
			       "DB overflow recovery failed - "
			       "error syncing RQ qid %u\n",
			       pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
			return;
		}

		/* Wait for the dbfifo to drain */
		while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(usecs_to_jiffies(10));
		}
	}
}
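
/*
 * Recovery from dropped doorbells: lock out all doorbell ringers, wait
 * for the hardware doorbell FIFO to drain, flush the SGE EQ context
 * cache, then resync the host and hardware producer indices for every
 * QP (recover_lost_dbs()) before letting the queues run again.
 */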
static void recover_queues(struct uld_ctx *ctx)
{
	int count = 0;
	struct qp_list qp_list;
	int ret;

	/* lock out kernel db ringers */
	mutex_lock(&ctx->dev->db_mutex);

	/* put all queues in to recovery mode */
	spin_lock_irq(&ctx->dev->lock);
	ctx->dev->db_state = RECOVERY;
	ctx->dev->rdev.stats.db_state_transitions++;
	idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
	spin_unlock_irq(&ctx->dev->lock);

	/* slow everybody down */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(usecs_to_jiffies(1000));

	/* Wait for the dbfifo to completely drain. */
	while (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1) > 0) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(10));
	}

	/* flush the SGE contexts */
	ret = cxgb4_flush_eq_cache(ctx->dev->rdev.lldi.ports[0]);
	if (ret) {
		printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
		       pci_name(ctx->lldi.pdev));
		goto out;
	}

	/* Count active queues so we can build a list of queues to recover */
	spin_lock_irq(&ctx->dev->lock);
	idr_for_each(&ctx->dev->qpidr, count_qps, &count);
	qp_list.qps = kzalloc(count * sizeof *qp_list.qps, GFP_ATOMIC);
	if (!qp_list.qps) {
		printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
		       pci_name(ctx->lldi.pdev));
		spin_unlock_irq(&ctx->dev->lock);
		goto out;
	}
	qp_list.idx = 0;

	/* add and ref each qp so it doesn't get freed */
	idr_for_each(&ctx->dev->qpidr, add_and_ref_qp, &qp_list);
	spin_unlock_irq(&ctx->dev->lock);

	/* now traverse the list in a safe context to recover the db state */
	recover_lost_dbs(ctx, &qp_list);

	/* we're almost done!  deref the qps and clean up */
	deref_qps(qp_list);
	kfree(qp_list.qps);

	/* Wait for the dbfifo to completely drain again */
	while (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1) > 0) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(10));
	}

	/* resume the queues */
	spin_lock_irq(&ctx->dev->lock);
	if (ctx->dev->qpcnt > db_fc_threshold)
		ctx->dev->db_state = FLOW_CONTROL;
	else {
		ctx->dev->db_state = NORMAL;
		idr_for_each(&ctx->dev->qpidr, enable_qp_db, NULL);
	}
	ctx->dev->rdev.stats.db_state_transitions++;
	spin_unlock_irq(&ctx->dev->lock);

out:
	/* start up kernel db ringers again */
	mutex_unlock(&ctx->dev->db_mutex);
}
static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
{
	struct uld_ctx *ctx = handle;

	switch (control) {
	case CXGB4_CONTROL_DB_FULL:
		stop_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_full++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	case CXGB4_CONTROL_DB_EMPTY:
		resume_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_empty++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	case CXGB4_CONTROL_DB_DROP:
		recover_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_drop++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	default:
		printk(KERN_WARNING MOD "%s: unknown control cmd %u\n",
		       pci_name(ctx->lldi.pdev), control);
		break;
	}
	return 0;
}

static struct cxgb4_uld_info c4iw_uld_info = {
	.name = DRV_NAME,
	.add = c4iw_uld_add,
	.rx_handler = c4iw_uld_rx_handler,
	.state_change = c4iw_uld_state_change,
	.control = c4iw_uld_control,
};
static int __init c4iw_init_module(void)
{
	int err;

	err = c4iw_cm_init();
	if (err)
		return err;

	c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
	if (!c4iw_debugfs_root)
		printk(KERN_WARNING MOD
		       "could not create debugfs entry, continuing\n");

	cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);

	return 0;
}

static void __exit c4iw_exit_module(void)
{
	struct uld_ctx *ctx, *tmp;

	mutex_lock(&dev_mutex);
	list_for_each_entry_safe(ctx, tmp, &uld_ctx_list, entry) {
		if (ctx->dev)
			c4iw_remove(ctx);
		kfree(ctx);
	}
	mutex_unlock(&dev_mutex);
	cxgb4_unregister_uld(CXGB4_ULD_RDMA);
	c4iw_cm_term();
	debugfs_remove_recursive(c4iw_debugfs_root);
}

module_init(c4iw_init_module);
module_exit(c4iw_exit_module);