csio_init.c 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274
  1. /*
  2. * This file is part of the Chelsio FCoE driver for Linux.
  3. *
  4. * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
  5. *
  6. * This software is available to you under a choice of one of two
  7. * licenses. You may choose to be licensed under the terms of the GNU
  8. * General Public License (GPL) Version 2, available from the file
  9. * COPYING in the main directory of this source tree, or the
  10. * OpenIB.org BSD license below:
  11. *
  12. * Redistribution and use in source and binary forms, with or
  13. * without modification, are permitted provided that the following
  14. * conditions are met:
  15. *
  16. * - Redistributions of source code must retain the above
  17. * copyright notice, this list of conditions and the following
  18. * disclaimer.
  19. *
  20. * - Redistributions in binary form must reproduce the above
  21. * copyright notice, this list of conditions and the following
  22. * disclaimer in the documentation and/or other materials
  23. * provided with the distribution.
  24. *
  25. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  26. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  27. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  28. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  29. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  30. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  31. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  32. * SOFTWARE.
  33. */
  34. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  35. #include <linux/kernel.h>
  36. #include <linux/module.h>
  37. #include <linux/init.h>
  38. #include <linux/pci.h>
  39. #include <linux/aer.h>
  40. #include <linux/mm.h>
  41. #include <linux/notifier.h>
  42. #include <linux/kdebug.h>
  43. #include <linux/seq_file.h>
  44. #include <linux/debugfs.h>
  45. #include <linux/string.h>
  46. #include <linux/export.h>
  47. #include "csio_init.h"
  48. #include "csio_defs.h"
  49. #define CSIO_MIN_MEMPOOL_SZ 64
  50. static struct dentry *csio_debugfs_root;
  51. static struct scsi_transport_template *csio_fcoe_transport;
  52. static struct scsi_transport_template *csio_fcoe_transport_vport;
  53. /*
  54. * debugfs support
  55. */
/* debugfs open: stash the per-file cookie (hw pointer + memory index,
 * set up by csio_add_debugfs_mem()) for use by csio_mem_read().
 */
static int
csio_mem_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
/*
 * csio_mem_read - debugfs read handler for the adapter memory files.
 *
 * The low 2 bits of ->private_data encode which memory is being read
 * (MEM_MC selects the external memory path, anything else goes through
 * the EDC path); the remaining bits are the csio_hw pointer, as encoded
 * by csio_add_debugfs_mem().  Data is fetched from the adapter 64 bytes
 * at a time and copied out to user space.
 */
static ssize_t
csio_mem_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	loff_t pos = *ppos;
	/* i_size was set to the memory size when the file was created */
	loff_t avail = file->f_path.dentry->d_inode->i_size;
	unsigned int mem = (uintptr_t)file->private_data & 3;
	/* Strip the memory-index tag to recover the hw pointer */
	struct csio_hw *hw = file->private_data - mem;

	if (pos < 0)
		return -EINVAL;
	if (pos >= avail)
		return 0;
	if (count > avail - pos)
		count = avail - pos;

	while (count) {
		size_t len;
		int ret, ofst;
		__be32 data[16];	/* one 64-byte read unit */

		if (mem == MEM_MC)
			ret = csio_hw_mc_read(hw, pos, data, NULL);
		else
			ret = csio_hw_edc_read(hw, mem, pos, data, NULL);
		if (ret)
			return ret;

		/* pos may not be 64-byte aligned; skip into the buffer */
		ofst = pos % sizeof(data);
		len = min(count, sizeof(data) - ofst);
		if (copy_to_user(buf, (u8 *)data + ofst, len))
			return -EFAULT;

		buf += len;
		pos += len;
		count -= len;
	}
	count = pos - *ppos;	/* bytes actually copied */
	*ppos = pos;
	return count;
}
/* File operations for the per-memory (edc0/edc1/mc) debugfs files */
static const struct file_operations csio_mem_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = csio_mem_open,
	.read = csio_mem_read,
	.llseek = default_llseek,
};
/*
 * csio_add_debugfs_mem - Create one adapter-memory debugfs file.
 * @hw: HW module.
 * @name: File name ("edc0", "edc1" or "mc").
 * @idx: Memory index, tagged into the low bits of the file's private
 *       data pointer (decoded again in csio_mem_read()).
 * @size_mb: Memory size in MB, published via the inode size so reads
 *           know how much is available.
 */
static void __devinit
csio_add_debugfs_mem(struct csio_hw *hw, const char *name,
		     unsigned int idx, unsigned int size_mb)
{
	struct dentry *de;

	de = debugfs_create_file(name, S_IRUSR, hw->debugfs_root,
				 (void *)hw + idx, &csio_mem_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = size_mb << 20;
}
/*
 * csio_setup_debugfs - Populate the per-hw debugfs directory.
 * @hw: HW module.
 *
 * Creates a debugfs file for each memory the chip reports as enabled
 * in MA_TARGET_MEM_ENABLE.  Returns -1 if the per-hw directory was
 * never created, 0 otherwise.
 */
static int __devinit
csio_setup_debugfs(struct csio_hw *hw)
{
	int i;

	if (IS_ERR_OR_NULL(hw->debugfs_root))
		return -1;

	i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE);
	if (i & EDRAM0_ENABLE)
		csio_add_debugfs_mem(hw, "edc0", MEM_EDC0, 5);
	if (i & EDRAM1_ENABLE)
		csio_add_debugfs_mem(hw, "edc1", MEM_EDC1, 5);
	if (i & EXT_MEM_ENABLE)
		/* External memory size comes from the MA BAR register */
		csio_add_debugfs_mem(hw, "mc", MEM_MC,
		       EXT_MEM_SIZE_GET(csio_rd_reg32(hw, MA_EXT_MEMORY_BAR)));
	return 0;
}
  129. /*
  130. * csio_dfs_create - Creates and sets up per-hw debugfs.
  131. *
  132. */
  133. static int
  134. csio_dfs_create(struct csio_hw *hw)
  135. {
  136. if (csio_debugfs_root) {
  137. hw->debugfs_root = debugfs_create_dir(pci_name(hw->pdev),
  138. csio_debugfs_root);
  139. csio_setup_debugfs(hw);
  140. }
  141. return 0;
  142. }
  143. /*
  144. * csio_dfs_destroy - Destroys per-hw debugfs.
  145. */
  146. static int
  147. csio_dfs_destroy(struct csio_hw *hw)
  148. {
  149. if (hw->debugfs_root)
  150. debugfs_remove_recursive(hw->debugfs_root);
  151. return 0;
  152. }
/*
 * csio_dfs_init - Debug filesystem initialization for the module.
 *
 * Creates the module-level debugfs root.  Failure is non-fatal: the
 * driver works without debugfs, so only a warning is logged and 0 is
 * returned regardless.
 */
static int
csio_dfs_init(void)
{
	csio_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!csio_debugfs_root)
		pr_warn("Could not create debugfs entry, continuing\n");

	return 0;
}
/*
 * csio_dfs_exit - debugfs cleanup for the module.
 *
 * Removes the module root only; per-hw subtrees are removed earlier
 * by csio_dfs_destroy().
 */
static void
csio_dfs_exit(void)
{
	debugfs_remove(csio_debugfs_root);
}
  173. /*
  174. * csio_pci_init - PCI initialization.
  175. * @pdev: PCI device.
  176. * @bars: Bitmask of bars to be requested.
  177. *
  178. * Initializes the PCI function by enabling MMIO, setting bus
  179. * mastership and setting DMA mask.
  180. */
  181. static int
  182. csio_pci_init(struct pci_dev *pdev, int *bars)
  183. {
  184. int rv = -ENODEV;
  185. *bars = pci_select_bars(pdev, IORESOURCE_MEM);
  186. if (pci_enable_device_mem(pdev))
  187. goto err;
  188. if (pci_request_selected_regions(pdev, *bars, KBUILD_MODNAME))
  189. goto err_disable_device;
  190. pci_set_master(pdev);
  191. pci_try_set_mwi(pdev);
  192. if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
  193. pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
  194. } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
  195. pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
  196. } else {
  197. dev_err(&pdev->dev, "No suitable DMA available.\n");
  198. goto err_release_regions;
  199. }
  200. return 0;
  201. err_release_regions:
  202. pci_release_selected_regions(pdev, *bars);
  203. err_disable_device:
  204. pci_disable_device(pdev);
  205. err:
  206. return rv;
  207. }
/*
 * csio_pci_exit - PCI uninitialization.
 * @pdev: PCI device.
 * @bars: Bars to be released.
 *
 * Undoes csio_pci_init(): releases the requested BAR regions and
 * disables the device.
 */
static void
csio_pci_exit(struct pci_dev *pdev, int *bars)
{
	pci_release_selected_regions(pdev, *bars);
	pci_disable_device(pdev);
}
/*
 * csio_hw_init_workers - Initialize the HW module's worker threads.
 * @hw: HW module.
 *
 * Sets up the event-queue work item; it is queued elsewhere and
 * cancelled in csio_hw_exit_workers().
 */
static void
csio_hw_init_workers(struct csio_hw *hw)
{
	INIT_WORK(&hw->evtq_work, csio_evtq_worker);
}
/*
 * csio_hw_exit_workers - Tear down the HW module's worker threads.
 * @hw: HW module.
 */
static void
csio_hw_exit_workers(struct csio_hw *hw)
{
	cancel_work_sync(&hw->evtq_work);
	/* Drain anything still pending on the system workqueue */
	flush_scheduled_work();
}
/*
 * csio_create_queues - Register the already-allocated queues with FW.
 * @hw: HW module.
 *
 * Creates, in order: the forward-interrupt IQ (non-MSIX modes only),
 * the FW event IQ, the management EQ, and one IQ/EQ pair per port per
 * CPU.  No-op if CSIO_HWF_Q_FW_ALLOCED is already set.
 *
 * Returns 0 on success.  On any failure after the FW event IQ, all
 * queues are destroyed (FW state included) and -EINVAL is returned;
 * earlier failures return the underlying error code directly.
 */
static int
csio_create_queues(struct csio_hw *hw)
{
	int i, j;
	struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
	int rv;
	struct csio_scsi_cpu_info *info;

	if (hw->flags & CSIO_HWF_Q_FW_ALLOCED)
		return 0;

	if (hw->intr_mode != CSIO_IM_MSIX) {
		/* INTx/MSI: one forward-interrupt IQ serves all events */
		rv = csio_wr_iq_create(hw, NULL, hw->intr_iq_idx,
					0, hw->pport[0].portid, false, NULL);
		if (rv != 0) {
			csio_err(hw, " Forward Interrupt IQ failed!: %d\n", rv);
			return rv;
		}
	}

	/* FW event queue */
	rv = csio_wr_iq_create(hw, NULL, hw->fwevt_iq_idx,
			       csio_get_fwevt_intr_idx(hw),
			       hw->pport[0].portid, true, NULL);
	if (rv != 0) {
		csio_err(hw, "FW event IQ config failed!: %d\n", rv);
		return rv;
	}

	/* Create mgmt queue; its completions ride the FW event IQ */
	rv = csio_wr_eq_create(hw, NULL, mgmtm->eq_idx,
			       mgmtm->iq_idx, hw->pport[0].portid, NULL);
	if (rv != 0) {
		csio_err(hw, "Mgmt EQ create failed!: %d\n", rv);
		goto err;
	}

	/* Create SCSI queues */
	for (i = 0; i < hw->num_pports; i++) {
		info = &hw->scsi_cpu_info[i];

		for (j = 0; j < info->max_cpus; j++) {
			struct csio_scsi_qset *sqset = &hw->sqset[i][j];

			rv = csio_wr_iq_create(hw, NULL, sqset->iq_idx,
					       sqset->intr_idx, i, false, NULL);
			if (rv != 0) {
				csio_err(hw,
				    "SCSI module IQ config failed [%d][%d]:%d\n",
				    i, j, rv);
				goto err;
			}
			rv = csio_wr_eq_create(hw, NULL, sqset->eq_idx,
					       sqset->iq_idx, i, NULL);
			if (rv != 0) {
				csio_err(hw,
				    "SCSI module EQ config failed [%d][%d]:%d\n",
				    i, j, rv);
				goto err;
			}
		} /* for all CPUs */
	} /* For all ports */

	hw->flags |= CSIO_HWF_Q_FW_ALLOCED;
	return 0;
err:
	/* Tear down everything created so far, including FW state */
	csio_wr_destroy_queues(hw, true);
	return -EINVAL;
}
/*
 * csio_config_queues - Configure the DMA queues.
 * @hw: HW module.
 *
 * Allocates memory for queues and registers them with FW.  If queue
 * memory was already allocated (CSIO_HWF_Q_MEM_ALLOCED), only the FW
 * registration step is redone via csio_create_queues().  On success,
 * IRQs for the chosen interrupt mode are also requested.
 *
 * Returns 0 on success, -EINVAL on any failure (interrupts are
 * disabled again on the local error paths; csio_request_irqs() is
 * documented to clean up after itself).
 */
int
csio_config_queues(struct csio_hw *hw)
{
	int i, j, idx, k = 0;
	int rv;
	struct csio_scsi_qset *sqset;
	struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
	struct csio_scsi_qset *orig;
	struct csio_scsi_cpu_info *info;

	if (hw->flags & CSIO_HWF_Q_MEM_ALLOCED)
		return csio_create_queues(hw);

	/* Calculate number of SCSI queues for MSIX we would like */
	hw->num_scsi_msix_cpus = num_online_cpus();
	hw->num_sqsets = num_online_cpus() * hw->num_pports;

	/* Clamp to the driver's compile-time maximums */
	if (hw->num_sqsets > CSIO_MAX_SCSI_QSETS) {
		hw->num_sqsets = CSIO_MAX_SCSI_QSETS;
		hw->num_scsi_msix_cpus = CSIO_MAX_SCSI_CPU;
	}

	/* Initialize max_cpus, may get reduced during msix allocations */
	for (i = 0; i < hw->num_pports; i++)
		hw->scsi_cpu_info[i].max_cpus = hw->num_scsi_msix_cpus;

	csio_dbg(hw, "nsqsets:%d scpus:%d\n",
		 hw->num_sqsets, hw->num_scsi_msix_cpus);

	csio_intr_enable(hw);

	if (hw->intr_mode != CSIO_IM_MSIX) {
		/* Allocate Forward interrupt iq (INTx/MSI only). */
		hw->intr_iq_idx = csio_wr_alloc_q(hw, CSIO_INTR_IQSIZE,
						CSIO_INTR_WRSIZE, CSIO_INGRESS,
						(void *)hw, 0, 0, NULL);
		if (hw->intr_iq_idx == -1) {
			csio_err(hw,
				 "Forward interrupt queue creation failed\n");
			goto intr_disable;
		}
	}

	/* Allocate the FW evt queue */
	hw->fwevt_iq_idx = csio_wr_alloc_q(hw, CSIO_FWEVT_IQSIZE,
					   CSIO_FWEVT_WRSIZE,
					   CSIO_INGRESS, (void *)hw,
					   CSIO_FWEVT_FLBUFS, 0,
					   csio_fwevt_intx_handler);
	if (hw->fwevt_iq_idx == -1) {
		csio_err(hw, "FW evt queue creation failed\n");
		goto intr_disable;
	}

	/* Allocate the mgmt queue */
	mgmtm->eq_idx = csio_wr_alloc_q(hw, CSIO_MGMT_EQSIZE,
					CSIO_MGMT_EQ_WRSIZE,
					CSIO_EGRESS, (void *)hw, 0, 0, NULL);
	if (mgmtm->eq_idx == -1) {
		csio_err(hw, "Failed to alloc egress queue for mgmt module\n");
		goto intr_disable;
	}

	/* Use FW IQ for MGMT req completion */
	mgmtm->iq_idx = hw->fwevt_iq_idx;

	/* Allocate SCSI queues */
	for (i = 0; i < hw->num_pports; i++) {
		info = &hw->scsi_cpu_info[i];

		for (j = 0; j < hw->num_scsi_msix_cpus; j++) {
			sqset = &hw->sqset[i][j];

			if (j >= info->max_cpus) {
				/* Fewer vectors than CPUs: share the
				 * qset of CPU (j mod max_cpus).
				 */
				k = j % info->max_cpus;
				orig = &hw->sqset[i][k];
				sqset->eq_idx = orig->eq_idx;
				sqset->iq_idx = orig->iq_idx;
				continue;
			}

			idx = csio_wr_alloc_q(hw, csio_scsi_eqsize, 0,
					      CSIO_EGRESS, (void *)hw, 0, 0,
					      NULL);
			if (idx == -1) {
				csio_err(hw, "EQ creation failed for idx:%d\n",
					 idx);
				goto intr_disable;
			}
			sqset->eq_idx = idx;

			idx = csio_wr_alloc_q(hw, CSIO_SCSI_IQSIZE,
					      CSIO_SCSI_IQ_WRSZ, CSIO_INGRESS,
					      (void *)hw, 0, 0,
					      csio_scsi_intx_handler);
			if (idx == -1) {
				csio_err(hw, "IQ creation failed for idx:%d\n",
					 idx);
				goto intr_disable;
			}
			sqset->iq_idx = idx;
		} /* for all CPUs */
	} /* For all ports */

	hw->flags |= CSIO_HWF_Q_MEM_ALLOCED;

	rv = csio_create_queues(hw);
	if (rv != 0)
		goto intr_disable;

	/*
	 * Now request IRQs for the vectors. In the event of a failure,
	 * cleanup is handled internally by this function.
	 */
	rv = csio_request_irqs(hw);
	if (rv != 0)
		return -EINVAL;

	return 0;

intr_disable:
	csio_intr_disable(hw, false);

	return -EINVAL;
}
/*
 * csio_resource_alloc - Allocate the pools used by the HW module.
 * @hw: HW module.
 *
 * Creates the mailbox mempool, the remote-node mempool and the PCI
 * pool for SCSI responses, and records the total work-request queue
 * count in the WR module.  Returns 0 on success or -ENOMEM, with all
 * partially-created pools destroyed.
 */
static int
csio_resource_alloc(struct csio_hw *hw)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	int rv = -ENOMEM;

	/* IQ+EQ per qset, plus the fixed HW queues */
	wrm->num_q = ((CSIO_MAX_SCSI_QSETS * 2) + CSIO_HW_NIQ +
		       CSIO_HW_NEQ + CSIO_HW_NFLQ + CSIO_HW_NINTXQ);

	hw->mb_mempool = mempool_create_kmalloc_pool(CSIO_MIN_MEMPOOL_SZ,
						     sizeof(struct csio_mb));
	if (!hw->mb_mempool)
		goto err;

	hw->rnode_mempool = mempool_create_kmalloc_pool(CSIO_MIN_MEMPOOL_SZ,
						     sizeof(struct csio_rnode));
	if (!hw->rnode_mempool)
		goto err_free_mb_mempool;

	hw->scsi_pci_pool = pci_pool_create("csio_scsi_pci_pool", hw->pdev,
					    CSIO_SCSI_RSP_LEN, 8, 0);
	if (!hw->scsi_pci_pool)
		goto err_free_rn_pool;

	return 0;

err_free_rn_pool:
	mempool_destroy(hw->rnode_mempool);
	hw->rnode_mempool = NULL;
err_free_mb_mempool:
	mempool_destroy(hw->mb_mempool);
	hw->mb_mempool = NULL;
err:
	return rv;
}
/*
 * csio_resource_free - Free the pools created by csio_resource_alloc().
 * @hw: HW module.
 *
 * Pointers are NULLed after destruction to guard against double free.
 */
static void
csio_resource_free(struct csio_hw *hw)
{
	pci_pool_destroy(hw->scsi_pci_pool);
	hw->scsi_pci_pool = NULL;
	mempool_destroy(hw->rnode_mempool);
	hw->rnode_mempool = NULL;
	mempool_destroy(hw->mb_mempool);
	hw->mb_mempool = NULL;
}
  446. /*
  447. * csio_hw_alloc - Allocate and initialize the HW module.
  448. * @pdev: PCI device.
  449. *
  450. * Allocates HW structure, DMA, memory resources, maps BARS to
  451. * host memory and initializes HW module.
  452. */
  453. static struct csio_hw * __devinit
  454. csio_hw_alloc(struct pci_dev *pdev)
  455. {
  456. struct csio_hw *hw;
  457. hw = kzalloc(sizeof(struct csio_hw), GFP_KERNEL);
  458. if (!hw)
  459. goto err;
  460. hw->pdev = pdev;
  461. strncpy(hw->drv_version, CSIO_DRV_VERSION, 32);
  462. /* memory pool/DMA pool allocation */
  463. if (csio_resource_alloc(hw))
  464. goto err_free_hw;
  465. /* Get the start address of registers from BAR 0 */
  466. hw->regstart = ioremap_nocache(pci_resource_start(pdev, 0),
  467. pci_resource_len(pdev, 0));
  468. if (!hw->regstart) {
  469. csio_err(hw, "Could not map BAR 0, regstart = %p\n",
  470. hw->regstart);
  471. goto err_resource_free;
  472. }
  473. csio_hw_init_workers(hw);
  474. if (csio_hw_init(hw))
  475. goto err_unmap_bar;
  476. csio_dfs_create(hw);
  477. csio_dbg(hw, "hw:%p\n", hw);
  478. return hw;
  479. err_unmap_bar:
  480. csio_hw_exit_workers(hw);
  481. iounmap(hw->regstart);
  482. err_resource_free:
  483. csio_resource_free(hw);
  484. err_free_hw:
  485. kfree(hw);
  486. err:
  487. return NULL;
  488. }
/*
 * csio_hw_free - Uninitialize and free the HW module.
 * @hw: The HW module.
 *
 * Disable interrupts, uninit the HW module, free resources, free hw.
 * The teardown order mirrors the setup order in csio_hw_alloc().
 */
static void
csio_hw_free(struct csio_hw *hw)
{
	csio_intr_disable(hw, true);
	csio_hw_exit_workers(hw);
	csio_hw_exit(hw);
	iounmap(hw->regstart);
	csio_dfs_destroy(hw);
	csio_resource_free(hw);
	kfree(hw);
}
/**
 * csio_shost_init - Create and initialize the lnode module.
 * @hw: The HW module.
 * @dev: The device associated with this invocation.
 * @probe: Called from probe context or not?  (Not referenced in this
 *         function body itself.)
 * @pln: Parent lnode if any.
 *
 * Allocates lnode structure via scsi_host_alloc, initializes
 * shost, initializes lnode module and registers with SCSI ML
 * via scsi_host_add. This function is shared between physical and
 * virtual node ports.  Returns the new lnode or NULL on failure.
 */
struct csio_lnode *
csio_shost_init(struct csio_hw *hw, struct device *dev,
		bool probe, struct csio_lnode *pln)
{
	struct Scsi_Host *shost = NULL;
	struct csio_lnode *ln;

	/* Queue depth per LUN comes from a module parameter */
	csio_fcoe_shost_template.cmd_per_lun = csio_lun_qdepth;
	csio_fcoe_shost_vport_template.cmd_per_lun = csio_lun_qdepth;

	/*
	 * hw->pdev is the physical port's PCI dev structure,
	 * which will be different from the NPIV dev structure.
	 */
	if (dev == &hw->pdev->dev)
		shost = scsi_host_alloc(
				&csio_fcoe_shost_template,
				sizeof(struct csio_lnode));
	else
		shost = scsi_host_alloc(
				&csio_fcoe_shost_vport_template,
				sizeof(struct csio_lnode));

	if (!shost)
		goto err;

	ln = shost_priv(shost);
	memset(ln, 0, sizeof(struct csio_lnode));

	/* Link common lnode to this lnode */
	ln->dev_num = (shost->host_no << 16);

	shost->can_queue = CSIO_MAX_QUEUE;
	shost->this_id = -1;
	shost->unique_id = shost->host_no;
	shost->max_cmd_len = 16; /* Max CDB length supported */
	shost->max_id = min_t(uint32_t, csio_fcoe_rnodes,
			      hw->fres_info.max_ssns);
	shost->max_lun = CSIO_MAX_LUN;
	if (dev == &hw->pdev->dev)
		shost->transportt = csio_fcoe_transport;
	else
		shost->transportt = csio_fcoe_transport_vport;

	/* root lnode: first lnode created becomes hw->rln */
	if (!hw->rln)
		hw->rln = ln;

	/* Other initialization here: Common, Transport specific */
	if (csio_lnode_init(ln, hw, pln))
		goto err_shost_put;

	if (scsi_add_host(shost, dev))
		goto err_lnode_exit;

	return ln;

err_lnode_exit:
	csio_lnode_exit(ln);
err_shost_put:
	scsi_host_put(shost);
err:
	return NULL;
}
/**
 * csio_shost_exit - De-instantiate the shost.
 * @ln: The lnode module corresponding to the shost.
 *
 * Removes the host from the FC transport and the SCSI midlayer,
 * flushes pending HW events, then releases the lnode and drops the
 * shost reference.
 */
void
csio_shost_exit(struct csio_lnode *ln)
{
	struct Scsi_Host *shost = csio_ln_to_shost(ln);
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	/* Inform transport */
	fc_remove_host(shost);

	/* Inform SCSI ML */
	scsi_remove_host(shost);

	/* Flush all the events, so that any rnode removal events
	 * already queued are all handled, before we remove the lnode.
	 */
	spin_lock_irq(&hw->lock);
	csio_evtq_flush(hw);
	spin_unlock_irq(&hw->lock);

	csio_lnode_exit(ln);
	scsi_host_put(shost);
}
  594. struct csio_lnode *
  595. csio_lnode_alloc(struct csio_hw *hw)
  596. {
  597. return csio_shost_init(hw, &hw->pdev->dev, false, NULL);
  598. }
/*
 * csio_lnodes_block_request - Block SCSI requests on every lnode.
 * @hw: HW module.
 *
 * Snapshots all sibling and child lnodes under the HW lock, then
 * calls scsi_block_requests() on each shost outside the lock.
 * NOTE(review): the list is sized by hw->num_lns at allocation time —
 * assumes no lnodes are added before the snapshot; confirm with callers.
 */
void
csio_lnodes_block_request(struct csio_hw *hw)
{
	struct Scsi_Host *shost;
	struct csio_lnode *sln;
	struct csio_lnode *ln;
	struct list_head *cur_ln, *cur_cln;
	struct csio_lnode **lnode_list;
	int cur_cnt = 0, ii;

	lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
			     GFP_KERNEL);
	if (!lnode_list) {
		csio_err(hw, "Failed to allocate lnodes_list");
		return;
	}

	spin_lock_irq(&hw->lock);
	/* Traverse sibling lnodes */
	list_for_each(cur_ln, &hw->sln_head) {
		sln = (struct csio_lnode *) cur_ln;
		lnode_list[cur_cnt++] = sln;

		/* Traverse children lnodes */
		list_for_each(cur_cln, &sln->cln_head)
			lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
	}
	spin_unlock_irq(&hw->lock);

	/* Block each shost outside the spinlock */
	for (ii = 0; ii < cur_cnt; ii++) {
		csio_dbg(hw, "Blocking IOs on lnode: %p\n", lnode_list[ii]);
		ln = lnode_list[ii];
		shost = csio_ln_to_shost(ln);
		scsi_block_requests(shost);
	}
	kfree(lnode_list);
}
/*
 * csio_lnodes_unblock_request - Unblock SCSI requests on every lnode.
 * @hw: HW module.
 *
 * Counterpart of csio_lnodes_block_request(): snapshots all sibling
 * and child lnodes under the HW lock, then unblocks each shost
 * outside the lock.
 */
void
csio_lnodes_unblock_request(struct csio_hw *hw)
{
	struct csio_lnode *ln;
	struct Scsi_Host *shost;
	struct csio_lnode *sln;
	struct list_head *cur_ln, *cur_cln;
	struct csio_lnode **lnode_list;
	int cur_cnt = 0, ii;

	lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
			     GFP_KERNEL);
	if (!lnode_list) {
		csio_err(hw, "Failed to allocate lnodes_list");
		return;
	}

	spin_lock_irq(&hw->lock);
	/* Traverse sibling lnodes */
	list_for_each(cur_ln, &hw->sln_head) {
		sln = (struct csio_lnode *) cur_ln;
		lnode_list[cur_cnt++] = sln;

		/* Traverse children lnodes */
		list_for_each(cur_cln, &sln->cln_head)
			lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
	}
	spin_unlock_irq(&hw->lock);

	/* Unblock each shost outside the spinlock */
	for (ii = 0; ii < cur_cnt; ii++) {
		csio_dbg(hw, "unblocking IOs on lnode: %p\n", lnode_list[ii]);
		ln = lnode_list[ii];
		shost = csio_ln_to_shost(ln);
		scsi_unblock_requests(shost);
	}
	kfree(lnode_list);
}
/*
 * csio_lnodes_block_by_port - Block SCSI requests on one port's lnodes.
 * @hw: HW module.
 * @portid: Port whose lnodes (and their NPIV children) are blocked.
 *
 * Same pattern as csio_lnodes_block_request(), but only lnodes whose
 * portid matches are collected.
 */
void
csio_lnodes_block_by_port(struct csio_hw *hw, uint8_t portid)
{
	struct csio_lnode *ln;
	struct Scsi_Host *shost;
	struct csio_lnode *sln;
	struct list_head *cur_ln, *cur_cln;
	struct csio_lnode **lnode_list;
	int cur_cnt = 0, ii;

	lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
			     GFP_KERNEL);
	if (!lnode_list) {
		csio_err(hw, "Failed to allocate lnodes_list");
		return;
	}

	spin_lock_irq(&hw->lock);
	/* Traverse sibling lnodes */
	list_for_each(cur_ln, &hw->sln_head) {
		sln = (struct csio_lnode *) cur_ln;
		/* Skip lnodes on other ports */
		if (sln->portid != portid)
			continue;

		lnode_list[cur_cnt++] = sln;

		/* Traverse children lnodes */
		list_for_each(cur_cln, &sln->cln_head)
			lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
	}
	spin_unlock_irq(&hw->lock);

	/* Block each shost outside the spinlock */
	for (ii = 0; ii < cur_cnt; ii++) {
		csio_dbg(hw, "Blocking IOs on lnode: %p\n", lnode_list[ii]);
		ln = lnode_list[ii];
		shost = csio_ln_to_shost(ln);
		scsi_block_requests(shost);
	}
	kfree(lnode_list);
}
/*
 * csio_lnodes_unblock_by_port - Unblock SCSI requests on one port's
 * lnodes.
 * @hw: HW module.
 * @portid: Port whose lnodes (and their NPIV children) are unblocked.
 *
 * Counterpart of csio_lnodes_block_by_port().
 */
void
csio_lnodes_unblock_by_port(struct csio_hw *hw, uint8_t portid)
{
	struct csio_lnode *ln;
	struct Scsi_Host *shost;
	struct csio_lnode *sln;
	struct list_head *cur_ln, *cur_cln;
	struct csio_lnode **lnode_list;
	int cur_cnt = 0, ii;

	lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
			     GFP_KERNEL);
	if (!lnode_list) {
		csio_err(hw, "Failed to allocate lnodes_list");
		return;
	}

	spin_lock_irq(&hw->lock);
	/* Traverse sibling lnodes */
	list_for_each(cur_ln, &hw->sln_head) {
		sln = (struct csio_lnode *) cur_ln;
		/* Skip lnodes on other ports */
		if (sln->portid != portid)
			continue;

		lnode_list[cur_cnt++] = sln;

		/* Traverse children lnodes */
		list_for_each(cur_cln, &sln->cln_head)
			lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
	}
	spin_unlock_irq(&hw->lock);

	/* Unblock each shost outside the spinlock */
	for (ii = 0; ii < cur_cnt; ii++) {
		csio_dbg(hw, "unblocking IOs on lnode: %p\n", lnode_list[ii]);
		ln = lnode_list[ii];
		shost = csio_ln_to_shost(ln);
		scsi_unblock_requests(shost);
	}
	kfree(lnode_list);
}
/*
 * csio_lnodes_exit - Delete lnodes, NPIV children first.
 * @hw: HW module.
 * @npiv: When true, only the NPIV (child) lnodes are deleted and the
 *        physical lnodes are left in place.
 *
 * Child vports are terminated via fc_vport_terminate() before the
 * physical lnodes are torn down with csio_shost_exit().  Lists are
 * snapshotted under the HW lock; the deletions run outside it.
 */
void
csio_lnodes_exit(struct csio_hw *hw, bool npiv)
{
	struct csio_lnode *sln;
	struct csio_lnode *ln;
	struct list_head *cur_ln, *cur_cln;
	struct csio_lnode **lnode_list;
	int cur_cnt = 0, ii;

	lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
			     GFP_KERNEL);
	if (!lnode_list) {
		csio_err(hw, "lnodes_exit: Failed to allocate lnodes_list.\n");
		return;
	}

	/* Get all child lnodes(NPIV ports) */
	spin_lock_irq(&hw->lock);
	list_for_each(cur_ln, &hw->sln_head) {
		sln = (struct csio_lnode *) cur_ln;

		/* Traverse children lnodes */
		list_for_each(cur_cln, &sln->cln_head)
			lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
	}
	spin_unlock_irq(&hw->lock);

	/* Delete NPIV lnodes */
	for (ii = 0; ii < cur_cnt; ii++) {
		csio_dbg(hw, "Deleting child lnode: %p\n", lnode_list[ii]);
		ln = lnode_list[ii];
		fc_vport_terminate(ln->fc_vport);
	}

	/* Delete only npiv lnodes */
	if (npiv)
		goto free_lnodes;

	cur_cnt = 0;
	/* Get all physical lnodes */
	spin_lock_irq(&hw->lock);
	/* Traverse sibling lnodes */
	list_for_each(cur_ln, &hw->sln_head) {
		sln = (struct csio_lnode *) cur_ln;
		lnode_list[cur_cnt++] = sln;
	}
	spin_unlock_irq(&hw->lock);

	/* Delete physical lnodes */
	for (ii = 0; ii < cur_cnt; ii++) {
		csio_dbg(hw, "Deleting parent lnode: %p\n", lnode_list[ii]);
		csio_shost_exit(lnode_list[ii]);
	}

free_lnodes:
	kfree(lnode_list);
}
/*
 * csio_lnode_init_post: Set lnode attributes after starting HW.
 * @ln: lnode.
 *
 * Initializes the FC-host attributes and kicks off the SCSI bus scan.
 */
static void
csio_lnode_init_post(struct csio_lnode *ln)
{
	struct Scsi_Host *shost = csio_ln_to_shost(ln);

	csio_fchost_attr_init(ln);

	scsi_scan_host(shost);
}
/*
 * csio_probe_one - Instantiate this function.
 * @pdev: PCI device
 * @id: Device ID
 *
 * This is the .probe() callback of the driver. This function:
 * - Initializes the PCI function by enabling MMIO, setting bus
 *   mastership and setting DMA mask.
 * - Allocates HW structure, DMA, memory resources, maps BARS to
 *   host memory and initializes HW module.
 * - Allocates lnode structure via scsi_host_alloc, initializes
 *   shost, initializes lnode module and registers with SCSI ML
 *   via scsi_host_add.
 * - Enables interrupts, and starts the chip by kicking off the
 *   HW state machine.
 * - Once hardware is ready, initiates scan of the host via
 *   scsi_scan_host.
 */
static int __devinit
csio_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rv;
	int bars;
	int i;
	struct csio_hw *hw;
	struct csio_lnode *ln;

	rv = csio_pci_init(pdev, &bars);
	if (rv)
		goto err;

	hw = csio_hw_alloc(pdev);
	if (!hw) {
		rv = -ENODEV;
		goto err_pci_exit;
	}

	pci_set_drvdata(pdev, hw);

	if (csio_hw_start(hw) != 0) {
		/* Deliberately not treated as a probe failure: the
		 * device stays bound so it can be debugged without FW.
		 */
		dev_err(&pdev->dev,
			"Failed to start FW, continuing in debug mode.\n");
		return 0;
	}

	sprintf(hw->fwrev_str, "%u.%u.%u.%u\n",
		    FW_HDR_FW_VER_MAJOR_GET(hw->fwrev),
		    FW_HDR_FW_VER_MINOR_GET(hw->fwrev),
		    FW_HDR_FW_VER_MICRO_GET(hw->fwrev),
		    FW_HDR_FW_VER_BUILD_GET(hw->fwrev));

	/* One lnode (and hence one shost) per physical port */
	for (i = 0; i < hw->num_pports; i++) {
		ln = csio_shost_init(hw, &pdev->dev, true, NULL);
		if (!ln) {
			rv = -ENODEV;
			break;
		}
		/* Initialize portid */
		ln->portid = hw->pport[i].portid;

		spin_lock_irq(&hw->lock);
		if (csio_lnode_start(ln) != 0)
			rv = -ENODEV;
		spin_unlock_irq(&hw->lock);

		if (rv)
			break;

		csio_lnode_init_post(ln);
	}

	if (rv)
		goto err_lnode_exit;

	return 0;

err_lnode_exit:
	/* Quiesce I/O, stop the HW, then tear down every lnode */
	csio_lnodes_block_request(hw);
	spin_lock_irq(&hw->lock);
	csio_hw_stop(hw);
	spin_unlock_irq(&hw->lock);
	csio_lnodes_unblock_request(hw);
	pci_set_drvdata(hw->pdev, NULL);
	csio_lnodes_exit(hw, 0);
	csio_hw_free(hw);
err_pci_exit:
	csio_pci_exit(pdev, &bars);
err:
	dev_err(&pdev->dev, "probe of device failed: %d\n", rv);
	return rv;
}
/*
 * csio_remove_one - Remove one instance of the driver at this PCI function.
 * @pdev: PCI device
 *
 * Used during hotplug operation.  Tears down in the reverse order of
 * csio_probe_one(): stop the HW state machine under hw->lock, release
 * all lnodes/Scsi_Hosts, free the HW structure, then release the PCI
 * function and its BARs.
 */
static void __devexit
csio_remove_one(struct pci_dev *pdev)
{
	struct csio_hw *hw = pci_get_drvdata(pdev);
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	/* Bracket the HW stop with lnode request block/unblock. */
	csio_lnodes_block_request(hw);
	spin_lock_irq(&hw->lock);

	/* Stops lnode, Rnode s/m
	 * Quiesce IOs.
	 * All sessions with remote ports are unregistered.
	 */
	csio_hw_stop(hw);
	spin_unlock_irq(&hw->lock);
	csio_lnodes_unblock_request(hw);

	csio_lnodes_exit(hw, 0);
	csio_hw_free(hw);
	/* hw is gone; clear drvdata before releasing the PCI function. */
	pci_set_drvdata(pdev, NULL);
	csio_pci_exit(pdev, &bars);
}
/*
 * csio_pci_error_detected - PCI error was detected
 * @pdev: PCI device
 * @state: channel state reported by the PCI error-recovery core
 *
 * Returns PCI_ERS_RESULT_DISCONNECT when the channel failure is
 * permanent, otherwise PCI_ERS_RESULT_NEED_RESET so the core proceeds
 * to csio_pci_slot_reset().
 */
static pci_ers_result_t
csio_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct csio_hw *hw = pci_get_drvdata(pdev);

	csio_lnodes_block_request(hw);
	spin_lock_irq(&hw->lock);

	/* Post PCI error detected evt to HW s/m
	 * HW s/m handles this evt by quiescing IOs, unregisters rports
	 * and finally takes the device to offline.
	 */
	csio_post_event(&hw->sm, CSIO_HWE_PCIERR_DETECTED);
	spin_unlock_irq(&hw->lock);
	csio_lnodes_unblock_request(hw);

	/* Tear down the Scsi_Hosts, then quiesce the function itself. */
	csio_lnodes_exit(hw, 0);
	csio_intr_disable(hw, true);
	pci_disable_device(pdev);

	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}
  924. /*
  925. * csio_pci_slot_reset - PCI slot has been reset.
  926. * @pdev: PCI device
  927. *
  928. */
  929. static pci_ers_result_t
  930. csio_pci_slot_reset(struct pci_dev *pdev)
  931. {
  932. struct csio_hw *hw = pci_get_drvdata(pdev);
  933. int ready;
  934. if (pci_enable_device(pdev)) {
  935. dev_err(&pdev->dev, "cannot re-enable device in slot reset\n");
  936. return PCI_ERS_RESULT_DISCONNECT;
  937. }
  938. pci_set_master(pdev);
  939. pci_restore_state(pdev);
  940. pci_save_state(pdev);
  941. pci_cleanup_aer_uncorrect_error_status(pdev);
  942. /* Bring HW s/m to ready state.
  943. * but don't resume IOs.
  944. */
  945. spin_lock_irq(&hw->lock);
  946. csio_post_event(&hw->sm, CSIO_HWE_PCIERR_SLOT_RESET);
  947. ready = csio_is_hw_ready(hw);
  948. spin_unlock_irq(&hw->lock);
  949. if (ready) {
  950. return PCI_ERS_RESULT_RECOVERED;
  951. } else {
  952. dev_err(&pdev->dev, "Can't initialize HW when in slot reset\n");
  953. return PCI_ERS_RESULT_DISCONNECT;
  954. }
  955. }
/*
 * csio_pci_resume - Resume normal operations
 * @pdev: PCI device
 *
 * Final step of PCI error recovery: re-creates one lnode (Scsi_Host)
 * per physical port and restarts it, mirroring the port bring-up loop
 * in csio_probe_one().
 */
static void
csio_pci_resume(struct pci_dev *pdev)
{
	struct csio_hw *hw = pci_get_drvdata(pdev);
	struct csio_lnode *ln;
	int rv = 0;
	int i;

	/* Bring the LINK UP and Resume IO */
	for (i = 0; i < hw->num_pports; i++) {
		ln = csio_shost_init(hw, &pdev->dev, true, NULL);
		if (!ln) {
			rv = -ENODEV;
			break;
		}
		/* Initialize portid */
		ln->portid = hw->pport[i].portid;

		spin_lock_irq(&hw->lock);
		if (csio_lnode_start(ln) != 0)
			rv = -ENODEV;
		spin_unlock_irq(&hw->lock);

		if (rv)
			break;

		csio_lnode_init_post(ln);
	}

	if (rv)
		goto err_resume_exit;

	return;

err_resume_exit:
	csio_lnodes_block_request(hw);
	spin_lock_irq(&hw->lock);
	csio_hw_stop(hw);
	spin_unlock_irq(&hw->lock);
	csio_lnodes_unblock_request(hw);
	csio_lnodes_exit(hw, 0);
	/* NOTE(review): hw is freed here while pdev's drvdata still points
	 * at it — confirm no further driver callbacks can run with the
	 * stale pointer after a failed resume.
	 */
	csio_hw_free(hw);
	dev_err(&pdev->dev, "resume of device failed: %d\n", rv);
}
/* PCI error-recovery callbacks (AER): detect -> slot reset -> resume. */
static struct pci_error_handlers csio_err_handler = {
	.error_detected = csio_pci_error_detected,
	.slot_reset	= csio_pci_slot_reset,
	.resume		= csio_pci_resume,
};
/* PCI IDs claimed by this driver; the table ends with an all-zero entry. */
static DEFINE_PCI_DEVICE_TABLE(csio_pci_tbl) = {
	CSIO_DEVICE(CSIO_DEVID_T440DBG_FCOE, 0),	/* T440DBG FCOE */
	CSIO_DEVICE(CSIO_DEVID_T420CR_FCOE, 0),		/* T420CR FCOE */
	CSIO_DEVICE(CSIO_DEVID_T422CR_FCOE, 0),		/* T422CR FCOE */
	CSIO_DEVICE(CSIO_DEVID_T440CR_FCOE, 0),		/* T440CR FCOE */
	CSIO_DEVICE(CSIO_DEVID_T420BCH_FCOE, 0),	/* T420BCH FCOE */
	CSIO_DEVICE(CSIO_DEVID_T440BCH_FCOE, 0),	/* T440BCH FCOE */
	CSIO_DEVICE(CSIO_DEVID_T440CH_FCOE, 0),		/* T440CH FCOE */
	CSIO_DEVICE(CSIO_DEVID_T420SO_FCOE, 0),		/* T420SO FCOE */
	CSIO_DEVICE(CSIO_DEVID_T420CX_FCOE, 0),		/* T420CX FCOE */
	CSIO_DEVICE(CSIO_DEVID_T420BT_FCOE, 0),		/* T420BT FCOE */
	CSIO_DEVICE(CSIO_DEVID_T404BT_FCOE, 0),		/* T404BT FCOE */
	CSIO_DEVICE(CSIO_DEVID_B420_FCOE, 0),		/* B420 FCOE */
	CSIO_DEVICE(CSIO_DEVID_B404_FCOE, 0),		/* B404 FCOE */
	CSIO_DEVICE(CSIO_DEVID_T480CR_FCOE, 0),		/* T480 CR FCOE */
	CSIO_DEVICE(CSIO_DEVID_T440LPCR_FCOE, 0),	/* T440 LP-CR FCOE */
	CSIO_DEVICE(CSIO_DEVID_PE10K, 0),		/* PE10K FCOE */
	CSIO_DEVICE(CSIO_DEVID_PE10K_PF1, 0),		/* PE10K FCOE on PF1 */
	{ 0, 0, 0, 0, 0, 0, 0 }
};
/* PCI driver registration: probe/remove plus AER error handlers. */
static struct pci_driver csio_pci_driver = {
	.name		= KBUILD_MODNAME,
	.driver		= {
		.owner	= THIS_MODULE,
	},
	.id_table	= csio_pci_tbl,
	.probe		= csio_probe_one,
	.remove		= csio_remove_one,
	.err_handler	= &csio_err_handler,
};
  1033. /*
  1034. * csio_init - Chelsio storage driver initialization function.
  1035. *
  1036. */
  1037. static int __init
  1038. csio_init(void)
  1039. {
  1040. int rv = -ENOMEM;
  1041. pr_info("%s %s\n", CSIO_DRV_DESC, CSIO_DRV_VERSION);
  1042. csio_dfs_init();
  1043. csio_fcoe_transport = fc_attach_transport(&csio_fc_transport_funcs);
  1044. if (!csio_fcoe_transport)
  1045. goto err;
  1046. csio_fcoe_transport_vport =
  1047. fc_attach_transport(&csio_fc_transport_vport_funcs);
  1048. if (!csio_fcoe_transport_vport)
  1049. goto err_vport;
  1050. rv = pci_register_driver(&csio_pci_driver);
  1051. if (rv)
  1052. goto err_pci;
  1053. return 0;
  1054. err_pci:
  1055. fc_release_transport(csio_fcoe_transport_vport);
  1056. err_vport:
  1057. fc_release_transport(csio_fcoe_transport);
  1058. err:
  1059. csio_dfs_exit();
  1060. return rv;
  1061. }
/*
 * csio_exit - Chelsio storage driver uninitialization .
 *
 * Function that gets called in the unload path.  Releases resources in
 * the reverse order of csio_init().
 */
static void __exit
csio_exit(void)
{
	pci_unregister_driver(&csio_pci_driver);
	csio_dfs_exit();
	fc_release_transport(csio_fcoe_transport_vport);
	fc_release_transport(csio_fcoe_transport);
}
/* Module entry/exit points and standard module metadata. */
module_init(csio_init);
module_exit(csio_exit);
MODULE_AUTHOR(CSIO_DRV_AUTHOR);
MODULE_DESCRIPTION(CSIO_DRV_DESC);
MODULE_LICENSE(CSIO_DRV_LICENSE);
MODULE_DEVICE_TABLE(pci, csio_pci_tbl);
MODULE_VERSION(CSIO_DRV_VERSION);
MODULE_FIRMWARE(CSIO_FW_FNAME);