qla_mid.c

/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2010 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_gbl.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <linux/delay.h>
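
/*
 * qla2x00_vp_stop_timer
 *      Stop the per-vport timer, but only for virtual ports (vp_idx != 0);
 *      the physical port's timer is left running.
 */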
void
qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
{
        if (vha->vp_idx && vha->timer_active) {
                del_timer_sync(&vha->timer);
                vha->timer_active = 0;
        }
}
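
/*
 * qla24xx_allocate_vp_id
 *      Find the first free vp_id in the adapter-wide bitmap, claim it under
 *      ha->vport_lock, and link the new vport onto ha->vp_list.
 *
 * Return:
 *      Allocated vp_id on success; a value above ha->max_npiv_vports if the
 *      bitmap is full.
 */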
static uint32_t
qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
{
        uint32_t vp_id;
        struct qla_hw_data *ha = vha->hw;

        /* Find an empty slot and assign a vp_id */
        mutex_lock(&ha->vport_lock);
        vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
        if (vp_id > ha->max_npiv_vports) {
                DEBUG15(printk("vp_id %d is bigger than max-supported %d.\n",
                    vp_id, ha->max_npiv_vports));
                mutex_unlock(&ha->vport_lock);
                return vp_id;
        }

        set_bit(vp_id, ha->vp_idx_map);
        ha->num_vhosts++;
        vha->vp_idx = vp_id;
        list_add_tail(&vha->list, &ha->vp_list);
        mutex_unlock(&ha->vport_lock);

        return vp_id;
}
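
/*
 * qla24xx_deallocate_vp_id
 *      Release a vport's vp_id back to the bitmap and unlink it from
 *      ha->vp_list; the inverse of qla24xx_allocate_vp_id().
 */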
void
qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
{
        uint16_t vp_id;
        struct qla_hw_data *ha = vha->hw;

        mutex_lock(&ha->vport_lock);
        vp_id = vha->vp_idx;
        ha->num_vhosts--;
        clear_bit(vp_id, ha->vp_idx_map);
        list_del(&vha->list);
        mutex_unlock(&ha->vport_lock);
}
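
/*
 * qla24xx_find_vhost_by_name
 *      Look up a vport on ha->vp_list by WWPN.
 *
 * Return:
 *      Matching vport, or NULL if no vport carries that port name.
 */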
static scsi_qla_host_t *
qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
{
        scsi_qla_host_t *vha;
        struct scsi_qla_host *tvha;

        /* Locate matching device in database. */
        list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
                if (!memcmp(port_name, vha->port_name, WWN_SIZE))
                        return vha;
        }
        return NULL;
}

/*
 * qla2x00_mark_vp_devices_dead
 *      Updates fcport state when a device goes offline.
 *
 * Input:
 *      vha = virtual host adapter block pointer.
 *
 * Return:
 *      None.
 *
 * Context:
 */
static void
qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
{
        fc_port_t *fcport;

        list_for_each_entry(fcport, &vha->vp_fcports, list) {
                DEBUG15(printk("scsi(%ld): Marking port dead, "
                    "loop_id=0x%04x :%x\n",
                    vha->host_no, fcport->loop_id, fcport->vp_idx));

                atomic_set(&fcport->state, FCS_DEVICE_DEAD);
                qla2x00_mark_device_lost(vha, fcport, 0, 0);
                atomic_set(&fcport->state, FCS_UNCONFIGURED);
        }
}
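
/*
 * qla24xx_disable_vp
 *      Log out and disable a vport: take its loop down, mark its fcports
 *      dead, and report the new state to the FC transport.
 *
 * Return:
 *      0 on success, -1 if the firmware VP control command failed.
 */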
int
qla24xx_disable_vp(scsi_qla_host_t *vha)
{
        int ret;

        ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
        atomic_set(&vha->loop_state, LOOP_DOWN);
        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

        qla2x00_mark_vp_devices_dead(vha);
        atomic_set(&vha->vp_state, VP_FAILED);
        vha->flags.management_server_logged_in = 0;
        if (ret == QLA_SUCCESS) {
                fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
        } else {
                fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
                return -1;
        }
        return 0;
}
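
/*
 * qla24xx_enable_vp
 *      Bring a vport online: verify the physical port is up on a fabric
 *      (ISP_CFG_F) topology, then push the vport configuration to firmware
 *      via qla24xx_modify_vp_config().
 *
 * Return:
 *      0 on success, 1 on failure.
 */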
int
qla24xx_enable_vp(scsi_qla_host_t *vha)
{
        int ret;
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

        /* Check if the physical ha port is up */
        if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
            atomic_read(&base_vha->loop_state) == LOOP_DEAD ||
            !(ha->current_topology & ISP_CFG_F)) {
                vha->vp_err_state = VP_ERR_PORTDWN;
                fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
                goto enable_failed;
        }

        /* Initialize the new vport unless it is a persistent port */
        mutex_lock(&ha->vport_lock);
        ret = qla24xx_modify_vp_config(vha);
        mutex_unlock(&ha->vport_lock);

        if (ret != QLA_SUCCESS) {
                fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
                goto enable_failed;
        }

        DEBUG15(qla_printk(KERN_INFO, ha,
            "Virtual port with id: %d - Enabled\n", vha->vp_idx));
        return 0;

enable_failed:
        DEBUG15(qla_printk(KERN_INFO, ha,
            "Virtual port with id: %d - Disabled\n", vha->vp_idx));
        return 1;
}
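
/*
 * qla24xx_configure_vp
 *      Finish bringing up a newly acquired vport: register for RSCNs via
 *      change request #3, configure the vhba, and mark the vport active.
 */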
static void
qla24xx_configure_vp(scsi_qla_host_t *vha)
{
        struct fc_vport *fc_vport;
        int ret;

        fc_vport = vha->fc_vport;

        DEBUG15(printk("scsi(%ld): %s: change request #3 for this host.\n",
            vha->host_no, __func__));
        ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
        if (ret != QLA_SUCCESS) {
                DEBUG15(qla_printk(KERN_ERR, vha->hw, "Failed to enable "
                    "receiving of RSCN requests: 0x%x\n", ret));
                return;
        }
        /* Corresponds to SCR enabled */
        clear_bit(VP_SCR_NEEDED, &vha->vp_flags);

        vha->flags.online = 1;
        if (qla24xx_configure_vhba(vha))
                return;

        atomic_set(&vha->vp_state, VP_ACTIVE);
        fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
}
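
/*
 * qla2x00_alert_all_vps
 *      Fan link-related async events (LIP, loop up/down, RSCN, etc.)
 *      received on the physical port out to every virtual port.
 */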
void
qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
{
        scsi_qla_host_t *vha, *tvha;
        struct qla_hw_data *ha = rsp->hw;
        int i = 0;

        list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
                if (vha->vp_idx) {
                        switch (mb[0]) {
                        case MBA_LIP_OCCURRED:
                        case MBA_LOOP_UP:
                        case MBA_LOOP_DOWN:
                        case MBA_LIP_RESET:
                        case MBA_POINT_TO_POINT:
                        case MBA_CHG_IN_CONNECTION:
                        case MBA_PORT_UPDATE:
                        case MBA_RSCN_UPDATE:
                                DEBUG15(printk("scsi(%ld)%s: Async_event for"
                                    " VP[%d], mb = 0x%x, vha=%p\n",
                                    vha->host_no, __func__, i, *mb, vha));
                                qla2x00_async_event(vha, rsp, mb);
                                break;
                        }
                }
                i++;
        }
}
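
/*
 * qla2x00_vp_abort_isp
 *      Handle an ISP abort on behalf of a vport: treat it as a loop down,
 *      log the vport out, then re-enable it.
 */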
int
qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
{
        /*
         * The physical port will do most of the abort and recovery work; we
         * can just treat it as a loop down.
         */
        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
                atomic_set(&vha->loop_state, LOOP_DOWN);
                qla2x00_mark_all_devices_lost(vha, 0);
        } else {
                if (!atomic_read(&vha->loop_down_timer))
                        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
        }

        /*
         * To exclusively reset the vport, we need to log it out first.
         * Note: this control_vp can fail if an ISP reset has already been
         * issued; this is expected, as the vport would already be logged
         * out due to the ISP reset.
         */
        if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
                qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);

        DEBUG15(printk("scsi(%ld): Scheduling enable of Vport %d...\n",
            vha->host_no, vha->vp_idx));
        return qla24xx_enable_vp(vha);
}
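
/*
 * qla2x00_do_dpc_vp
 *      Per-vport DPC work: complete port configuration once the VP index
 *      is acquired, then service pending fcport updates, relogins, reset
 *      markers and loop resyncs.
 */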
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
        qla2x00_do_work(vha);

        if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
                /* VP acquired. complete port configuration */
                qla24xx_configure_vp(vha);
                return 0;
        }

        if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
                qla2x00_update_fcports(vha);
                clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
        }

        if (test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags) &&
            !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
            atomic_read(&vha->loop_state) != LOOP_DOWN) {

                DEBUG(printk("scsi(%ld): qla2x00_port_login()\n",
                    vha->host_no));
                qla2x00_relogin(vha);

                DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n",
                    vha->host_no));
        }

        if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
            !test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)) {
                clear_bit(RESET_ACTIVE, &vha->dpc_flags);
        }

        if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
                if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags)) {
                        qla2x00_loop_resync(vha);
                        clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
                }
        }

        return 0;
}
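
/*
 * qla2x00_do_dpc_all_vps
 *      Run qla2x00_do_dpc_vp() on every vport. Only meaningful when called
 *      on the physical port (vp_idx == 0) in a fabric topology.
 */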
void
qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
{
        int ret;
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *vp;
        struct scsi_qla_host *tvp;

        if (vha->vp_idx)
                return;
        if (list_empty(&ha->vp_list))
                return;

        clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);

        if (!(ha->current_topology & ISP_CFG_F))
                return;

        list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
                if (vp->vp_idx)
                        ret = qla2x00_do_dpc_vp(vp);
        }
}
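
/*
 * qla24xx_vport_create_req_sanity_check
 *      Validate a vport-create request from the FC transport: initiator
 *      role only, NPIV support in F/W, H/W and on the switch, a unique
 *      WWPN, and room under the NPIV vport limit.
 *
 * Return:
 *      0 if the request is acceptable, otherwise a VPCERR_* code.
 */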
int
qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
{
        scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
        struct qla_hw_data *ha = base_vha->hw;
        scsi_qla_host_t *vha;
        uint8_t port_name[WWN_SIZE];

        if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
                return VPCERR_UNSUPPORTED;

        /* Check that the F/W and H/W support NPIV */
        if (!ha->flags.npiv_supported)
                return VPCERR_UNSUPPORTED;

        /* Check whether an NPIV-capable switch is present */
        if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
                return VPCERR_NO_FABRIC_SUPP;

        /* Check that the WWPN is unique */
        u64_to_wwn(fc_vport->port_name, port_name);
        if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
                return VPCERR_BAD_WWN;
        vha = qla24xx_find_vhost_by_name(ha, port_name);
        if (vha)
                return VPCERR_BAD_WWN;

        /* Check the max NPIV vport limit */
        if (ha->num_vhosts > ha->max_npiv_vports) {
                DEBUG15(printk("scsi(%ld): num_vhosts %u is bigger than "
                    "max_npiv_vports %u.\n", base_vha->host_no,
                    ha->num_vhosts, ha->max_npiv_vports));
                return VPCERR_UNSUPPORTED;
        }

        return 0;
}
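
/*
 * qla24xx_create_vhost
 *      Allocate and initialize a new virtual host for an FC vport: create
 *      the SCSI host, copy in the WWNN/WWPN, claim a vp_id, and set up the
 *      host parameters and timer.
 *
 * Return:
 *      New vport on success, NULL on failure.
 */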
scsi_qla_host_t *
qla24xx_create_vhost(struct fc_vport *fc_vport)
{
        scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
        struct qla_hw_data *ha = base_vha->hw;
        scsi_qla_host_t *vha;
        struct scsi_host_template *sht = &qla2xxx_driver_template;
        struct Scsi_Host *host;

        vha = qla2x00_create_host(sht, ha);
        if (!vha) {
                DEBUG(printk("qla2xxx: scsi_host_alloc() failed for vport\n"));
                return NULL;
        }

        host = vha->host;
        fc_vport->dd_data = vha;
        /* New host info */
        u64_to_wwn(fc_vport->node_name, vha->node_name);
        u64_to_wwn(fc_vport->port_name, vha->port_name);

        vha->fc_vport = fc_vport;
        vha->device_flags = 0;
        vha->vp_idx = qla24xx_allocate_vp_id(vha);
        if (vha->vp_idx > ha->max_npiv_vports) {
                DEBUG15(printk("scsi(%ld): Couldn't allocate vp_id.\n",
                    vha->host_no));
                goto create_vhost_failed;
        }
        vha->mgmt_svr_loop_id = 10 + vha->vp_idx;

        vha->dpc_flags = 0L;

        /*
         * To fix the issue of processing a parent's RSCN for the vport before
         * its SCR is complete.
         */
        set_bit(VP_SCR_NEEDED, &vha->vp_flags);
        atomic_set(&vha->loop_state, LOOP_DOWN);
        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

        qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL);

        vha->req = base_vha->req;
        host->can_queue = base_vha->req->length + 128;
        host->this_id = 255;
        host->cmd_per_lun = 3;
        if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif)
                host->max_cmd_len = 32;
        else
                host->max_cmd_len = MAX_CMDSZ;
        host->max_channel = MAX_BUSES - 1;
        host->max_lun = MAX_LUNS;
        host->unique_id = host->host_no;
        host->max_id = MAX_TARGETS_2200;
        host->transportt = qla2xxx_transport_vport_template;

        DEBUG15(printk("DEBUG: detect vport hba %ld at address = %p\n",
            vha->host_no, vha));

        vha->flags.init_done = 1;

        mutex_lock(&ha->vport_lock);
        set_bit(vha->vp_idx, ha->vp_idx_map);
        ha->cur_vport_count++;
        mutex_unlock(&ha->vport_lock);

        return vha;

create_vhost_failed:
        return NULL;
}
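
/*
 * qla25xx_free_req_que
 *      Free a request queue's DMA ring, clear its slot in the queue maps,
 *      and release the queue structure itself.
 */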
static void
qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
        struct qla_hw_data *ha = vha->hw;
        uint16_t que_id = req->id;

        dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
            sizeof(request_t), req->ring, req->dma);
        req->ring = NULL;
        req->dma = 0;
        if (que_id) {
                ha->req_q_map[que_id] = NULL;
                mutex_lock(&ha->vport_lock);
                clear_bit(que_id, ha->req_qid_map);
                mutex_unlock(&ha->vport_lock);
        }
        kfree(req);
        req = NULL;
}
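
/*
 * qla25xx_free_rsp_que
 *      Release a response queue: free its MSI-X vector if one was attached,
 *      free the DMA ring, and clear its slot in the queue maps.
 */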
static void
qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
        struct qla_hw_data *ha = vha->hw;
        uint16_t que_id = rsp->id;

        if (rsp->msix && rsp->msix->have_irq) {
                free_irq(rsp->msix->vector, rsp);
                rsp->msix->have_irq = 0;
                rsp->msix->rsp = NULL;
        }
        dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
            sizeof(response_t), rsp->ring, rsp->dma);
        rsp->ring = NULL;
        rsp->dma = 0;
        if (que_id) {
                ha->rsp_q_map[que_id] = NULL;
                mutex_lock(&ha->vport_lock);
                clear_bit(que_id, ha->rsp_qid_map);
                mutex_unlock(&ha->vport_lock);
        }
        kfree(rsp);
        rsp = NULL;
}
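
/*
 * qla25xx_delete_req_que
 *      Tear down one request queue: re-issue the queue-init command with
 *      BIT_0 set in the options (queue delete), then free the queue on
 *      success.
 */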
int
qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
        int ret = -1;

        if (req) {
                req->options |= BIT_0;
                ret = qla25xx_init_req_que(vha, req);
        }
        if (ret == QLA_SUCCESS)
                qla25xx_free_req_que(vha, req);

        return ret;
}
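
/*
 * qla25xx_delete_rsp_que
 *      Response-queue counterpart of qla25xx_delete_req_que().
 */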
static int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
        int ret = -1;

        if (rsp) {
                rsp->options |= BIT_0;
                ret = qla25xx_init_rsp_que(vha, rsp);
        }
        if (ret == QLA_SUCCESS)
                qla25xx_free_rsp_que(vha, rsp);

        return ret;
}

/* Delete all queues for a given vhost */
int
qla25xx_delete_queues(struct scsi_qla_host *vha)
{
        int cnt, ret = 0;
        struct req_que *req = NULL;
        struct rsp_que *rsp = NULL;
        struct qla_hw_data *ha = vha->hw;

        /* Delete request queues */
        for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
                req = ha->req_q_map[cnt];
                if (req) {
                        ret = qla25xx_delete_req_que(vha, req);
                        if (ret != QLA_SUCCESS) {
                                qla_printk(KERN_WARNING, ha,
                                    "Couldn't delete req que %d\n",
                                    req->id);
                                return ret;
                        }
                }
        }

        /* Delete response queues */
        for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
                rsp = ha->rsp_q_map[cnt];
                if (rsp) {
                        ret = qla25xx_delete_rsp_que(vha, rsp);
                        if (ret != QLA_SUCCESS) {
                                qla_printk(KERN_WARNING, ha,
                                    "Couldn't delete rsp que %d\n",
                                    rsp->id);
                                return ret;
                        }
                }
        }
        return ret;
}
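
/*
 * qla25xx_create_req_que
 *      Allocate a new request queue, claim a queue id, wire it to the given
 *      response queue, and initialize it in firmware.
 *
 * Return:
 *      Queue id (non-zero) on success, 0 on failure.
 */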
int
qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
        uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos)
{
        int ret = 0;
        struct req_que *req = NULL;
        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
        uint16_t que_id = 0;
        device_reg_t __iomem *reg;
        uint32_t cnt;

        req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
        if (req == NULL) {
                qla_printk(KERN_WARNING, ha, "could not allocate memory "
                    "for request que\n");
                goto failed;
        }

        req->length = REQUEST_ENTRY_CNT_24XX;
        req->ring = dma_alloc_coherent(&ha->pdev->dev,
            (req->length + 1) * sizeof(request_t),
            &req->dma, GFP_KERNEL);
        if (req->ring == NULL) {
                qla_printk(KERN_WARNING, ha,
                    "Memory Allocation failed - request_ring\n");
                goto que_failed;
        }

        mutex_lock(&ha->vport_lock);
        que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
        if (que_id >= ha->max_req_queues) {
                mutex_unlock(&ha->vport_lock);
                qla_printk(KERN_INFO, ha, "No resources to create "
                    "additional request queue\n");
                goto que_failed;
        }
        set_bit(que_id, ha->req_qid_map);
        ha->req_q_map[que_id] = req;
        req->rid = rid;
        req->vp_idx = vp_idx;
        req->qos = qos;

        if (rsp_que < 0)
                req->rsp = NULL;
        else
                req->rsp = ha->rsp_q_map[rsp_que];
        /* Use alternate PCI bus number */
        if (MSB(req->rid))
                options |= BIT_4;
        /* Use alternate PCI devfn */
        if (LSB(req->rid))
                options |= BIT_5;
        req->options = options;

        for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
                req->outstanding_cmds[cnt] = NULL;
        req->current_outstanding_cmd = 1;

        req->ring_ptr = req->ring;
        req->ring_index = 0;
        req->cnt = req->length;
        req->id = que_id;
        reg = ISP_QUE_REG(ha, que_id);
        req->max_q_depth = ha->req_q_map[0]->max_q_depth;
        mutex_unlock(&ha->vport_lock);

        ret = qla25xx_init_req_que(base_vha, req);
        if (ret != QLA_SUCCESS) {
                qla_printk(KERN_WARNING, ha, "%s failed\n", __func__);
                mutex_lock(&ha->vport_lock);
                clear_bit(que_id, ha->req_qid_map);
                mutex_unlock(&ha->vport_lock);
                goto que_failed;
        }

        return req->id;

que_failed:
        qla25xx_free_req_que(base_vha, req);
failed:
        return 0;
}
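
/* Workqueue handler: drain a response queue under the hardware lock */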
static void qla_do_work(struct work_struct *work)
{
        unsigned long flags;
        struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
        struct scsi_qla_host *vha;
        struct qla_hw_data *ha = rsp->hw;

        spin_lock_irqsave(&rsp->hw->hardware_lock, flags);
        vha = pci_get_drvdata(ha->pdev);
        qla24xx_process_response_queue(vha, rsp);
        spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags);
}

/* Create a response queue */
int
qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
        uint8_t vp_idx, uint16_t rid, int req)
{
        int ret = 0;
        struct rsp_que *rsp = NULL;
        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
        uint16_t que_id = 0;
        device_reg_t __iomem *reg;

        rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
        if (rsp == NULL) {
                qla_printk(KERN_WARNING, ha, "could not allocate memory for"
                    " response que\n");
                goto failed;
        }

        rsp->length = RESPONSE_ENTRY_CNT_MQ;
        rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
            (rsp->length + 1) * sizeof(response_t),
            &rsp->dma, GFP_KERNEL);
        if (rsp->ring == NULL) {
                qla_printk(KERN_WARNING, ha,
                    "Memory Allocation failed - response_ring\n");
                goto que_failed;
        }

        mutex_lock(&ha->vport_lock);
        que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
        if (que_id >= ha->max_rsp_queues) {
                mutex_unlock(&ha->vport_lock);
                qla_printk(KERN_INFO, ha, "No resources to create "
                    "additional response queue\n");
                goto que_failed;
        }
        set_bit(que_id, ha->rsp_qid_map);

        if (ha->flags.msix_enabled)
                rsp->msix = &ha->msix_entries[que_id + 1];
        else
                qla_printk(KERN_WARNING, ha, "msix not enabled\n");

        ha->rsp_q_map[que_id] = rsp;
        rsp->rid = rid;
        rsp->vp_idx = vp_idx;
        rsp->hw = ha;
        /* Use alternate PCI bus number */
        if (MSB(rsp->rid))
                options |= BIT_4;
        /* Use alternate PCI devfn */
        if (LSB(rsp->rid))
                options |= BIT_5;
        /* Enable MSI-X handshake mode for adapters without NACK capability */
        if (!IS_MSIX_NACK_CAPABLE(ha))
                options |= BIT_6;

        rsp->options = options;
        rsp->id = que_id;
        reg = ISP_QUE_REG(ha, que_id);
        rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
        rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
        mutex_unlock(&ha->vport_lock);

        ret = qla25xx_request_irq(rsp);
        if (ret)
                goto que_failed;

        ret = qla25xx_init_rsp_que(base_vha, rsp);
        if (ret != QLA_SUCCESS) {
                qla_printk(KERN_WARNING, ha, "%s failed\n", __func__);
                mutex_lock(&ha->vport_lock);
                clear_bit(que_id, ha->rsp_qid_map);
                mutex_unlock(&ha->vport_lock);
                goto que_failed;
        }
        if (req >= 0)
                rsp->req = ha->req_q_map[req];
        else
                rsp->req = NULL;

        qla2x00_init_response_q_entries(rsp);
        if (rsp->hw->wq)
                INIT_WORK(&rsp->q_work, qla_do_work);
        return rsp->id;

que_failed:
        qla25xx_free_rsp_que(base_vha, rsp);
failed:
        return 0;
}