qla_mid.c

/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_gbl.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/list.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <linux/delay.h>

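/* Stop a vport's timer; the physical port (vp_idx 0) keeps its timer
 * running. */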
void
qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
{
	if (vha->vp_idx && vha->timer_active) {
		del_timer_sync(&vha->timer);
		vha->timer_active = 0;
	}
}

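/* Reserve the first free index in ha->vp_idx_map for a new vport and
 * add the vport to the adapter's vp_list. Returns the allocated vp_id,
 * which exceeds ha->max_npiv_vports when the map is exhausted. */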
static uint32_t
qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
{
	uint32_t vp_id;
	struct qla_hw_data *ha = vha->hw;

	/* Find an empty slot and assign a vp_id */
	mutex_lock(&ha->vport_lock);
	vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
	if (vp_id > ha->max_npiv_vports) {
		DEBUG15(printk("vp_id %d is bigger than max-supported %d.\n",
		    vp_id, ha->max_npiv_vports));
		mutex_unlock(&ha->vport_lock);
		return vp_id;
	}

	set_bit(vp_id, ha->vp_idx_map);
	ha->num_vhosts++;
	vha->vp_idx = vp_id;
	list_add_tail(&vha->list, &ha->vp_list);
	mutex_unlock(&ha->vport_lock);
	return vp_id;
}

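/* Return a vport's index to ha->vp_idx_map and unlink the vport from
 * the adapter's vp_list. */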
void
qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
{
	uint16_t vp_id;
	struct qla_hw_data *ha = vha->hw;

	mutex_lock(&ha->vport_lock);
	vp_id = vha->vp_idx;
	ha->num_vhosts--;
	clear_bit(vp_id, ha->vp_idx_map);
	list_del(&vha->list);
	mutex_unlock(&ha->vport_lock);
}

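/* Look up a vport on this adapter by WWPN; returns NULL when no vport
 * matches. */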
static scsi_qla_host_t *
qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
{
	scsi_qla_host_t *vha;
	struct scsi_qla_host *tvha;

	/* Locate matching device in database. */
	list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
		if (!memcmp(port_name, vha->port_name, WWN_SIZE))
			return vha;
	}
	return NULL;
}

/*
 * qla2x00_mark_vp_devices_dead
 *	Updates fcport state when device goes offline.
 *
 * Input:
 *	vha = virtual adapter block pointer.
 *
 * Return:
 *	None.
 *
 * Context:
 */
static void
qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
{
	fc_port_t *fcport;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		DEBUG15(printk("scsi(%ld): Marking port dead, "
		    "loop_id=0x%04x :%x\n",
		    vha->host_no, fcport->loop_id, fcport->vp_idx));

		atomic_set(&fcport->state, FCS_DEVICE_DEAD);
		qla2x00_mark_device_lost(vha, fcport, 0, 0);
		atomic_set(&fcport->state, FCS_UNCONFIGURED);
	}
}

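/* Take a vport offline: log out all of its sessions, mark its devices
 * dead and report the resulting state to the FC transport. Returns 0
 * on success, -1 if the firmware rejected the disable request. */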
int
qla24xx_disable_vp(scsi_qla_host_t *vha)
{
	int ret;

	ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

	qla2x00_mark_vp_devices_dead(vha);
	atomic_set(&vha->vp_state, VP_FAILED);
	vha->flags.management_server_logged_in = 0;
	if (ret == QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
	} else {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		return -1;
	}
	return 0;
}

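/* Bring a vport online. The physical port must be up; the vport
 * configuration is then pushed to the firmware. Returns 0 on success,
 * 1 on failure. */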
int
qla24xx_enable_vp(scsi_qla_host_t *vha)
{
	int ret;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	/* Check if physical ha port is Up */
	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
	    atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
		vha->vp_err_state = VP_ERR_PORTDWN;
		fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
		goto enable_failed;
	}

	/* Initialize the new vport unless it is a persistent port */
	mutex_lock(&ha->vport_lock);
	ret = qla24xx_modify_vp_config(vha);
	mutex_unlock(&ha->vport_lock);

	if (ret != QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		goto enable_failed;
	}

	DEBUG15(qla_printk(KERN_INFO, ha,
	    "Virtual port with id: %d - Enabled\n", vha->vp_idx));
	return 0;

enable_failed:
	DEBUG15(qla_printk(KERN_INFO, ha,
	    "Virtual port with id: %d - Disabled\n", vha->vp_idx));
	return 1;
}

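/* Finish vport initialization once the firmware has acquired the VP
 * index: register for RSCNs and move the vport to the ACTIVE state. */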
static void
qla24xx_configure_vp(scsi_qla_host_t *vha)
{
	struct fc_vport *fc_vport;
	int ret;

	fc_vport = vha->fc_vport;

	DEBUG15(printk("scsi(%ld): %s: change request #3 for this host.\n",
	    vha->host_no, __func__));
	ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
	if (ret != QLA_SUCCESS) {
		DEBUG15(qla_printk(KERN_ERR, vha->hw, "Failed to enable "
		    "receiving of RSCN requests: 0x%x\n", ret));
		return;
	} else {
		/* Corresponds to SCR enabled */
		clear_bit(VP_SCR_NEEDED, &vha->vp_flags);
	}

	vha->flags.online = 1;
	if (qla24xx_configure_vhba(vha))
		return;

	atomic_set(&vha->vp_state, VP_ACTIVE);
	fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
}

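/* Forward selected async events (LIP, loop, RSCN and port updates)
 * from the physical port to every vport on the adapter. */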
void
qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
{
	scsi_qla_host_t *vha, *tvha;
	struct qla_hw_data *ha = rsp->hw;
	int i = 0;

	list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
		if (vha->vp_idx) {
			switch (mb[0]) {
			case MBA_LIP_OCCURRED:
			case MBA_LOOP_UP:
			case MBA_LOOP_DOWN:
			case MBA_LIP_RESET:
			case MBA_POINT_TO_POINT:
			case MBA_CHG_IN_CONNECTION:
			case MBA_PORT_UPDATE:
			case MBA_RSCN_UPDATE:
				DEBUG15(printk("scsi(%ld)%s: Async_event for"
				    " VP[%d], mb = 0x%x, vha=%p\n",
				    vha->host_no, __func__, i, *mb, vha));
				qla2x00_async_event(vha, rsp, mb);
				break;
			}
		}
		i++;
	}
}

int
qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
{
	/*
	 * Physical port will do most of the abort and recovery work. We can
	 * just treat it as a loop down.
	 */
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha, 0);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	}

	/*
	 * To exclusively reset vport, we need to log it out first. Note: this
	 * control_vp can fail if ISP reset is already issued, this is
	 * expected, as the vp would be already logged out due to ISP reset.
	 */
	if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
		qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);

	DEBUG15(printk("scsi(%ld): Scheduling enable of Vport %d...\n",
	    vha->host_no, vha->vp_idx));
	return qla24xx_enable_vp(vha);
}

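/* Per-vport DPC handler: completes deferred port configuration and
 * services fcport updates, relogins and loop resync as requested via
 * dpc_flags. */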
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
	qla2x00_do_work(vha);

	if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
		/* VP acquired. complete port configuration */
		qla24xx_configure_vp(vha);
		return 0;
	}

	if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
		qla2x00_update_fcports(vha);
		clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
	}

	if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) &&
	    !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
	    atomic_read(&vha->loop_state) != LOOP_DOWN) {

		DEBUG(printk("scsi(%ld): qla2x00_port_login()\n",
		    vha->host_no));
		qla2x00_relogin(vha);

		DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n",
		    vha->host_no));
	}

	if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
	    (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
		clear_bit(RESET_ACTIVE, &vha->dpc_flags);
	}

	if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
			qla2x00_loop_resync(vha);
			clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
		}
	}

	return 0;
}

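/* Run the DPC handler on every vport. This is a no-op when invoked on
 * a vport itself and outside of fabric (ISP_CFG_F) topologies. */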
void
qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
{
	int ret;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp;
	struct scsi_qla_host *tvp;

	if (vha->vp_idx)
		return;
	if (list_empty(&ha->vp_list))
		return;

	clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);

	if (!(ha->current_topology & ISP_CFG_F))
		return;

	list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
		if (vp->vp_idx)
			ret = qla2x00_do_dpc_vp(vp);
	}
}

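/* Validate a vport-create request from the FC transport before any
 * resources are committed: initiator role only, NPIV-capable firmware
 * and switch, and a WWPN that is unique on this adapter. Returns 0 or
 * a VPCERR_* code. */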
int
qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	uint8_t port_name[WWN_SIZE];

	if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
		return VPCERR_UNSUPPORTED;

	/* Check that the F/W and H/W support NPIV */
	if (!ha->flags.npiv_supported)
		return VPCERR_UNSUPPORTED;

	/* Check whether an NPIV-capable switch is present */
	if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
		return VPCERR_NO_FABRIC_SUPP;

	/* Check for a unique WWPN */
	u64_to_wwn(fc_vport->port_name, port_name);
	if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
		return VPCERR_BAD_WWN;
	vha = qla24xx_find_vhost_by_name(ha, port_name);
	if (vha)
		return VPCERR_BAD_WWN;

	/* Check the max number of NPIV vports supported */
	if (ha->num_vhosts > ha->max_npiv_vports) {
		DEBUG15(printk("scsi(%ld): num_vhosts %u is bigger than "
		    "max_npiv_vports %u.\n", base_vha->host_no,
		    ha->num_vhosts, ha->max_npiv_vports));
		return VPCERR_UNSUPPORTED;
	}
	return 0;
}

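/*
 * Allocate and initialize a new virtual host for an FC vport. The
 * expected calling sequence from the transport's vport-create path is
 * roughly the following (a sketch; error handling elided):
 *
 *	if (qla24xx_vport_create_req_sanity_check(fc_vport) == 0)
 *		vha = qla24xx_create_vhost(fc_vport);
 *
 * Returns the new host, or NULL on failure.
 */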
scsi_qla_host_t *
qla24xx_create_vhost(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	struct scsi_host_template *sht = &qla2xxx_driver_template;
	struct Scsi_Host *host;

	vha = qla2x00_create_host(sht, ha);
	if (!vha) {
		DEBUG(printk("qla2xxx: scsi_host_alloc() failed for vport\n"));
		return NULL;
	}

	host = vha->host;
	fc_vport->dd_data = vha;
	/* New host info */
	u64_to_wwn(fc_vport->node_name, vha->node_name);
	u64_to_wwn(fc_vport->port_name, vha->port_name);

	vha->fc_vport = fc_vport;
	vha->device_flags = 0;
	vha->vp_idx = qla24xx_allocate_vp_id(vha);
	if (vha->vp_idx > ha->max_npiv_vports) {
		DEBUG15(printk("scsi(%ld): Couldn't allocate vp_id.\n",
		    vha->host_no));
		goto create_vhost_failed;
	}
	vha->mgmt_svr_loop_id = 10 + vha->vp_idx;

	vha->dpc_flags = 0L;

	/*
	 * To fix the issue of processing a parent's RSCN for the vport before
	 * its SCR is complete.
	 */
	set_bit(VP_SCR_NEEDED, &vha->vp_flags);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

	qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL);

	vha->req = base_vha->req;
	host->can_queue = base_vha->req->length + 128;
	host->this_id = 255;
	host->cmd_per_lun = 3;
	host->max_cmd_len = MAX_CMDSZ;
	host->max_channel = MAX_BUSES - 1;
	host->max_lun = MAX_LUNS;
	host->unique_id = host->host_no;
	host->max_id = MAX_TARGETS_2200;
	host->transportt = qla2xxx_transport_vport_template;

	DEBUG15(printk("DEBUG: detect vport hba %ld at address = %p\n",
	    vha->host_no, vha));

	vha->flags.init_done = 1;

	mutex_lock(&ha->vport_lock);
	set_bit(vha->vp_idx, ha->vp_idx_map);
	ha->cur_vport_count++;
	mutex_unlock(&ha->vport_lock);

	return vha;

create_vhost_failed:
	return NULL;
}

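/* Tear down a request queue: free its DMA ring and release its id
 * from the adapter's req_qid_map. */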
static void
qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = req->id;

	dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
	    sizeof(request_t), req->ring, req->dma);
	req->ring = NULL;
	req->dma = 0;
	if (que_id) {
		ha->req_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->req_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(req);
	req = NULL;
}

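/* Tear down a response queue: release its MSI-X vector if one was
 * assigned, free its DMA ring and release its id from the adapter's
 * rsp_qid_map. */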
static void
qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = rsp->id;

	if (rsp->msix && rsp->msix->have_irq) {
		free_irq(rsp->msix->vector, rsp);
		rsp->msix->have_irq = 0;
		rsp->msix->rsp = NULL;
	}
	dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
	    sizeof(response_t), rsp->ring, rsp->dma);
	rsp->ring = NULL;
	rsp->dma = 0;
	if (que_id) {
		ha->rsp_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->rsp_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(rsp);
	rsp = NULL;
}

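/* Delete a request queue: flag it via options BIT_0, re-issue the
 * queue init to the firmware, and free the queue on success. */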
int
qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	int ret = -1;

	if (req) {
		req->options |= BIT_0;
		ret = qla25xx_init_req_que(vha, req);
	}
	if (ret == QLA_SUCCESS)
		qla25xx_free_req_que(vha, req);

	return ret;
}

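/* Response-queue counterpart of qla25xx_delete_req_que(). */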
int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	int ret = -1;

	if (rsp) {
		rsp->options |= BIT_0;
		ret = qla25xx_init_rsp_que(vha, rsp);
	}
	if (ret == QLA_SUCCESS)
		qla25xx_free_rsp_que(vha, rsp);

	return ret;
}

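/* Push a new QoS value for an existing request queue to the firmware;
 * BIT_3 of the queue options is set only for the duration of the
 * update. */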
int qla25xx_update_req_que(struct scsi_qla_host *vha, uint8_t que, uint8_t qos)
{
	int ret = 0;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[que];

	req->options |= BIT_3;
	req->qos = qos;
	ret = qla25xx_init_req_que(vha, req);
	if (ret != QLA_SUCCESS)
		DEBUG2_17(printk(KERN_WARNING "%s failed\n", __func__));
	/* restore options bit */
	req->options &= ~BIT_3;
	return ret;
}

/* Delete all queues for a given vhost */
int
qla25xx_delete_queues(struct scsi_qla_host *vha)
{
	int cnt, ret = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct qla_hw_data *ha = vha->hw;

	/* Delete request queues */
	for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
		req = ha->req_q_map[cnt];
		if (req) {
			ret = qla25xx_delete_req_que(vha, req);
			if (ret != QLA_SUCCESS) {
				qla_printk(KERN_WARNING, ha,
				    "Couldn't delete req que %d\n",
				    req->id);
				return ret;
			}
		}
	}

	/* Delete response queues */
	for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
		rsp = ha->rsp_q_map[cnt];
		if (rsp) {
			ret = qla25xx_delete_rsp_que(vha, rsp);
			if (ret != QLA_SUCCESS) {
				qla_printk(KERN_WARNING, ha,
				    "Couldn't delete rsp que %d\n",
				    rsp->id);
				return ret;
			}
		}
	}
	return ret;
}

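/* Allocate a new request queue, reserve a queue id, initialize the
 * queue in the firmware and optionally bind it to an existing response
 * queue (rsp_que < 0 leaves it unbound). Returns the new queue id, or
 * 0 on failure. */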
int
qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
    uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos)
{
	int ret = 0;
	struct req_que *req = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t __iomem *reg;
	uint32_t cnt;

	req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
	if (req == NULL) {
		qla_printk(KERN_WARNING, ha, "could not allocate memory "
		    "for request que\n");
		goto failed;
	}

	req->length = REQUEST_ENTRY_CNT_24XX;
	req->ring = dma_alloc_coherent(&ha->pdev->dev,
	    (req->length + 1) * sizeof(request_t),
	    &req->dma, GFP_KERNEL);
	if (req->ring == NULL) {
		qla_printk(KERN_WARNING, ha,
		    "Memory Allocation failed - request_ring\n");
		goto que_failed;
	}

	mutex_lock(&ha->vport_lock);
	que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
	if (que_id >= ha->max_req_queues) {
		mutex_unlock(&ha->vport_lock);
		qla_printk(KERN_INFO, ha, "No resources to create "
		    "additional request queue\n");
		goto que_failed;
	}
	set_bit(que_id, ha->req_qid_map);
	ha->req_q_map[que_id] = req;
	req->rid = rid;
	req->vp_idx = vp_idx;
	req->qos = qos;

	if (rsp_que < 0)
		req->rsp = NULL;
	else
		req->rsp = ha->rsp_q_map[rsp_que];
	/* Use alternate PCI bus number */
	if (MSB(req->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(req->rid))
		options |= BIT_5;
	req->options = options;

	for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
		req->outstanding_cmds[cnt] = NULL;
	req->current_outstanding_cmd = 1;

	req->ring_ptr = req->ring;
	req->ring_index = 0;
	req->cnt = req->length;
	req->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	req->max_q_depth = ha->req_q_map[0]->max_q_depth;
	mutex_unlock(&ha->vport_lock);

	ret = qla25xx_init_req_que(base_vha, req);
	if (ret != QLA_SUCCESS) {
		qla_printk(KERN_WARNING, ha, "%s failed\n", __func__);
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->req_qid_map);
		mutex_unlock(&ha->vport_lock);
		goto que_failed;
	}

	return req->id;

que_failed:
	qla25xx_free_req_que(base_vha, req);
failed:
	return 0;
}

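/* Work item for deferred response-queue processing on the adapter's
 * workqueue (see the INIT_WORK in qla25xx_create_rsp_que() below). */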
static void qla_do_work(struct work_struct *work)
{
	struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
	struct scsi_qla_host *vha;

	spin_lock_irq(&rsp->hw->hardware_lock);
	vha = qla25xx_get_host(rsp);
	qla24xx_process_response_queue(vha, rsp);
	spin_unlock_irq(&rsp->hw->hardware_lock);
}

/* create response queue */
int
qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
    uint8_t vp_idx, uint16_t rid, int req)
{
	int ret = 0;
	struct rsp_que *rsp = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t __iomem *reg;

	rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
	if (rsp == NULL) {
		qla_printk(KERN_WARNING, ha, "could not allocate memory for"
		    " response que\n");
		goto failed;
	}

	rsp->length = RESPONSE_ENTRY_CNT_MQ;
	rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
	    (rsp->length + 1) * sizeof(response_t),
	    &rsp->dma, GFP_KERNEL);
	if (rsp->ring == NULL) {
		qla_printk(KERN_WARNING, ha,
		    "Memory Allocation failed - response_ring\n");
		goto que_failed;
	}

	mutex_lock(&ha->vport_lock);
	que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
	if (que_id >= ha->max_rsp_queues) {
		mutex_unlock(&ha->vport_lock);
		qla_printk(KERN_INFO, ha, "No resources to create "
		    "additional response queue\n");
		goto que_failed;
	}
	set_bit(que_id, ha->rsp_qid_map);

	if (ha->flags.msix_enabled)
		rsp->msix = &ha->msix_entries[que_id + 1];
	else
		qla_printk(KERN_WARNING, ha, "msix not enabled\n");

	ha->rsp_q_map[que_id] = rsp;
	rsp->rid = rid;
	rsp->vp_idx = vp_idx;
	rsp->hw = ha;
	/* Use alternate PCI bus number */
	if (MSB(rsp->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(rsp->rid))
		options |= BIT_5;
	/* Enable MSI-X handshake mode for adapters without NACK capability */
	if (!IS_MSIX_NACK_CAPABLE(ha))
		options |= BIT_6;
	rsp->options = options;
	rsp->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
	rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
	mutex_unlock(&ha->vport_lock);

	ret = qla25xx_request_irq(rsp);
	if (ret)
		goto que_failed;

	ret = qla25xx_init_rsp_que(base_vha, rsp);
	if (ret != QLA_SUCCESS) {
		qla_printk(KERN_WARNING, ha, "%s failed\n", __func__);
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->rsp_qid_map);
		mutex_unlock(&ha->vport_lock);
		goto que_failed;
	}
	if (req >= 0)
		rsp->req = ha->req_q_map[req];
	else
		rsp->req = NULL;

	qla2x00_init_response_q_entries(rsp);
	if (rsp->hw->wq)
		INIT_WORK(&rsp->q_work, qla_do_work);
	return rsp->id;

que_failed:
	qla25xx_free_rsp_que(base_vha, rsp);
failed:
	return 0;
}

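/* Create a request/response queue pair for a vport: the response
 * queue is created first, then the request queue is bound to it.
 * Returns the request queue id, or 0 if creation failed. */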
int
qla25xx_create_queues(struct scsi_qla_host *vha, uint8_t qos)
{
	uint16_t options = 0;
	uint8_t ret = 0;
	struct qla_hw_data *ha = vha->hw;
	struct rsp_que *rsp;

	options |= BIT_1;
	ret = qla25xx_create_rsp_que(ha, options, vha->vp_idx, 0, -1);
	if (!ret) {
		qla_printk(KERN_WARNING, ha, "Response Que create failed\n");
		return ret;
	} else
		qla_printk(KERN_INFO, ha, "Response Que:%d created.\n", ret);
	rsp = ha->rsp_q_map[ret];

	options = 0;
	if (qos & BIT_7)
		options |= BIT_8;
	ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, ret,
	    qos & ~BIT_7);
	if (ret) {
		vha->req = ha->req_q_map[ret];
		qla_printk(KERN_INFO, ha, "Request Que:%d created.\n", ret);
	} else
		qla_printk(KERN_WARNING, ha, "Request Que create failed\n");
	rsp->req = ha->req_q_map[ret];

	return ret;
}