qla_mid.c

/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2010 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_gbl.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <linux/delay.h>

void
qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
{
        if (vha->vp_idx && vha->timer_active) {
                del_timer_sync(&vha->timer);
                vha->timer_active = 0;
        }
}

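/*
 * qla24xx_allocate_vp_id
 *      Grab a free vp_idx from the per-HBA bitmap and link the vport onto
 *      ha->vp_list.  On exhaustion the returned id is greater than
 *      ha->max_npiv_vports, which callers treat as failure.
 */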
static uint32_t
qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
{
        uint32_t vp_id;
        struct qla_hw_data *ha = vha->hw;
        unsigned long flags;

        /* Find an empty slot and assign a vp_id */
        mutex_lock(&ha->vport_lock);
        vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
        if (vp_id > ha->max_npiv_vports) {
                DEBUG15(printk("vp_id %d is bigger than max-supported %d.\n",
                    vp_id, ha->max_npiv_vports));
                mutex_unlock(&ha->vport_lock);
                return vp_id;
        }

        set_bit(vp_id, ha->vp_idx_map);
        ha->num_vhosts++;
        vha->vp_idx = vp_id;

        spin_lock_irqsave(&ha->vport_slock, flags);
        list_add_tail(&vha->list, &ha->vp_list);
        spin_unlock_irqrestore(&ha->vport_slock, flags);

        mutex_unlock(&ha->vport_lock);
        return vp_id;
}

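/*
 * qla24xx_deallocate_vp_id
 *      Unlink a vport from ha->vp_list and release its vp_idx bit.  Spins
 *      (dropping vport_slock around the sleep) until vref_count reaches
 *      zero, so no list traversal still references the vport.
 */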
void
qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
{
        uint16_t vp_id;
        struct qla_hw_data *ha = vha->hw;
        unsigned long flags = 0;

        mutex_lock(&ha->vport_lock);
        /*
         * Wait for all pending activities to finish before removing vport
         * from the list.
         * Lock needs to be held for safe removal from the list (it ensures
         * no active vp_list traversal while the vport is removed from the
         * queue).
         */
        spin_lock_irqsave(&ha->vport_slock, flags);
        while (atomic_read(&vha->vref_count)) {
                spin_unlock_irqrestore(&ha->vport_slock, flags);

                msleep(500);

                spin_lock_irqsave(&ha->vport_slock, flags);
        }
        list_del(&vha->list);
        spin_unlock_irqrestore(&ha->vport_slock, flags);

        vp_id = vha->vp_idx;
        ha->num_vhosts--;
        clear_bit(vp_id, ha->vp_idx_map);

        mutex_unlock(&ha->vport_lock);
}

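/*
 * qla24xx_find_vhost_by_name
 *      Walk ha->vp_list under vport_slock and return the vport whose WWPN
 *      matches port_name, or NULL if there is no match.
 */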
static scsi_qla_host_t *
qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
{
        scsi_qla_host_t *vha;
        struct scsi_qla_host *tvha;
        unsigned long flags;

        spin_lock_irqsave(&ha->vport_slock, flags);
        /* Locate matching device in database. */
        list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
                if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
                        spin_unlock_irqrestore(&ha->vport_slock, flags);
                        return vha;
                }
        }
        spin_unlock_irqrestore(&ha->vport_slock, flags);
        return NULL;
}

/*
 * qla2x00_mark_vp_devices_dead
 *      Updates fcport state when device goes offline.
 *
 * Input:
 *      vha = virtual adapter block pointer.
 *
 * Return:
 *      None.
 *
 * Context:
 */
static void
qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
{
        /*
         * !!! NOTE !!!
         * If this function is called from contexts other than vp create,
         * disable or delete, make sure it is synchronized with the vport
         * delete thread.
         */
        fc_port_t *fcport;

        list_for_each_entry(fcport, &vha->vp_fcports, list) {
                DEBUG15(printk("scsi(%ld): Marking port dead, "
                    "loop_id=0x%04x :%x\n",
                    vha->host_no, fcport->loop_id, fcport->vp_idx));

                qla2x00_mark_device_lost(vha, fcport, 0, 0);
                atomic_set(&fcport->state, FCS_UNCONFIGURED);
        }
}

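/*
 * qla24xx_disable_vp
 *      Log out the vport with the VP control mailbox command, mark its
 *      loop down and its fcports unconfigured, and report the result to
 *      the FC transport.  Returns 0 on success, -1 on failure.
 */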
int
qla24xx_disable_vp(scsi_qla_host_t *vha)
{
        int ret;

        ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
        atomic_set(&vha->loop_state, LOOP_DOWN);
        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

        qla2x00_mark_vp_devices_dead(vha);
        atomic_set(&vha->vp_state, VP_FAILED);
        vha->flags.management_server_logged_in = 0;
        if (ret == QLA_SUCCESS) {
                fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
        } else {
                fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
                return -1;
        }
        return 0;
}

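/*
 * qla24xx_enable_vp
 *      Bring a vport online: require the physical port to be up on a
 *      fabric (F-port) topology, then push the vport configuration to
 *      the firmware.  Returns 0 on success, 1 on failure.
 */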
int
qla24xx_enable_vp(scsi_qla_host_t *vha)
{
        int ret;
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

        /* Check if physical ha port is Up */
        if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
            atomic_read(&base_vha->loop_state) == LOOP_DEAD ||
            !(ha->current_topology & ISP_CFG_F)) {
                vha->vp_err_state = VP_ERR_PORTDWN;
                fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
                goto enable_failed;
        }

        /* Initialize the new vport unless it is a persistent port */
        mutex_lock(&ha->vport_lock);
        ret = qla24xx_modify_vp_config(vha);
        mutex_unlock(&ha->vport_lock);

        if (ret != QLA_SUCCESS) {
                fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
                goto enable_failed;
        }

        DEBUG15(qla_printk(KERN_INFO, ha,
            "Virtual port with id: %d - Enabled\n", vha->vp_idx));
        return 0;

enable_failed:
        DEBUG15(qla_printk(KERN_INFO, ha,
            "Virtual port with id: %d - Disabled\n", vha->vp_idx));
        return 1;
}

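/*
 * qla24xx_configure_vp
 *      Finish vport bring-up once the firmware has acquired the VP index:
 *      enable RSCN reception (change request #3), configure the vhba and
 *      mark the vport active.
 */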
static void
qla24xx_configure_vp(scsi_qla_host_t *vha)
{
        struct fc_vport *fc_vport;
        int ret;

        fc_vport = vha->fc_vport;

        DEBUG15(printk("scsi(%ld): %s: change request #3 for this host.\n",
            vha->host_no, __func__));
        ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
        if (ret != QLA_SUCCESS) {
                DEBUG15(qla_printk(KERN_ERR, vha->hw, "Failed to enable "
                    "receiving of RSCN requests: 0x%x\n", ret));
                return;
        } else {
                /* Corresponds to SCR enabled */
                clear_bit(VP_SCR_NEEDED, &vha->vp_flags);
        }

        vha->flags.online = 1;
        if (qla24xx_configure_vhba(vha))
                return;

        atomic_set(&vha->vp_state, VP_ACTIVE);
        fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
}

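/*
 * qla2x00_alert_all_vps
 *      Fan an async event out to all vports.  A reference is taken and
 *      vport_slock dropped while each handler runs, so the vport cannot
 *      be freed mid-event.
 */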
void
qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
{
        scsi_qla_host_t *vha;
        struct qla_hw_data *ha = rsp->hw;
        int i = 0;
        unsigned long flags;

        spin_lock_irqsave(&ha->vport_slock, flags);
        list_for_each_entry(vha, &ha->vp_list, list) {
                if (vha->vp_idx) {
                        atomic_inc(&vha->vref_count);
                        spin_unlock_irqrestore(&ha->vport_slock, flags);

                        switch (mb[0]) {
                        case MBA_LIP_OCCURRED:
                        case MBA_LOOP_UP:
                        case MBA_LOOP_DOWN:
                        case MBA_LIP_RESET:
                        case MBA_POINT_TO_POINT:
                        case MBA_CHG_IN_CONNECTION:
                        case MBA_PORT_UPDATE:
                        case MBA_RSCN_UPDATE:
                                DEBUG15(printk("scsi(%ld)%s: Async_event for"
                                    " VP[%d], mb = 0x%x, vha=%p\n",
                                    vha->host_no, __func__, i, *mb, vha));
                                qla2x00_async_event(vha, rsp, mb);
                                break;
                        }

                        spin_lock_irqsave(&ha->vport_slock, flags);
                        atomic_dec(&vha->vref_count);
                }
                i++;
        }
        spin_unlock_irqrestore(&ha->vport_slock, flags);
}

int
qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
{
        /*
         * Physical port will do most of the abort and recovery work. We can
         * just treat it as a loop down.
         */
        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
                atomic_set(&vha->loop_state, LOOP_DOWN);
                qla2x00_mark_all_devices_lost(vha, 0);
        } else {
                if (!atomic_read(&vha->loop_down_timer))
                        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
        }

        /*
         * To exclusively reset vport, we need to log it out first.  Note:
         * this control_vp can fail if ISP reset is already issued; this is
         * expected, as the vp would already be logged out due to ISP reset.
         */
        if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
                qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);

        DEBUG15(printk("scsi(%ld): Scheduling enable of Vport %d...\n",
            vha->host_no, vha->vp_idx));
        return qla24xx_enable_vp(vha);
}

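/*
 * qla2x00_do_dpc_vp
 *      Per-vport DPC pass: drain queued work, complete port configuration
 *      once the VP index is acquired, then service fcport updates,
 *      relogins, reset markers and loop resync as flagged in dpc_flags.
 */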
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
        qla2x00_do_work(vha);

        if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
                /* VP acquired. complete port configuration */
                qla24xx_configure_vp(vha);
                return 0;
        }

        if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
                qla2x00_update_fcports(vha);
                clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
        }

        if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) &&
            !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
            atomic_read(&vha->loop_state) != LOOP_DOWN) {

                DEBUG(printk("scsi(%ld): qla2x00_port_login()\n",
                    vha->host_no));
                qla2x00_relogin(vha);

                DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n",
                    vha->host_no));
        }

        if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
            (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
                clear_bit(RESET_ACTIVE, &vha->dpc_flags);
        }

        if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
                if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
                        qla2x00_loop_resync(vha);
                        clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
                }
        }

        return 0;
}

void
qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
{
        int ret;
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *vp;
        unsigned long flags = 0;

        if (vha->vp_idx)
                return;
        if (list_empty(&ha->vp_list))
                return;

        clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);

        if (!(ha->current_topology & ISP_CFG_F))
                return;

        spin_lock_irqsave(&ha->vport_slock, flags);
        list_for_each_entry(vp, &ha->vp_list, list) {
                if (vp->vp_idx) {
                        atomic_inc(&vp->vref_count);
                        spin_unlock_irqrestore(&ha->vport_slock, flags);

                        ret = qla2x00_do_dpc_vp(vp);

                        spin_lock_irqsave(&ha->vport_slock, flags);
                        atomic_dec(&vp->vref_count);
                }
        }
        spin_unlock_irqrestore(&ha->vport_slock, flags);
}

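/*
 * qla24xx_vport_create_req_sanity_check
 *      Validate a vport create request before committing any resources:
 *      initiator role only, NPIV-capable firmware and switch, a WWPN
 *      unique on this HBA, and a free vport slot.  Returns 0 or a
 *      VPCERR_* code.
 */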
int
qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
{
        scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
        struct qla_hw_data *ha = base_vha->hw;
        scsi_qla_host_t *vha;
        uint8_t port_name[WWN_SIZE];

        if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
                return VPCERR_UNSUPPORTED;

        /* Check that the F/W and H/W support NPIV */
        if (!ha->flags.npiv_supported)
                return VPCERR_UNSUPPORTED;

        /* Check whether an NPIV-capable switch is present */
        if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
                return VPCERR_NO_FABRIC_SUPP;

        /* Check for a unique WWPN */
        u64_to_wwn(fc_vport->port_name, port_name);
        if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
                return VPCERR_BAD_WWN;
        vha = qla24xx_find_vhost_by_name(ha, port_name);
        if (vha)
                return VPCERR_BAD_WWN;

        /* Check the max number of NPIV ports supported */
        if (ha->num_vhosts > ha->max_npiv_vports) {
                DEBUG15(printk("scsi(%ld): num_vhosts %u is bigger than "
                    "max_npiv_vports %u.\n", base_vha->host_no,
                    ha->num_vhosts, ha->max_npiv_vports));
                return VPCERR_UNSUPPORTED;
        }

        return 0;
}

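/*
 * qla24xx_create_vhost
 *      Allocate and initialize a scsi_qla_host for a new vport: assign a
 *      vp_idx, inherit the base port's request queue, size the SCSI host
 *      limits and start the vport timer.  Returns the new vha, or NULL
 *      on failure.
 */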
scsi_qla_host_t *
qla24xx_create_vhost(struct fc_vport *fc_vport)
{
        scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
        struct qla_hw_data *ha = base_vha->hw;
        scsi_qla_host_t *vha;
        struct scsi_host_template *sht = &qla2xxx_driver_template;
        struct Scsi_Host *host;

        vha = qla2x00_create_host(sht, ha);
        if (!vha) {
                DEBUG(printk("qla2xxx: scsi_host_alloc() failed for vport\n"));
                return NULL;
        }

        host = vha->host;
        fc_vport->dd_data = vha;
        /* New host info */
        u64_to_wwn(fc_vport->node_name, vha->node_name);
        u64_to_wwn(fc_vport->port_name, vha->port_name);

        vha->fc_vport = fc_vport;
        vha->device_flags = 0;
        vha->vp_idx = qla24xx_allocate_vp_id(vha);
        if (vha->vp_idx > ha->max_npiv_vports) {
                DEBUG15(printk("scsi(%ld): Couldn't allocate vp_id.\n",
                    vha->host_no));
                goto create_vhost_failed;
        }
        vha->mgmt_svr_loop_id = 10 + vha->vp_idx;

        vha->dpc_flags = 0L;

        /*
         * To fix the issue of processing a parent's RSCN for the vport
         * before its SCR is complete.
         */
        set_bit(VP_SCR_NEEDED, &vha->vp_flags);
        atomic_set(&vha->loop_state, LOOP_DOWN);
        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

        qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL);

        vha->req = base_vha->req;
        host->can_queue = base_vha->req->length + 128;
        host->this_id = 255;
        host->cmd_per_lun = 3;
        if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif)
                host->max_cmd_len = 32;
        else
                host->max_cmd_len = MAX_CMDSZ;
        host->max_channel = MAX_BUSES - 1;
        host->max_lun = MAX_LUNS;
        host->unique_id = host->host_no;
        host->max_id = MAX_TARGETS_2200;
        host->transportt = qla2xxx_transport_vport_template;

        DEBUG15(printk("DEBUG: detect vport hba %ld at address = %p\n",
            vha->host_no, vha));

        vha->flags.init_done = 1;

        mutex_lock(&ha->vport_lock);
        set_bit(vha->vp_idx, ha->vp_idx_map);
        ha->cur_vport_count++;
        mutex_unlock(&ha->vport_lock);

        return vha;

create_vhost_failed:
        return NULL;
}

static void
qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
        struct qla_hw_data *ha = vha->hw;
        uint16_t que_id = req->id;

        dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
            sizeof(request_t), req->ring, req->dma);
        req->ring = NULL;
        req->dma = 0;
        if (que_id) {
                ha->req_q_map[que_id] = NULL;
                mutex_lock(&ha->vport_lock);
                clear_bit(que_id, ha->req_qid_map);
                mutex_unlock(&ha->vport_lock);
        }
        kfree(req);
        req = NULL;
}

static void
qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
        struct qla_hw_data *ha = vha->hw;
        uint16_t que_id = rsp->id;

        if (rsp->msix && rsp->msix->have_irq) {
                free_irq(rsp->msix->vector, rsp);
                rsp->msix->have_irq = 0;
                rsp->msix->rsp = NULL;
        }
        dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
            sizeof(response_t), rsp->ring, rsp->dma);
        rsp->ring = NULL;
        rsp->dma = 0;
        if (que_id) {
                ha->rsp_q_map[que_id] = NULL;
                mutex_lock(&ha->vport_lock);
                clear_bit(que_id, ha->rsp_qid_map);
                mutex_unlock(&ha->vport_lock);
        }
        kfree(rsp);
        rsp = NULL;
}

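/*
 * Queue deletion re-issues the queue-init mailbox command with BIT_0
 * set in the options field (queue disable) and frees the ring only if
 * the firmware accepts the request.
 */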
int
qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
        int ret = -1;

        if (req) {
                req->options |= BIT_0;
                ret = qla25xx_init_req_que(vha, req);
        }
        if (ret == QLA_SUCCESS)
                qla25xx_free_req_que(vha, req);

        return ret;
}

static int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
        int ret = -1;

        if (rsp) {
                rsp->options |= BIT_0;
                ret = qla25xx_init_rsp_que(vha, rsp);
        }
        if (ret == QLA_SUCCESS)
                qla25xx_free_rsp_que(vha, rsp);

        return ret;
}

/* Delete all queues for a given vhost */
int
qla25xx_delete_queues(struct scsi_qla_host *vha)
{
        int cnt, ret = 0;
        struct req_que *req = NULL;
        struct rsp_que *rsp = NULL;
        struct qla_hw_data *ha = vha->hw;

        /* Delete request queues */
        for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
                req = ha->req_q_map[cnt];
                if (req) {
                        ret = qla25xx_delete_req_que(vha, req);
                        if (ret != QLA_SUCCESS) {
                                qla_printk(KERN_WARNING, ha,
                                    "Couldn't delete req que %d\n",
                                    req->id);
                                return ret;
                        }
                }
        }

        /* Delete response queues */
        for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
                rsp = ha->rsp_q_map[cnt];
                if (rsp) {
                        ret = qla25xx_delete_rsp_que(vha, rsp);
                        if (ret != QLA_SUCCESS) {
                                qla_printk(KERN_WARNING, ha,
                                    "Couldn't delete rsp que %d\n",
                                    rsp->id);
                                return ret;
                        }
                }
        }
        return ret;
}

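/*
 * qla25xx_create_req_que
 *      Allocate an additional request queue: DMA ring memory, a free id
 *      from req_qid_map, then firmware initialization.  Returns the new
 *      queue id, or 0 on failure (id 0 is the default queue and is never
 *      handed out here).
 */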
int
qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
    uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos)
{
        int ret = 0;
        struct req_que *req = NULL;
        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
        uint16_t que_id = 0;
        device_reg_t __iomem *reg;
        uint32_t cnt;

        req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
        if (req == NULL) {
                qla_printk(KERN_WARNING, ha, "could not allocate memory "
                    "for request que\n");
                goto failed;
        }

        req->length = REQUEST_ENTRY_CNT_24XX;
        req->ring = dma_alloc_coherent(&ha->pdev->dev,
            (req->length + 1) * sizeof(request_t),
            &req->dma, GFP_KERNEL);
        if (req->ring == NULL) {
                qla_printk(KERN_WARNING, ha,
                    "Memory Allocation failed - request_ring\n");
                goto que_failed;
        }

        mutex_lock(&ha->vport_lock);
        que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
        if (que_id >= ha->max_req_queues) {
                mutex_unlock(&ha->vport_lock);
                qla_printk(KERN_INFO, ha, "No resources to create "
                    "additional request queue\n");
                goto que_failed;
        }
        set_bit(que_id, ha->req_qid_map);
        ha->req_q_map[que_id] = req;
        req->rid = rid;
        req->vp_idx = vp_idx;
        req->qos = qos;

        if (rsp_que < 0)
                req->rsp = NULL;
        else
                req->rsp = ha->rsp_q_map[rsp_que];
        /* Use alternate PCI bus number */
        if (MSB(req->rid))
                options |= BIT_4;
        /* Use alternate PCI devfn */
        if (LSB(req->rid))
                options |= BIT_5;
        req->options = options;

        for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
                req->outstanding_cmds[cnt] = NULL;
        req->current_outstanding_cmd = 1;

        req->ring_ptr = req->ring;
        req->ring_index = 0;
        req->cnt = req->length;
        req->id = que_id;
        reg = ISP_QUE_REG(ha, que_id);
        req->max_q_depth = ha->req_q_map[0]->max_q_depth;
        mutex_unlock(&ha->vport_lock);

        ret = qla25xx_init_req_que(base_vha, req);
        if (ret != QLA_SUCCESS) {
                qla_printk(KERN_WARNING, ha, "%s failed\n", __func__);
                mutex_lock(&ha->vport_lock);
                clear_bit(que_id, ha->req_qid_map);
                mutex_unlock(&ha->vport_lock);
                goto que_failed;
        }

        return req->id;

que_failed:
        qla25xx_free_req_que(base_vha, req);
failed:
        return 0;
}

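/*
 * qla_do_work
 *      Workqueue callback that drains a response queue under the hardware
 *      lock; it is only hooked up (below) when the driver workqueue
 *      ha->wq is present.
 */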
static void qla_do_work(struct work_struct *work)
{
        unsigned long flags;
        struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
        struct scsi_qla_host *vha;
        struct qla_hw_data *ha = rsp->hw;

        spin_lock_irqsave(&rsp->hw->hardware_lock, flags);
        vha = pci_get_drvdata(ha->pdev);
        qla24xx_process_response_queue(vha, rsp);
        spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags);
}

/* create response queue */
int
qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
    uint8_t vp_idx, uint16_t rid, int req)
{
        int ret = 0;
        struct rsp_que *rsp = NULL;
        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
        uint16_t que_id = 0;
        device_reg_t __iomem *reg;

        rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
        if (rsp == NULL) {
                qla_printk(KERN_WARNING, ha, "could not allocate memory for"
                    " response que\n");
                goto failed;
        }

        rsp->length = RESPONSE_ENTRY_CNT_MQ;
        rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
            (rsp->length + 1) * sizeof(response_t),
            &rsp->dma, GFP_KERNEL);
        if (rsp->ring == NULL) {
                qla_printk(KERN_WARNING, ha,
                    "Memory Allocation failed - response_ring\n");
                goto que_failed;
        }

        mutex_lock(&ha->vport_lock);
        que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
        if (que_id >= ha->max_rsp_queues) {
                mutex_unlock(&ha->vport_lock);
                qla_printk(KERN_INFO, ha, "No resources to create "
                    "additional response queue\n");
                goto que_failed;
        }
        set_bit(que_id, ha->rsp_qid_map);

        if (ha->flags.msix_enabled)
                rsp->msix = &ha->msix_entries[que_id + 1];
        else
                qla_printk(KERN_WARNING, ha, "msix not enabled\n");

        ha->rsp_q_map[que_id] = rsp;
        rsp->rid = rid;
        rsp->vp_idx = vp_idx;
        rsp->hw = ha;
        /* Use alternate PCI bus number */
        if (MSB(rsp->rid))
                options |= BIT_4;
        /* Use alternate PCI devfn */
        if (LSB(rsp->rid))
                options |= BIT_5;
        /* Enable MSI-X handshake mode on adapters that are not NACK-capable */
        if (!IS_MSIX_NACK_CAPABLE(ha))
                options |= BIT_6;

        rsp->options = options;
        rsp->id = que_id;
        reg = ISP_QUE_REG(ha, que_id);
        rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
        rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
        mutex_unlock(&ha->vport_lock);

        ret = qla25xx_request_irq(rsp);
        if (ret)
                goto que_failed;

        ret = qla25xx_init_rsp_que(base_vha, rsp);
        if (ret != QLA_SUCCESS) {
                qla_printk(KERN_WARNING, ha, "%s failed\n", __func__);
                mutex_lock(&ha->vport_lock);
                clear_bit(que_id, ha->rsp_qid_map);
                mutex_unlock(&ha->vport_lock);
                goto que_failed;
        }
        if (req >= 0)
                rsp->req = ha->req_q_map[req];
        else
                rsp->req = NULL;

        qla2x00_init_response_q_entries(rsp);
        if (rsp->hw->wq)
                INIT_WORK(&rsp->q_work, qla_do_work);

        return rsp->id;

que_failed:
        qla25xx_free_rsp_que(base_vha, rsp);
failed:
        return 0;
}