/* qla_mid.c */
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_gbl.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/smp_lock.h>
#include <linux/list.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
  16. void
  17. qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
  18. {
  19. if (vha->vp_idx && vha->timer_active) {
  20. del_timer_sync(&vha->timer);
  21. vha->timer_active = 0;
  22. }
  23. }
/*
 * qla24xx_allocate_vp_id() - reserve a free virtual-port index.
 * @vha: new virtual host to register.
 *
 * Under vport_lock, claims the first free slot in the vp_idx bitmap,
 * bumps the vport counters and links @vha onto ha->vp_list.
 *
 * Returns the allocated vp_id, or a value greater than
 * ha->max_npiv_vports when no slot is free (callers must check for
 * this out-of-range value).
 */
static uint32_t
qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
{
	uint32_t vp_id;
	struct qla_hw_data *ha = vha->hw;

	/* Find an empty slot and assign an vp_id */
	mutex_lock(&ha->vport_lock);
	vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
	if (vp_id > ha->max_npiv_vports) {
		DEBUG15(printk ("vp_id %d is bigger than max-supported %d.\n",
		    vp_id, ha->max_npiv_vports));
		mutex_unlock(&ha->vport_lock);
		return vp_id;
	}

	set_bit(vp_id, ha->vp_idx_map);
	ha->num_vhosts++;
	ha->cur_vport_count++;
	vha->vp_idx = vp_id;
	list_add_tail(&vha->list, &ha->vp_list);
	mutex_unlock(&ha->vport_lock);
	return vp_id;
}
/*
 * qla24xx_deallocate_vp_id() - release a virtual-port index.
 * @vha: virtual host being torn down.
 *
 * Mirror of qla24xx_allocate_vp_id(): under vport_lock it drops the
 * vport counters, clears the slot in the vp_idx bitmap and unlinks
 * @vha from ha->vp_list.
 */
void
qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
{
	uint16_t vp_id;
	struct qla_hw_data *ha = vha->hw;

	mutex_lock(&ha->vport_lock);
	vp_id = vha->vp_idx;
	ha->num_vhosts--;
	ha->cur_vport_count--;
	clear_bit(vp_id, ha->vp_idx_map);
	list_del(&vha->list);
	mutex_unlock(&ha->vport_lock);
}
  59. static scsi_qla_host_t *
  60. qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
  61. {
  62. scsi_qla_host_t *vha;
  63. struct scsi_qla_host *tvha;
  64. /* Locate matching device in database. */
  65. list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
  66. if (!memcmp(port_name, vha->port_name, WWN_SIZE))
  67. return vha;
  68. }
  69. return NULL;
  70. }
/*
 * qla2x00_mark_vp_devices_dead
 *	Updates fcport state when device goes offline.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fcport = port structure pointer.
 *
 * Return:
 *	None.
 *
 * Context:
 */
static void
qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
{
	fc_port_t *fcport;

	/* Walk every fcport attached to this vport and force it offline. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		DEBUG15(printk("scsi(%ld): Marking port dead, "
		    "loop_id=0x%04x :%x\n",
		    vha->host_no, fcport->loop_id, fcport->vp_idx));

		/* DEAD first so qla2x00_mark_device_lost() treats the port
		 * as gone; then park it in UNCONFIGURED for re-discovery. */
		atomic_set(&fcport->state, FCS_DEVICE_DEAD);
		qla2x00_mark_device_lost(vha, fcport, 0, 0);
		atomic_set(&fcport->state, FCS_UNCONFIGURED);
	}
}
  97. int
  98. qla24xx_disable_vp(scsi_qla_host_t *vha)
  99. {
  100. int ret;
  101. ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
  102. atomic_set(&vha->loop_state, LOOP_DOWN);
  103. atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
  104. qla2x00_mark_vp_devices_dead(vha);
  105. atomic_set(&vha->vp_state, VP_FAILED);
  106. vha->flags.management_server_logged_in = 0;
  107. if (ret == QLA_SUCCESS) {
  108. fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
  109. } else {
  110. fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
  111. return -1;
  112. }
  113. return 0;
  114. }
/*
 * qla24xx_enable_vp() - bring a virtual port online.
 * @vha: virtual port to enable.
 *
 * Refuses while the physical port's link is down; otherwise pushes the
 * vport configuration to the firmware (under vport_lock).
 *
 * Returns 0 on success, 1 on failure.
 */
int
qla24xx_enable_vp(scsi_qla_host_t *vha)
{
	int ret;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	/* Check if physical ha port is Up */
	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
	    atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
		vha->vp_err_state = VP_ERR_PORTDWN;
		fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
		goto enable_failed;
	}

	/* Initialize the new vport unless it is a persistent port */
	mutex_lock(&ha->vport_lock);
	ret = qla24xx_modify_vp_config(vha);
	mutex_unlock(&ha->vport_lock);

	if (ret != QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		goto enable_failed;
	}

	DEBUG15(qla_printk(KERN_INFO, ha,
	    "Virtual port with id: %d - Enabled\n", vha->vp_idx));
	return 0;

enable_failed:
	DEBUG15(qla_printk(KERN_INFO, ha,
	    "Virtual port with id: %d - Disabled\n", vha->vp_idx));
	return 1;
}
  144. static void
  145. qla24xx_configure_vp(scsi_qla_host_t *vha)
  146. {
  147. struct fc_vport *fc_vport;
  148. int ret;
  149. fc_vport = vha->fc_vport;
  150. DEBUG15(printk("scsi(%ld): %s: change request #3 for this host.\n",
  151. vha->host_no, __func__));
  152. ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
  153. if (ret != QLA_SUCCESS) {
  154. DEBUG15(qla_printk(KERN_ERR, vha->hw, "Failed to enable "
  155. "receiving of RSCN requests: 0x%x\n", ret));
  156. return;
  157. } else {
  158. /* Corresponds to SCR enabled */
  159. clear_bit(VP_SCR_NEEDED, &vha->vp_flags);
  160. }
  161. vha->flags.online = 1;
  162. if (qla24xx_configure_vhba(vha))
  163. return;
  164. atomic_set(&vha->vp_state, VP_ACTIVE);
  165. fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
  166. }
/*
 * qla2x00_alert_all_vps() - fan an async event out to every vport.
 * @rsp: response queue the event arrived on.
 * @mb:  mailbox register snapshot (mb[0] holds the event code).
 *
 * Only link/topology events relevant to virtual ports are forwarded;
 * the physical port (vp_idx == 0) is skipped because it already
 * processed the event.
 */
void
qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
{
	scsi_qla_host_t *vha, *tvha;
	struct qla_hw_data *ha = rsp->hw;
	int i = 0;	/* position in vp_list, used only for debug output */

	list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
		if (vha->vp_idx) {
			switch (mb[0]) {
			case MBA_LIP_OCCURRED:
			case MBA_LOOP_UP:
			case MBA_LOOP_DOWN:
			case MBA_LIP_RESET:
			case MBA_POINT_TO_POINT:
			case MBA_CHG_IN_CONNECTION:
			case MBA_PORT_UPDATE:
			case MBA_RSCN_UPDATE:
				DEBUG15(printk("scsi(%ld)%s: Async_event for"
				    " VP[%d], mb = 0x%x, vha=%p\n",
				    vha->host_no, __func__, i, *mb, vha));
				qla2x00_async_event(vha, rsp, mb);
				break;
			}
		}
		i++;
	}
}
/*
 * qla2x00_vp_abort_isp() - vport-side handling of an ISP abort.
 * @vha: virtual port to recover.
 *
 * Treats the abort as a loop-down for the vport, logs out its sessions
 * (unless an abort is already active) and schedules re-enable.
 *
 * Returns the qla24xx_enable_vp() result (0 success, 1 failure).
 */
int
qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
{
	/*
	 * Physical port will do most of the abort and recovery work. We can
	 * just treat it as a loop down
	 */
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha, 0);
	} else {
		/* Already down: just re-arm the loop-down timer. */
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	}

	/* To exclusively reset vport, we need to log it out first.*/
	if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
		qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);

	DEBUG15(printk("scsi(%ld): Scheduling enable of Vport %d...\n",
	    vha->host_no, vha->vp_idx));
	return qla24xx_enable_vp(vha);
}
/*
 * qla2x00_do_dpc_vp() - per-vport deferred-procedure-call work.
 * @vha: virtual port whose dpc_flags are serviced.
 *
 * Runs the same housekeeping the main DPC thread performs for the
 * physical port: complete initial port configuration once the base
 * port's loop is ready, update fcports, retry logins, and resync the
 * loop.  Always returns 0.
 */
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
		/* VP acquired. complete port configuration */
		if (atomic_read(&base_vha->loop_state) == LOOP_READY) {
			qla24xx_configure_vp(vha);
		} else {
			/* Base port not ready yet: re-arm the flag and ask
			 * the base port's DPC to run us again later. */
			set_bit(VP_IDX_ACQUIRED, &vha->vp_flags);
			set_bit(VP_DPC_NEEDED, &base_vha->dpc_flags);
		}

		return 0;
	}

	if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
		qla2x00_update_fcports(vha);
		clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
	}

	/* Relogin only while the loop is up and no resync is pending. */
	if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) &&
	    !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
	    atomic_read(&vha->loop_state) != LOOP_DOWN) {

		DEBUG(printk("scsi(%ld): qla2x00_port_login()\n",
		    vha->host_no));
		qla2x00_relogin(vha);

		DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n",
		    vha->host_no));
	}

	if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
	    (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
		/* Marker handling is a no-op for vports; just release the
		 * RESET_ACTIVE guard taken in the condition above. */
		clear_bit(RESET_ACTIVE, &vha->dpc_flags);
	}

	if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
			qla2x00_loop_resync(vha);
			clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
		}
	}

	return 0;
}
  255. void
  256. qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
  257. {
  258. int ret;
  259. struct qla_hw_data *ha = vha->hw;
  260. scsi_qla_host_t *vp;
  261. struct scsi_qla_host *tvp;
  262. if (vha->vp_idx)
  263. return;
  264. if (list_empty(&ha->vp_list))
  265. return;
  266. clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);
  267. list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
  268. if (vp->vp_idx)
  269. ret = qla2x00_do_dpc_vp(vp);
  270. }
  271. }
  272. int
  273. qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
  274. {
  275. scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
  276. struct qla_hw_data *ha = base_vha->hw;
  277. scsi_qla_host_t *vha;
  278. uint8_t port_name[WWN_SIZE];
  279. if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
  280. return VPCERR_UNSUPPORTED;
  281. /* Check up the F/W and H/W support NPIV */
  282. if (!ha->flags.npiv_supported)
  283. return VPCERR_UNSUPPORTED;
  284. /* Check up whether npiv supported switch presented */
  285. if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
  286. return VPCERR_NO_FABRIC_SUPP;
  287. /* Check up unique WWPN */
  288. u64_to_wwn(fc_vport->port_name, port_name);
  289. if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
  290. return VPCERR_BAD_WWN;
  291. vha = qla24xx_find_vhost_by_name(ha, port_name);
  292. if (vha)
  293. return VPCERR_BAD_WWN;
  294. /* Check up max-npiv-supports */
  295. if (ha->num_vhosts > ha->max_npiv_vports) {
  296. DEBUG15(printk("scsi(%ld): num_vhosts %ud is bigger than "
  297. "max_npv_vports %ud.\n", base_vha->host_no,
  298. ha->num_vhosts, ha->max_npiv_vports));
  299. return VPCERR_UNSUPPORTED;
  300. }
  301. return 0;
  302. }
/*
 * qla24xx_create_vhost() - allocate and initialize a new virtual host.
 * @fc_vport: FC-transport vport this host will back.
 *
 * Allocates a scsi_qla_host via qla2x00_create_host(), claims a vp_id,
 * seeds timers/DPC flags and the Scsi_Host parameters, and returns the
 * new vha, or NULL on failure.
 */
scsi_qla_host_t *
qla24xx_create_vhost(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	struct scsi_host_template *sht = &qla2xxx_driver_template;
	struct Scsi_Host *host;

	vha = qla2x00_create_host(sht, ha);
	if (!vha) {
		DEBUG(printk("qla2xxx: scsi_host_alloc() failed for vport\n"));
		return(NULL);
	}

	host = vha->host;
	fc_vport->dd_data = vha;
	/* New host info */
	u64_to_wwn(fc_vport->node_name, vha->node_name);
	u64_to_wwn(fc_vport->port_name, vha->port_name);

	vha->fc_vport = fc_vport;
	vha->device_flags = 0;
	vha->vp_idx = qla24xx_allocate_vp_id(vha);
	/* An out-of-range vp_idx signals allocation failure. */
	if (vha->vp_idx > ha->max_npiv_vports) {
		DEBUG15(printk("scsi(%ld): Couldn't allocate vp_id.\n",
		    vha->host_no));
		/* NOTE(review): the host allocated by qla2x00_create_host()
		 * is not released on this path — looks like a leak; confirm
		 * against the caller's error handling. */
		goto create_vhost_failed;
	}
	vha->mgmt_svr_loop_id = 10 + vha->vp_idx;

	vha->dpc_flags = 0L;
	set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
	set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);

	/*
	 * To fix the issue of processing a parent's RSCN for the vport before
	 * its SCR is complete.
	 */
	set_bit(VP_SCR_NEEDED, &vha->vp_flags);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

	qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL);

	/* The vport initially shares the base (index 0) request queue. */
	memset(vha->req_ques, 0, sizeof(vha->req_ques));
	vha->req_ques[0] = ha->req_q_map[0]->id;
	host->can_queue = ha->req_q_map[0]->length + 128;
	host->this_id = 255;
	host->cmd_per_lun = 3;
	host->max_cmd_len = MAX_CMDSZ;
	host->max_channel = MAX_BUSES - 1;
	host->max_lun = MAX_LUNS;
	host->unique_id = host->host_no;
	host->max_id = MAX_TARGETS_2200;
	host->transportt = qla2xxx_transport_vport_template;

	DEBUG15(printk("DEBUG: detect vport hba %ld at address = %p\n",
	    vha->host_no, vha));

	vha->flags.init_done = 1;

	return vha;

create_vhost_failed:
	return NULL;
}
  359. static void
  360. qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
  361. {
  362. struct qla_hw_data *ha = vha->hw;
  363. uint16_t que_id = req->id;
  364. dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
  365. sizeof(request_t), req->ring, req->dma);
  366. req->ring = NULL;
  367. req->dma = 0;
  368. if (que_id) {
  369. ha->req_q_map[que_id] = NULL;
  370. mutex_lock(&ha->vport_lock);
  371. clear_bit(que_id, ha->req_qid_map);
  372. mutex_unlock(&ha->vport_lock);
  373. }
  374. kfree(req);
  375. req = NULL;
  376. }
  377. static void
  378. qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
  379. {
  380. struct qla_hw_data *ha = vha->hw;
  381. uint16_t que_id = rsp->id;
  382. if (rsp->msix && rsp->msix->have_irq) {
  383. free_irq(rsp->msix->vector, rsp);
  384. rsp->msix->have_irq = 0;
  385. rsp->msix->rsp = NULL;
  386. }
  387. dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
  388. sizeof(response_t), rsp->ring, rsp->dma);
  389. rsp->ring = NULL;
  390. rsp->dma = 0;
  391. if (que_id) {
  392. ha->rsp_q_map[que_id] = NULL;
  393. mutex_lock(&ha->vport_lock);
  394. clear_bit(que_id, ha->rsp_qid_map);
  395. mutex_unlock(&ha->vport_lock);
  396. }
  397. kfree(rsp);
  398. rsp = NULL;
  399. }
  400. int
  401. qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
  402. {
  403. int ret = -1;
  404. if (req) {
  405. req->options |= BIT_0;
  406. ret = qla25xx_init_req_que(vha, req);
  407. }
  408. if (ret == QLA_SUCCESS)
  409. qla25xx_free_req_que(vha, req);
  410. return ret;
  411. }
  412. int
  413. qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
  414. {
  415. int ret = -1;
  416. if (rsp) {
  417. rsp->options |= BIT_0;
  418. ret = qla25xx_init_rsp_que(vha, rsp);
  419. }
  420. if (ret == QLA_SUCCESS)
  421. qla25xx_free_rsp_que(vha, rsp);
  422. return ret;
  423. }
/*
 * qla25xx_update_req_que() - change the QoS value of a request queue.
 * @vha: issuing host.
 * @que: index of the queue in ha->req_q_map.
 * @qos: new quality-of-service value.
 *
 * BIT_3 in options marks this init call as a QoS update for the
 * firmware; it is cleared again once the command completes.
 *
 * Returns the qla25xx_init_req_que() status.
 */
int qla25xx_update_req_que(struct scsi_qla_host *vha, uint8_t que, uint8_t qos)
{
	int ret = 0;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[que];

	req->options |= BIT_3;
	req->qos = qos;
	ret = qla25xx_init_req_que(vha, req);
	if (ret != QLA_SUCCESS)
		DEBUG2_17(printk(KERN_WARNING "%s failed\n", __func__));
	/* restore options bit */
	req->options &= ~BIT_3;
	return ret;
}
/* Delete all queues for a given vhost */
/*
 * qla25xx_delete_queues() - delete queues owned by a vhost.
 * @vha:    owning virtual host.
 * @que_no: specific request-queue id to delete, or 0 to delete every
 *          queue listed in vha->req_ques[].
 *
 * Each request queue's associated response queue is deleted with it.
 * Returns QLA_SUCCESS, or the first failing delete status.
 */
int
qla25xx_delete_queues(struct scsi_qla_host *vha, uint8_t que_no)
{
	int cnt, ret = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct qla_hw_data *ha = vha->hw;

	if (que_no) {
		/* Delete request queue */
		req = ha->req_q_map[que_no];
		if (req) {
			/* Grab the rsp pointer before req is freed. */
			rsp = req->rsp;
			ret = qla25xx_delete_req_que(vha, req);
			if (ret != QLA_SUCCESS) {
				qla_printk(KERN_WARNING, ha,
				    "Couldn't delete req que %d\n", req->id);
				return ret;
			}
			/* Delete associated response queue */
			if (rsp) {
				ret = qla25xx_delete_rsp_que(vha, rsp);
				if (ret != QLA_SUCCESS) {
					qla_printk(KERN_WARNING, ha,
					    "Couldn't delete rsp que %d\n",
					    rsp->id);
					return ret;
				}
			}
		}
	} else {	/* delete all queues of this host */
		for (cnt = 0; cnt < QLA_MAX_HOST_QUES; cnt++) {
			/* Delete request queues */
			req = ha->req_q_map[vha->req_ques[cnt]];
			/* Skip empty slots and the shared base queue (id 0). */
			if (req && req->id) {
				rsp = req->rsp;
				ret = qla25xx_delete_req_que(vha, req);
				if (ret != QLA_SUCCESS) {
					qla_printk(KERN_WARNING, ha,
					    "Couldn't delete req que %d\n",
					    vha->req_ques[cnt]);
					return ret;
				}
				/* Slot now refers back to the base queue. */
				vha->req_ques[cnt] = ha->req_q_map[0]->id;
				/* Delete associated response queue */
				if (rsp && rsp->id) {
					ret = qla25xx_delete_rsp_que(vha, rsp);
					if (ret != QLA_SUCCESS) {
						qla_printk(KERN_WARNING, ha,
						    "Couldn't delete rsp que %d\n",
						    rsp->id);
						return ret;
					}
				}
			}
		}
	}
	qla_printk(KERN_INFO, ha, "Queues deleted for vport:%d\n",
	    vha->vp_idx);
	return ret;
}
  499. int
  500. qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
  501. uint8_t vp_idx, uint16_t rid, uint8_t rsp_que, uint8_t qos)
  502. {
  503. int ret = 0;
  504. struct req_que *req = NULL;
  505. struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
  506. uint16_t que_id = 0;
  507. device_reg_t __iomem *reg;
  508. req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
  509. if (req == NULL) {
  510. qla_printk(KERN_WARNING, ha, "could not allocate memory"
  511. "for request que\n");
  512. goto que_failed;
  513. }
  514. req->length = REQUEST_ENTRY_CNT_24XX;
  515. req->ring = dma_alloc_coherent(&ha->pdev->dev,
  516. (req->length + 1) * sizeof(request_t),
  517. &req->dma, GFP_KERNEL);
  518. if (req->ring == NULL) {
  519. qla_printk(KERN_WARNING, ha,
  520. "Memory Allocation failed - request_ring\n");
  521. goto que_failed;
  522. }
  523. mutex_lock(&ha->vport_lock);
  524. que_id = find_first_zero_bit(ha->req_qid_map, ha->max_queues);
  525. if (que_id >= ha->max_queues) {
  526. mutex_unlock(&ha->vport_lock);
  527. qla_printk(KERN_INFO, ha, "No resources to create "
  528. "additional request queue\n");
  529. goto que_failed;
  530. }
  531. set_bit(que_id, ha->req_qid_map);
  532. ha->req_q_map[que_id] = req;
  533. req->rid = rid;
  534. req->vp_idx = vp_idx;
  535. req->qos = qos;
  536. if (ha->rsp_q_map[rsp_que]) {
  537. req->rsp = ha->rsp_q_map[rsp_que];
  538. req->rsp->req = req;
  539. }
  540. /* Use alternate PCI bus number */
  541. if (MSB(req->rid))
  542. options |= BIT_4;
  543. /* Use alternate PCI devfn */
  544. if (LSB(req->rid))
  545. options |= BIT_5;
  546. req->options = options;
  547. req->ring_ptr = req->ring;
  548. req->ring_index = 0;
  549. req->cnt = req->length;
  550. req->id = que_id;
  551. reg = ISP_QUE_REG(ha, que_id);
  552. req->req_q_in = &reg->isp25mq.req_q_in;
  553. req->req_q_out = &reg->isp25mq.req_q_out;
  554. req->max_q_depth = ha->req_q_map[0]->max_q_depth;
  555. mutex_unlock(&ha->vport_lock);
  556. ret = qla25xx_init_req_que(base_vha, req);
  557. if (ret != QLA_SUCCESS) {
  558. qla_printk(KERN_WARNING, ha, "%s failed\n", __func__);
  559. mutex_lock(&ha->vport_lock);
  560. clear_bit(que_id, ha->req_qid_map);
  561. mutex_unlock(&ha->vport_lock);
  562. goto que_failed;
  563. }
  564. return req->id;
  565. que_failed:
  566. qla25xx_free_req_que(base_vha, req);
  567. return 0;
  568. }
  569. /* create response queue */
  570. int
  571. qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
  572. uint8_t vp_idx, uint16_t rid)
  573. {
  574. int ret = 0;
  575. struct rsp_que *rsp = NULL;
  576. struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
  577. uint16_t que_id = 0;
  578. device_reg_t __iomem *reg;
  579. rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
  580. if (rsp == NULL) {
  581. qla_printk(KERN_WARNING, ha, "could not allocate memory for"
  582. " response que\n");
  583. goto que_failed;
  584. }
  585. rsp->length = RESPONSE_ENTRY_CNT_2300;
  586. rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
  587. (rsp->length + 1) * sizeof(response_t),
  588. &rsp->dma, GFP_KERNEL);
  589. if (rsp->ring == NULL) {
  590. qla_printk(KERN_WARNING, ha,
  591. "Memory Allocation failed - response_ring\n");
  592. goto que_failed;
  593. }
  594. mutex_lock(&ha->vport_lock);
  595. que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_queues);
  596. if (que_id >= ha->max_queues) {
  597. mutex_unlock(&ha->vport_lock);
  598. qla_printk(KERN_INFO, ha, "No resources to create "
  599. "additional response queue\n");
  600. goto que_failed;
  601. }
  602. set_bit(que_id, ha->rsp_qid_map);
  603. if (ha->flags.msix_enabled)
  604. rsp->msix = &ha->msix_entries[que_id + 1];
  605. else
  606. qla_printk(KERN_WARNING, ha, "msix not enabled\n");
  607. ha->rsp_q_map[que_id] = rsp;
  608. rsp->rid = rid;
  609. rsp->vp_idx = vp_idx;
  610. rsp->hw = ha;
  611. /* Use alternate PCI bus number */
  612. if (MSB(rsp->rid))
  613. options |= BIT_4;
  614. /* Use alternate PCI devfn */
  615. if (LSB(rsp->rid))
  616. options |= BIT_5;
  617. rsp->options = options;
  618. rsp->ring_ptr = rsp->ring;
  619. rsp->ring_index = 0;
  620. rsp->id = que_id;
  621. reg = ISP_QUE_REG(ha, que_id);
  622. rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
  623. rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
  624. mutex_unlock(&ha->vport_lock);
  625. ret = qla25xx_request_irq(rsp);
  626. if (ret)
  627. goto que_failed;
  628. ret = qla25xx_init_rsp_que(base_vha, rsp);
  629. if (ret != QLA_SUCCESS) {
  630. qla_printk(KERN_WARNING, ha, "%s failed\n", __func__);
  631. mutex_lock(&ha->vport_lock);
  632. clear_bit(que_id, ha->rsp_qid_map);
  633. mutex_unlock(&ha->vport_lock);
  634. goto que_failed;
  635. }
  636. qla2x00_init_response_q_entries(rsp);
  637. return rsp->id;
  638. que_failed:
  639. qla25xx_free_rsp_que(base_vha, rsp);
  640. return 0;
  641. }
  642. int
  643. qla25xx_create_queues(struct scsi_qla_host *vha, uint8_t qos)
  644. {
  645. uint16_t options = 0;
  646. uint8_t ret = 0;
  647. struct qla_hw_data *ha = vha->hw;
  648. options |= BIT_1;
  649. ret = qla25xx_create_rsp_que(ha, options, vha->vp_idx, 0);
  650. if (!ret) {
  651. qla_printk(KERN_WARNING, ha, "Response Que create failed\n");
  652. return ret;
  653. } else
  654. qla_printk(KERN_INFO, ha, "Response Que:%d created.\n", ret);
  655. options = 0;
  656. if (qos & BIT_7)
  657. options |= BIT_8;
  658. ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, ret,
  659. qos & ~BIT_7);
  660. if (ret) {
  661. vha->req_ques[0] = ret;
  662. qla_printk(KERN_INFO, ha, "Request Que:%d created.\n", ret);
  663. } else
  664. qla_printk(KERN_WARNING, ha, "Request Que create failed\n");
  665. return ret;
  666. }