@@ -212,7 +212,7 @@ lpfc_sli4_eq_get(struct lpfc_queue *q)
 	struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe;
 
 	/* If the next EQE is not valid then we are done */
-	if (!bf_get(lpfc_eqe_valid, eqe))
+	if (!bf_get_le32(lpfc_eqe_valid, eqe))
 		return NULL;
 	/* If the host has not yet processed the next entry then we are done */
 	if (((q->hba_index + 1) % q->entry_count) == q->host_index)
@@ -247,7 +247,7 @@ lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
 	/* while there are valid entries */
 	while (q->hba_index != q->host_index) {
 		temp_eqe = q->qe[q->host_index].eqe;
-		bf_set(lpfc_eqe_valid, temp_eqe, 0);
+		bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
 		released++;
 		q->host_index = ((q->host_index + 1) % q->entry_count);
 	}
@@ -285,7 +285,7 @@ lpfc_sli4_cq_get(struct lpfc_queue *q)
 	struct lpfc_cqe *cqe;
 
 	/* If the next CQE is not valid then we are done */
-	if (!bf_get(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
+	if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
 		return NULL;
 	/* If the host has not yet processed the next entry then we are done */
 	if (((q->hba_index + 1) % q->entry_count) == q->host_index)
@@ -321,7 +321,7 @@ lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
 	/* while there are valid entries */
 	while (q->hba_index != q->host_index) {
 		temp_qe = q->qe[q->host_index].cqe;
-		bf_set(lpfc_cqe_valid, temp_qe, 0);
+		bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
 		released++;
 		q->host_index = ((q->host_index + 1) % q->entry_count);
 	}
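
The four hunks above move EQE/CQE field access onto the little-endian accessor variants. SLI4 queue entries are DMA'd by the port in little-endian byte order, so the CPU-native bf_get()/bf_set() silently misread the valid bit on big-endian hosts. For reference, a minimal sketch of the le32 accessor pair following the driver's name##_SHIFT/_MASK/_WORD field convention (the authoritative definitions belong in lpfc_hw4.h):

	#define bf_get_le32(name, ptr) \
		((le32_to_cpu((ptr)->name##_WORD) >> name##_SHIFT) & \
		 name##_MASK)
	#define bf_set_le32(name, ptr, value) \
		((ptr)->name##_WORD = cpu_to_le32((((value) & name##_MASK) << \
		name##_SHIFT) | (le32_to_cpu((ptr)->name##_WORD) & \
		~(name##_MASK << name##_SHIFT))))
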
@@ -1659,6 +1659,8 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
 	case MBX_INIT_VPI:
 	case MBX_INIT_VFI:
 	case MBX_RESUME_RPI:
+	case MBX_READ_EVENT_LOG_STATUS:
+	case MBX_READ_EVENT_LOG:
 		ret = mbxCommand;
 		break;
 	default:
@@ -4296,7 +4298,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 			"2570 Failed to read FCoE parameters\n");
 
 	/* Issue READ_REV to collect vpd and FW information. */
-	vpd_size = PAGE_SIZE;
+	vpd_size = SLI4_PAGE_SIZE;
 	vpd = kzalloc(vpd_size, GFP_KERNEL);
 	if (!vpd) {
 		rc = -ENOMEM;
@@ -4891,9 +4893,34 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
 	mb->mbxOwner = OWN_CHIP;
 
 	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
-		/* First copy command data to host SLIM area */
+		/* Populate mbox extension offset word. */
+		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
+			*(((uint32_t *)mb) + pmbox->mbox_offset_word)
+				= (uint8_t *)phba->mbox_ext
+				  - (uint8_t *)phba->mbox;
+		}
+
+		/* Copy the mailbox extension data */
+		if (pmbox->in_ext_byte_len && pmbox->context2) {
+			lpfc_sli_pcimem_bcopy(pmbox->context2,
+				(uint8_t *)phba->mbox_ext,
+				pmbox->in_ext_byte_len);
+		}
+		/* Copy command data to host SLIM area */
 		lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
 	} else {
+		/* Populate mbox extension offset word. */
+		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
+			*(((uint32_t *)mb) + pmbox->mbox_offset_word)
+				= MAILBOX_HBA_EXT_OFFSET;
+
+		/* Copy the mailbox extension data */
+		if (pmbox->in_ext_byte_len && pmbox->context2) {
+			lpfc_memcpy_to_slim(phba->MBslimaddr +
+				MAILBOX_HBA_EXT_OFFSET,
+				pmbox->context2, pmbox->in_ext_byte_len);
+
+		}
 		if (mb->mbxCommand == MBX_CONFIG_PORT) {
 			/* copy command data into host mbox for cmpl */
 			lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
@@ -5003,15 +5030,22 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
 		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
 			/* copy results back to user */
 			lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
+			/* Copy the mailbox extension data */
+			if (pmbox->out_ext_byte_len && pmbox->context2) {
+				lpfc_sli_pcimem_bcopy(phba->mbox_ext,
+						      pmbox->context2,
+						      pmbox->out_ext_byte_len);
+			}
 		} else {
 			/* First copy command data */
 			lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
 							MAILBOX_CMD_SIZE);
-			if ((mb->mbxCommand == MBX_DUMP_MEMORY) &&
-				pmbox->context2) {
-				lpfc_memcpy_from_slim((void *)pmbox->context2,
-				      phba->MBslimaddr + DMP_RSP_OFFSET,
-				      mb->un.varDmp.word_cnt);
+			/* Copy the mailbox extension data */
+			if (pmbox->out_ext_byte_len && pmbox->context2) {
+				lpfc_memcpy_from_slim(pmbox->context2,
+					phba->MBslimaddr +
+					MAILBOX_HBA_EXT_OFFSET,
+					pmbox->out_ext_byte_len);
 			}
 		}
 
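
The two hunks above generalize the old MBX_DUMP_MEMORY special case into mailbox-extension handling: a designated word in the command (indexed by pmbox->mbox_offset_word) tells the port where the extension payload starts. With SLI active the extension lives in host memory directly behind the mailbox, so the offset is a plain pointer difference; on the SLIM path it is the fixed MAILBOX_HBA_EXT_OFFSET. A standalone sketch of the pointer-difference case, with purely illustrative region sizes:

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative host layout: command words followed by the
	 * extension area; 64 words stand in for the real sizes. */
	struct host_slim {
		uint32_t mbox[64];
		uint32_t mbox_ext[64];
	};

	int main(void)
	{
		struct host_slim slim;
		/* byte offset stored into the command's offset word in
		 * the LPFC_SLI_ACTIVE branch above */
		ptrdiff_t off = (uint8_t *)slim.mbox_ext
				- (uint8_t *)slim.mbox;
		printf("extension starts %td bytes in\n", off); /* 256 */
		return 0;
	}
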
@@ -7104,13 +7138,11 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		 */
 		list_del_init(&abort_iocb->list);
 		pring->txcmplq_cnt--;
-		spin_unlock_irq(&phba->hbalock);
 
 		/* Firmware could still be in progress of DMAing
 		 * payload, so don't free data buffer till after
 		 * a hbeat.
 		 */
-		spin_lock_irq(&phba->hbalock);
 		abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE;
 		abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
 		spin_unlock_irq(&phba->hbalock);
@@ -7118,7 +7150,8 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
 		abort_iocb->iocb.un.ulpWord[4] = IOERR_ABORT_REQUESTED;
 		(abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb);
-	}
+	} else
+		spin_unlock_irq(&phba->hbalock);
 	}
 
 	lpfc_sli_release_iocbq(phba, cmdiocb);
@@ -8133,6 +8166,12 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
 				if (pmb->mbox_cmpl) {
 					lpfc_sli_pcimem_bcopy(mbox, pmbox,
 							MAILBOX_CMD_SIZE);
+					if (pmb->out_ext_byte_len &&
+						pmb->context2)
+						lpfc_sli_pcimem_bcopy(
+						phba->mbox_ext,
+						pmb->context2,
+						pmb->out_ext_byte_len);
 				}
 				if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
 					pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
@@ -8983,17 +9022,17 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
 	int ecount = 0;
 	uint16_t cqid;
 
-	if (bf_get(lpfc_eqe_major_code, eqe) != 0) {
+	if (bf_get_le32(lpfc_eqe_major_code, eqe) != 0) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"0359 Not a valid slow-path completion "
 				"event: majorcode=x%x, minorcode=x%x\n",
-				bf_get(lpfc_eqe_major_code, eqe),
-				bf_get(lpfc_eqe_minor_code, eqe));
+				bf_get_le32(lpfc_eqe_major_code, eqe),
+				bf_get_le32(lpfc_eqe_minor_code, eqe));
 		return;
 	}
 
 	/* Get the reference to the corresponding CQ */
-	cqid = bf_get(lpfc_eqe_resource_id, eqe);
+	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
 
 	/* Search for completion queue pointer matching this cqid */
 	speq = phba->sli4_hba.sp_eq;
@@ -9221,12 +9260,12 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
 	uint16_t cqid;
 	int ecount = 0;
 
-	if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0)) {
+	if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"0366 Not a valid fast-path completion "
 				"event: majorcode=x%x, minorcode=x%x\n",
-				bf_get(lpfc_eqe_major_code, eqe),
-				bf_get(lpfc_eqe_minor_code, eqe));
+				bf_get_le32(lpfc_eqe_major_code, eqe),
+				bf_get_le32(lpfc_eqe_minor_code, eqe));
 		return;
 	}
 
@@ -9239,7 +9278,7 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
 	}
 
 	/* Get the reference to the corresponding CQ */
-	cqid = bf_get(lpfc_eqe_resource_id, eqe);
+	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
 	if (unlikely(cqid != cq->queue_id)) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 			"0368 Miss-matched fast-path completion "
@@ -9506,7 +9545,7 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
 	while (!list_empty(&queue->page_list)) {
 		list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
 				 list);
-		dma_free_coherent(&queue->phba->pcidev->dev, PAGE_SIZE,
+		dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE,
 				  dmabuf->virt, dmabuf->phys);
 		kfree(dmabuf);
 	}
@@ -9532,13 +9571,17 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
 	struct lpfc_dmabuf *dmabuf;
 	int x, total_qe_count;
 	void *dma_pointer;
+	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
 
+	if (!phba->sli4_hba.pc_sli4_params.supported)
+		hw_page_size = SLI4_PAGE_SIZE;
 
 	queue = kzalloc(sizeof(struct lpfc_queue) +
 			(sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
 	if (!queue)
 		return NULL;
-	queue->page_count = (PAGE_ALIGN(entry_size * entry_count))/PAGE_SIZE;
+	queue->page_count = (ALIGN(entry_size * entry_count,
+			hw_page_size))/hw_page_size;
 	INIT_LIST_HEAD(&queue->list);
 	INIT_LIST_HEAD(&queue->page_list);
 	INIT_LIST_HEAD(&queue->child_list);
@@ -9547,19 +9590,19 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
 		if (!dmabuf)
 			goto out_fail;
 		dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
-						  PAGE_SIZE, &dmabuf->phys,
+						  hw_page_size, &dmabuf->phys,
 						  GFP_KERNEL);
 		if (!dmabuf->virt) {
 			kfree(dmabuf);
 			goto out_fail;
 		}
-		memset(dmabuf->virt, 0, PAGE_SIZE);
+		memset(dmabuf->virt, 0, hw_page_size);
 		dmabuf->buffer_tag = x;
 		list_add_tail(&dmabuf->list, &queue->page_list);
 		/* initialize queue's entry array */
 		dma_pointer = dmabuf->virt;
 		for (; total_qe_count < entry_count &&
-		     dma_pointer < (PAGE_SIZE + dmabuf->virt);
+		     dma_pointer < (hw_page_size + dmabuf->virt);
 		     total_qe_count++, dma_pointer += entry_size) {
 			queue->qe[total_qe_count].address = dma_pointer;
 		}
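
From here on, queue DMA memory is sized by the page size the port reports (pc_sli4_params.if_page_sz, with SLI4_PAGE_SIZE as the fallback when the parameters mailbox is unsupported) rather than the host CPU's PAGE_SIZE. The page count is a round-up division; a self-contained sketch with illustrative queue geometry:

	#include <stdint.h>
	#include <stdio.h>

	/* Round x up to the power-of-two boundary a, as the kernel's
	 * ALIGN() macro does. */
	#define ALIGN(x, a) (((x) + ((a) - 1)) & ~((a) - 1))

	int main(void)
	{
		uint32_t entry_size = 64;     /* illustrative entry size */
		uint32_t entry_count = 256;   /* illustrative queue depth */
		uint32_t hw_page_size = 4096; /* SLI4_PAGE_SIZE */
		uint32_t pages = ALIGN(entry_size * entry_count,
				       hw_page_size) / hw_page_size;
		printf("page_count = %u\n", pages); /* 16384/4096 = 4 */
		return 0;
	}

The memset(dmabuf->virt, 0, hw_page_size) calls added to the create routines below zero each page with the same reported size.
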
@@ -9604,6 +9647,10 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
 	uint32_t shdr_status, shdr_add_status;
 	union lpfc_sli4_cfg_shdr *shdr;
 	uint16_t dmult;
+	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+	if (!phba->sli4_hba.pc_sli4_params.supported)
+		hw_page_size = SLI4_PAGE_SIZE;
 
 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (!mbox)
@@ -9653,6 +9700,7 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
 		break;
 	}
 	list_for_each_entry(dmabuf, &eq->page_list, list) {
+		memset(dmabuf->virt, 0, hw_page_size);
 		eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
 					putPaddrLow(dmabuf->phys);
 		eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
@@ -9715,6 +9763,11 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
 	int rc, length, status = 0;
 	uint32_t shdr_status, shdr_add_status;
 	union lpfc_sli4_cfg_shdr *shdr;
+	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+	if (!phba->sli4_hba.pc_sli4_params.supported)
+		hw_page_size = SLI4_PAGE_SIZE;
+
 
 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (!mbox)
@@ -9752,6 +9805,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
 		break;
 	}
 	list_for_each_entry(dmabuf, &cq->page_list, list) {
+		memset(dmabuf->virt, 0, hw_page_size);
 		cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
 					putPaddrLow(dmabuf->phys);
 		cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
@@ -9790,10 +9844,71 @@ out:
 	return status;
 }
 
+/**
+ * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @mq: The queue structure to use to create the mailbox queue.
+ * @mbox: An allocated pointer to type LPFC_MBOXQ_t
+ * @cq: The completion queue to associate with this mq.
+ *
+ * This function provides failback (fb) functionality when the
+ * mq_create_ext fails on older FW generations. Its purpose is identical
+ * to mq_create_ext otherwise.
+ *
+ * This routine cannot fail as all attributes were previously accessed and
+ * initialized in mq_create_ext.
+ **/
+static void
+lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
+		       LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
+{
+	struct lpfc_mbx_mq_create *mq_create;
+	struct lpfc_dmabuf *dmabuf;
+	int length;
+
+	length = (sizeof(struct lpfc_mbx_mq_create) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_MQ_CREATE,
+			 length, LPFC_SLI4_MBX_EMBED);
+	mq_create = &mbox->u.mqe.un.mq_create;
+	bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
+	       mq->page_count);
+	bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
+	       cq->queue_id);
+	bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
+	switch (mq->entry_count) {
+	case 16:
+		bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+		       LPFC_MQ_CNT_16);
+		break;
+	case 32:
+		bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+		       LPFC_MQ_CNT_32);
+		break;
+	case 64:
+		bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+		       LPFC_MQ_CNT_64);
+		break;
+	case 128:
+		bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+		       LPFC_MQ_CNT_128);
+		break;
+	}
+	list_for_each_entry(dmabuf, &mq->page_list, list) {
+		mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+			putPaddrLow(dmabuf->phys);
+		mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+			putPaddrHigh(dmabuf->phys);
+	}
+}
+
 /**
  * lpfc_mq_create - Create a mailbox Queue on the HBA
  * @phba: HBA structure that indicates port to create a queue on.
  * @mq: The queue structure to use to create the mailbox queue.
+ * @cq: The completion queue to associate with this mq.
+ * @subtype: The queue's subtype.
  *
  * This function creates a mailbox queue, as detailed in @mq, on a port,
  * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
@@ -9809,31 +9924,43 @@ out:
  * memory this function will return ENOMEM. If the queue create mailbox command
  * fails this function will return ENXIO.
  **/
-uint32_t
+int32_t
 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
 	       struct lpfc_queue *cq, uint32_t subtype)
 {
 	struct lpfc_mbx_mq_create *mq_create;
+	struct lpfc_mbx_mq_create_ext *mq_create_ext;
 	struct lpfc_dmabuf *dmabuf;
 	LPFC_MBOXQ_t *mbox;
 	int rc, length, status = 0;
 	uint32_t shdr_status, shdr_add_status;
 	union lpfc_sli4_cfg_shdr *shdr;
+	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+	if (!phba->sli4_hba.pc_sli4_params.supported)
+		hw_page_size = SLI4_PAGE_SIZE;
 
 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (!mbox)
 		return -ENOMEM;
-	length = (sizeof(struct lpfc_mbx_mq_create) -
+	length = (sizeof(struct lpfc_mbx_mq_create_ext) -
 		  sizeof(struct lpfc_sli4_cfg_mhdr));
 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
-			 LPFC_MBOX_OPCODE_MQ_CREATE,
+			 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
 			 length, LPFC_SLI4_MBX_EMBED);
-	mq_create = &mbox->u.mqe.un.mq_create;
-	bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
+
+	mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
+	bf_set(lpfc_mbx_mq_create_ext_num_pages, &mq_create_ext->u.request,
 	       mq->page_count);
-	bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
-	       cq->queue_id);
-	bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
+	bf_set(lpfc_mbx_mq_create_ext_async_evt_link, &mq_create_ext->u.request,
+	       1);
+	bf_set(lpfc_mbx_mq_create_ext_async_evt_fcfste,
+	       &mq_create_ext->u.request, 1);
+	bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
+	       &mq_create_ext->u.request, 1);
+	bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
+	       cq->queue_id);
+	bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
 	switch (mq->entry_count) {
 	default:
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -9843,31 +9970,47 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
 		return -EINVAL;
 	/* otherwise default to smallest count (drop through) */
 	case 16:
-		bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+		bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
 		       LPFC_MQ_CNT_16);
 		break;
 	case 32:
-		bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+		bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
 		       LPFC_MQ_CNT_32);
 		break;
 	case 64:
-		bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+		bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
 		       LPFC_MQ_CNT_64);
 		break;
 	case 128:
-		bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+		bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
 		       LPFC_MQ_CNT_128);
 		break;
 	}
 	list_for_each_entry(dmabuf, &mq->page_list, list) {
-		mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+		memset(dmabuf->virt, 0, hw_page_size);
+		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
 			putPaddrLow(dmabuf->phys);
-		mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
 			putPaddrHigh(dmabuf->phys);
 	}
 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
+	mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
+			      &mq_create_ext->u.response);
+	if (rc != MBX_SUCCESS) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"2795 MQ_CREATE_EXT failed with "
+				"status x%x. Failback to MQ_CREATE.\n",
+				rc);
+		lpfc_mq_create_fb_init(phba, mq, mbox, cq);
+		mq_create = &mbox->u.mqe.un.mq_create;
+		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+		shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
+		mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
+				      &mq_create->u.response);
+	}
+
 	/* The IOCTL status is embedded in the mailbox subheader. */
-	shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
 	if (shdr_status || shdr_add_status || rc) {
@@ -9878,7 +10021,6 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
 		status = -ENXIO;
 		goto out;
 	}
-	mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, &mq_create->u.response);
 	if (mq->queue_id == 0xFFFF) {
 		status = -ENXIO;
 		goto out;
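
The rewritten lpfc_mq_create() above tries MQ_CREATE_EXT first, registering for link, FCF-state and group-5 asynchronous events at create time, and falls back through lpfc_mq_create_fb_init() when older firmware rejects the extended opcode; shdr and queue_id are deliberately re-read after the retry because the legacy response is laid out differently. A toy sketch of the control flow only, with every name invented for illustration:

	#include <stdio.h>

	static int issue_ext(void)    { return -1; } /* old FW: rejected */
	static int issue_legacy(void) { return 0; }  /* always understood */

	int main(void)
	{
		int rc = issue_ext();        /* extended create first */
		if (rc != 0) {
			printf("failback to legacy create\n");
			rc = issue_legacy(); /* reuse the same mailbox; the
					      * response pointers must be
					      * re-read after this call */
		}
		return rc;
	}
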
@@ -9927,6 +10069,10 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
 	int rc, length, status = 0;
 	uint32_t shdr_status, shdr_add_status;
 	union lpfc_sli4_cfg_shdr *shdr;
+	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+	if (!phba->sli4_hba.pc_sli4_params.supported)
+		hw_page_size = SLI4_PAGE_SIZE;
 
 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (!mbox)
@@ -9942,6 +10088,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
 	bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
 	       cq->queue_id);
 	list_for_each_entry(dmabuf, &wq->page_list, list) {
+		memset(dmabuf->virt, 0, hw_page_size);
 		wq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
 			putPaddrLow(dmabuf->phys);
 		wq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
@@ -10010,6 +10157,10 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
 	int rc, length, status = 0;
 	uint32_t shdr_status, shdr_add_status;
 	union lpfc_sli4_cfg_shdr *shdr;
+	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+	if (!phba->sli4_hba.pc_sli4_params.supported)
+		hw_page_size = SLI4_PAGE_SIZE;
 
 	if (hrq->entry_count != drq->entry_count)
 		return -EINVAL;
@@ -10054,6 +10205,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
 	bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
 	       LPFC_HDR_BUF_SIZE);
 	list_for_each_entry(dmabuf, &hrq->page_list, list) {
+		memset(dmabuf->virt, 0, hw_page_size);
 		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
 			putPaddrLow(dmabuf->phys);
 		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
@@ -10626,7 +10778,7 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
 
 	reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
 		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
-	if (reqlen > PAGE_SIZE) {
+	if (reqlen > SLI4_PAGE_SIZE) {
 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
 			"2559 Block sgl registration required DMA "
 			"size (%d) great than a page\n", reqlen);
@@ -10732,7 +10884,7 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
 	/* Calculate the requested length of the dma memory */
 	reqlen = cnt * sizeof(struct sgl_page_pairs) +
 		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
-	if (reqlen > PAGE_SIZE) {
+	if (reqlen > SLI4_PAGE_SIZE) {
 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
 			"0217 Block sgl registration required DMA "
 			"size (%d) great than a page\n", reqlen);
@@ -11568,8 +11720,8 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
  *
  * This routine is invoked to post rpi header templates to the
  * HBA consistent with the SLI-4 interface spec. This routine
- * posts a PAGE_SIZE memory region to the port to hold up to
- * PAGE_SIZE modulo 64 rpi context headers.
+ * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
+ * SLI4_PAGE_SIZE modulo 64 rpi context headers.
  *
  * This routine does not require any locks. It's usage is expected
  * to be driver load or reset recovery when the driver is
@@ -11672,8 +11824,8 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
  *
  * This routine is invoked to post rpi header templates to the
  * HBA consistent with the SLI-4 interface spec. This routine
- * posts a PAGE_SIZE memory region to the port to hold up to
- * PAGE_SIZE modulo 64 rpi context headers.
+ * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
+ * SLI4_PAGE_SIZE modulo 64 rpi context headers.
  *
  * Returns
  * 	A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
@@ -12040,9 +12192,11 @@ lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
 		phba->hba_flag |= FCF_DISC_INPROGRESS;
 		spin_unlock_irq(&phba->hbalock);
 		/* Reset FCF round robin index bmask for new scan */
-		if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
+		if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) {
 			memset(phba->fcf.fcf_rr_bmask, 0,
 			       sizeof(*phba->fcf.fcf_rr_bmask));
+			phba->fcf.eligible_fcf_cnt = 0;
+		}
 		error = 0;
 	}
 fail_fcf_scan:
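
The hunk above clears the eligible-FCF count whenever a fresh scan starts (LPFC_FCOE_FCF_GET_FIRST), alongside the existing reset of the round-robin index bitmask, so no eligibility state survives into the new discovery pass. For intuition, a small sketch of round-robin selection over such a bitmask; the width and helper are illustrative, not the driver's:

	#include <stdint.h>

	#define FCF_MAX 64

	/* Pick the next set bit after prev, wrapping around once;
	 * returns -1 when no index is recorded in the mask. */
	static int fcf_rr_next(uint64_t bmask, int prev)
	{
		int i;

		for (i = 1; i <= FCF_MAX; i++) {
			int idx = (prev + i) % FCF_MAX;
			if (bmask & (1ULL << idx))
				return idx;
		}
		return -1;
	}
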
@@ -12507,6 +12661,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
 	struct lpfc_hba *phba = vport->phba;
 	LPFC_MBOXQ_t *mb, *nextmb;
 	struct lpfc_dmabuf *mp;
+	struct lpfc_nodelist *ndlp;
 
 	spin_lock_irq(&phba->hbalock);
 	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
@@ -12523,6 +12678,11 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
 				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
 				kfree(mp);
 			}
+			ndlp = (struct lpfc_nodelist *) mb->context2;
+			if (ndlp) {
+				lpfc_nlp_put(ndlp);
+				mb->context2 = NULL;
+			}
 		}
 		list_del(&mb->list);
 		mempool_free(mb, phba->mbox_mem_pool);
@@ -12532,6 +12692,15 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
 		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
 			(mb->u.mb.mbxCommand == MBX_REG_VPI))
 			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
+			ndlp = (struct lpfc_nodelist *) mb->context2;
+			if (ndlp) {
+				lpfc_nlp_put(ndlp);
+				mb->context2 = NULL;
+			}
+			/* Unregister the RPI when mailbox complete */
+			mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
+		}
 	}
 	spin_unlock_irq(&phba->hbalock);
 }
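
The cleanup hunks above fix a node reference leak: any nodelist pointer stashed in mb->context2 is put and cleared while the mailbox is unlinked, and a pending REG_LOGIN64 is additionally flagged LPFC_MBX_IMED_UNREG so its RPI is unregistered at completion. Putting the reference and NULLing the slot together is the essential pattern, since it keeps a later completion path from dropping the same reference twice. A generic sketch of that pattern (types and the put helper are stand-ins, not lpfc API):

	#include <stdlib.h>

	struct node { int refcnt; };

	static void node_put(struct node *n)
	{
		if (--n->refcnt == 0)
			free(n);
	}

	/* Release the stashed reference exactly once and clear the
	 * slot so no other path can put it again. */
	static void drop_stashed_ref(struct node **slot)
	{
		if (*slot) {
			node_put(*slot);
			*slot = NULL;
		}
	}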