|
@@ -512,9 +512,345 @@ __lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
|
|
|
return sglq;
|
|
|
}
|
|
|
|
|
|
+/**
|
|
|
+ * __lpfc_set_rrq_active - set RRQ active bit in the ndlp's xri_bitmap.
|
|
|
+ * @phba: Pointer to HBA context object.
|
|
|
+ * @ndlp: nodelist pointer for this target.
|
|
|
+ * @xritag: xri used in this exchange.
|
|
|
+ * @rxid: Remote Exchange ID.
|
|
|
+ * @send_rrq: Flag used to determine if we should send rrq els cmd.
|
|
|
+ *
|
|
|
+ * This function is called with hbalock held.
|
|
|
+ * The active bit is set in the ndlp's active rrq xri_bitmap. Allocates an
|
|
|
+ * rrq struct and adds it to the active_rrq_list.
|
|
|
+ *
|
|
|
+ * returns 0 if an rrq slot was reserved for this xri
|
|
|
+ * < 0 Were not able to get rrq mem or invalid parameter.
|
|
|
+ **/
|
|
|
+static int
|
|
|
+__lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
|
|
|
+ uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
|
|
|
+{
|
|
|
+ uint16_t adj_xri;
|
|
|
+ struct lpfc_node_rrq *rrq;
|
|
|
+ int empty;
|
|
|
+
|
|
|
+ /*
|
|
|
+ * set the active bit even if there is no mem available.
|
|
|
+ */
|
|
|
+ adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
|
|
|
+ if (!ndlp)
|
|
|
+ return -EINVAL;
|
|
|
+ if (test_and_set_bit(adj_xri, ndlp->active_rrqs.xri_bitmap))
|
|
|
+ return -EINVAL;
|
|
|
+ rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
|
|
|
+ if (rrq) {
|
|
|
+ rrq->send_rrq = send_rrq;
|
|
|
+ rrq->xritag = xritag;
|
|
|
+ rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
|
|
|
+ rrq->ndlp = ndlp;
|
|
|
+ rrq->nlp_DID = ndlp->nlp_DID;
|
|
|
+ rrq->vport = ndlp->vport;
|
|
|
+ rrq->rxid = rxid;
|
|
|
+ empty = list_empty(&phba->active_rrq_list);
|
|
|
+ if (phba->cfg_enable_rrq && send_rrq)
|
|
|
+ /*
|
|
|
+ * We need the xri before we can add this to the
|
|
|
+ * phba active rrq list.
|
|
|
+ */
|
|
|
+ rrq->send_rrq = send_rrq;
|
|
|
+ else
|
|
|
+ rrq->send_rrq = 0;
|
|
|
+ list_add_tail(&rrq->list, &phba->active_rrq_list);
|
|
|
+ if (!(phba->hba_flag & HBA_RRQ_ACTIVE)) {
|
|
|
+ phba->hba_flag |= HBA_RRQ_ACTIVE;
|
|
|
+ if (empty)
|
|
|
+ lpfc_worker_wake_up(phba);
|
|
|
+ }
|
|
|
+ return 0;
|
|
|
+ }
|
|
|
+ return -ENOMEM;
|
|
|
+}
|
|
|
+
|
|
|
+/**
|
|
|
+ * __lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
|
|
|
+ * @phba: Pointer to HBA context object.
|
|
|
+ * @xritag: xri used in this exchange.
|
|
|
+ * @rrq: The RRQ to be cleared.
|
|
|
+ *
|
|
|
+ * This function is called with hbalock held. It clears the rrq active bit and frees the rrq.
|
|
|
+ **/
|
|
|
+static void
|
|
|
+__lpfc_clr_rrq_active(struct lpfc_hba *phba,
|
|
|
+ uint16_t xritag,
|
|
|
+ struct lpfc_node_rrq *rrq)
|
|
|
+{
|
|
|
+ uint16_t adj_xri;
|
|
|
+ struct lpfc_nodelist *ndlp;
|
|
|
+
|
|
|
+ ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
|
|
|
+
|
|
|
+ /* The target DID could have been swapped (cable swap)
|
|
|
+ * we should use the ndlp from the findnode if it is
|
|
|
+ * available.
|
|
|
+ */
|
|
|
+ if (!ndlp)
|
|
|
+ ndlp = rrq->ndlp;
|
|
|
+
|
|
|
+ adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
|
|
|
+ if (test_and_clear_bit(adj_xri, ndlp->active_rrqs.xri_bitmap)) {
|
|
|
+ rrq->send_rrq = 0;
|
|
|
+ rrq->xritag = 0;
|
|
|
+ rrq->rrq_stop_time = 0;
|
|
|
+ }
|
|
|
+ mempool_free(rrq, phba->rrq_pool);
|
|
|
+}
|
|
|
+
|
|
|
+/**
|
|
|
+ * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
|
|
|
+ * @phba: Pointer to HBA context object.
|
|
|
+ *
|
|
|
+ * This function is called with hbalock held. This function
|
|
|
+ * Checks if stop_time (ratov from setting rrq active) has
|
|
|
+ * been reached, if it has and the send_rrq flag is set then
|
|
|
+ * it will call lpfc_send_rrq. If the send_rrq flag is not set
|
|
|
+ * then it will just call the routine to clear the rrq and
|
|
|
+ * free the rrq resource.
|
|
|
+ * The timer is set to the next rrq that is going to expire before
|
|
|
+ * leaving the routine.
|
|
|
+ *
|
|
|
+ **/
|
|
|
+void
|
|
|
+lpfc_handle_rrq_active(struct lpfc_hba *phba)
|
|
|
+{
|
|
|
+ struct lpfc_node_rrq *rrq;
|
|
|
+ struct lpfc_node_rrq *nextrrq;
|
|
|
+ unsigned long next_time;
|
|
|
+ unsigned long iflags;
|
|
|
+
|
|
|
+ spin_lock_irqsave(&phba->hbalock, iflags);
|
|
|
+ phba->hba_flag &= ~HBA_RRQ_ACTIVE;
|
|
|
+ next_time = jiffies + HZ * (phba->fc_ratov + 1);
|
|
|
+ list_for_each_entry_safe(rrq, nextrrq,
|
|
|
+ &phba->active_rrq_list, list) {
|
|
|
+ if (time_after(jiffies, rrq->rrq_stop_time)) {
|
|
|
+ list_del(&rrq->list);
|
|
|
+ if (!rrq->send_rrq)
|
|
|
+ /* this call will free the rrq */
|
|
|
+ __lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
|
|
|
+ else {
|
|
|
+ /* if we send the rrq then the completion handler
|
|
|
+ * will clear the bit in the xribitmap.
|
|
|
+ */
|
|
|
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
|
|
|
+ if (lpfc_send_rrq(phba, rrq)) {
|
|
|
+ lpfc_clr_rrq_active(phba, rrq->xritag,
|
|
|
+ rrq);
|
|
|
+ }
|
|
|
+ spin_lock_irqsave(&phba->hbalock, iflags);
|
|
|
+ }
|
|
|
+ } else if (time_before(rrq->rrq_stop_time, next_time))
|
|
|
+ next_time = rrq->rrq_stop_time;
|
|
|
+ }
|
|
|
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
|
|
|
+ if (!list_empty(&phba->active_rrq_list))
|
|
|
+ mod_timer(&phba->rrq_tmr, next_time);
|
|
|
+}
|
|
|
+
|
|
|
+/**
|
|
|
+ * lpfc_get_active_rrq - Get the active RRQ for this exchange.
|
|
|
+ * @vport: Pointer to vport context object.
|
|
|
+ * @xri: The xri used in the exchange.
|
|
|
+ * @did: The targets DID for this exchange.
|
|
|
+ *
|
|
|
+ * returns NULL = rrq not found in the phba->active_rrq_list.
|
|
|
+ * rrq = rrq for this xri and target.
|
|
|
+ **/
|
|
|
+struct lpfc_node_rrq *
|
|
|
+lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
|
|
|
+{
|
|
|
+ struct lpfc_hba *phba = vport->phba;
|
|
|
+ struct lpfc_node_rrq *rrq;
|
|
|
+ struct lpfc_node_rrq *nextrrq;
|
|
|
+ unsigned long iflags;
|
|
|
+
|
|
|
+ if (phba->sli_rev != LPFC_SLI_REV4)
|
|
|
+ return NULL;
|
|
|
+ spin_lock_irqsave(&phba->hbalock, iflags);
|
|
|
+ list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
|
|
|
+ if (rrq->vport == vport && rrq->xritag == xri &&
|
|
|
+ rrq->nlp_DID == did){
|
|
|
+ list_del(&rrq->list);
|
|
|
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
|
|
|
+ return rrq;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
|
|
|
+ return NULL;
|
|
|
+}
|
|
|
+
|
|
|
+/**
|
|
|
+ * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
|
|
|
+ * @vport: Pointer to vport context object.
|
|
|
+ *
|
|
|
+ * Remove all active RRQs for this vport from the phba->active_rrq_list and
|
|
|
+ * clear the rrq.
|
|
|
+ **/
|
|
|
+void
|
|
|
+lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport)
|
|
|
+
|
|
|
+{
|
|
|
+ struct lpfc_hba *phba = vport->phba;
|
|
|
+ struct lpfc_node_rrq *rrq;
|
|
|
+ struct lpfc_node_rrq *nextrrq;
|
|
|
+ unsigned long iflags;
|
|
|
+
|
|
|
+ if (phba->sli_rev != LPFC_SLI_REV4)
|
|
|
+ return;
|
|
|
+ spin_lock_irqsave(&phba->hbalock, iflags);
|
|
|
+ list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
|
|
|
+ if (rrq->vport == vport) {
|
|
|
+ list_del(&rrq->list);
|
|
|
+ __lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
|
|
|
+ }
|
|
|
+ }
|
|
|
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
|
|
|
+}
|
|
|
+
|
|
|
+/**
|
|
|
+ * lpfc_cleanup_wt_rrqs - Remove all rrq's from the active list.
|
|
|
+ * @phba: Pointer to HBA context object.
|
|
|
+ *
|
|
|
+ * Remove all rrqs from the phba->active_rrq_list and free them by
|
|
|
+ * calling __lpfc_clr_rrq_active
|
|
|
+ *
|
|
|
+ **/
|
|
|
+void
|
|
|
+lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
|
|
|
+{
|
|
|
+ struct lpfc_node_rrq *rrq;
|
|
|
+ struct lpfc_node_rrq *nextrrq;
|
|
|
+ unsigned long next_time;
|
|
|
+ unsigned long iflags;
|
|
|
+
|
|
|
+ if (phba->sli_rev != LPFC_SLI_REV4)
|
|
|
+ return;
|
|
|
+ spin_lock_irqsave(&phba->hbalock, iflags);
|
|
|
+ phba->hba_flag &= ~HBA_RRQ_ACTIVE;
|
|
|
+ next_time = jiffies + HZ * (phba->fc_ratov * 2);
|
|
|
+ list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
|
|
|
+ list_del(&rrq->list);
|
|
|
+ __lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
|
|
|
+ }
|
|
|
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
|
|
|
+ if (!list_empty(&phba->active_rrq_list))
|
|
|
+ mod_timer(&phba->rrq_tmr, next_time);
|
|
|
+}
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * __lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
|
|
|
+ * @phba: Pointer to HBA context object.
|
|
|
+ * @ndlp: Targets nodelist pointer for this exchange.
|
|
|
+ * @xritag: the xri in the bitmap to test.
|
|
|
+ *
|
|
|
+ * This function is called with hbalock held. This function
|
|
|
+ * returns 0 = rrq not active for this xri
|
|
|
+ * 1 = rrq is valid for this xri.
|
|
|
+ **/
|
|
|
+static int
|
|
|
+__lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
|
|
|
+ uint16_t xritag)
|
|
|
+{
|
|
|
+ uint16_t adj_xri;
|
|
|
+
|
|
|
+ adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
|
|
|
+ if (!ndlp)
|
|
|
+ return 0;
|
|
|
+ if (test_bit(adj_xri, ndlp->active_rrqs.xri_bitmap))
|
|
|
+ return 1;
|
|
|
+ else
|
|
|
+ return 0;
|
|
|
+}
|
|
|
+
|
|
|
+/**
|
|
|
+ * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
|
|
|
+ * @phba: Pointer to HBA context object.
|
|
|
+ * @ndlp: nodelist pointer for this target.
|
|
|
+ * @xritag: xri used in this exchange.
|
|
|
+ * @rxid: Remote Exchange ID.
|
|
|
+ * @send_rrq: Flag used to determine if we should send rrq els cmd.
|
|
|
+ *
|
|
|
+ * This function takes the hbalock.
|
|
|
+ * The active bit is always set in the active rrq xri_bitmap even
|
|
|
+ * if there is no slot available for the other rrq information.
|
|
|
+ *
|
|
|
+ * returns 0 rrq activated for this xri
|
|
|
+ * < 0 No memory or invalid ndlp.
|
|
|
+ **/
|
|
|
+int
|
|
|
+lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
|
|
|
+ uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
|
|
|
+{
|
|
|
+ int ret;
|
|
|
+ unsigned long iflags;
|
|
|
+
|
|
|
+ spin_lock_irqsave(&phba->hbalock, iflags);
|
|
|
+ ret = __lpfc_set_rrq_active(phba, ndlp, xritag, rxid, send_rrq);
|
|
|
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
|
|
|
+ return ret;
|
|
|
+}
|
|
|
+
|
|
|
+/**
|
|
|
+ * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
|
|
|
+ * @phba: Pointer to HBA context object.
|
|
|
+ * @xritag: xri used in this exchange.
|
|
|
+ * @rrq: The RRQ to be cleared.
|
|
|
+ *
|
|
|
+ * This function takes the hbalock.
|
|
|
+ **/
|
|
|
+void
|
|
|
+lpfc_clr_rrq_active(struct lpfc_hba *phba,
|
|
|
+ uint16_t xritag,
|
|
|
+ struct lpfc_node_rrq *rrq)
|
|
|
+{
|
|
|
+ unsigned long iflags;
|
|
|
+
|
|
|
+ spin_lock_irqsave(&phba->hbalock, iflags);
|
|
|
+ __lpfc_clr_rrq_active(phba, xritag, rrq);
|
|
|
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
|
|
|
+ return;
|
|
|
+}
|
|
|
+
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
|
|
|
+ * @phba: Pointer to HBA context object.
|
|
|
+ * @ndlp: Targets nodelist pointer for this exchange.
|
|
|
+ * @xritag: the xri in the bitmap to test.
|
|
|
+ *
|
|
|
+ * This function takes the hbalock.
|
|
|
+ * returns 0 = rrq not active for this xri
|
|
|
+ * 1 = rrq is valid for this xri.
|
|
|
+ **/
|
|
|
+int
|
|
|
+lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
|
|
|
+ uint16_t xritag)
|
|
|
+{
|
|
|
+ int ret;
|
|
|
+ unsigned long iflags;
|
|
|
+
|
|
|
+ spin_lock_irqsave(&phba->hbalock, iflags);
|
|
|
+ ret = __lpfc_test_rrq_active(phba, ndlp, xritag);
|
|
|
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
|
|
|
+ return ret;
|
|
|
+}
|
|
|
+
|
|
|
/**
|
|
|
* __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool
|
|
|
* @phba: Pointer to HBA context object.
|
|
|
+ * @piocb: Pointer to the iocbq.
|
|
|
*
|
|
|
* This function is called with hbalock held. This function
|
|
|
* Gets a new driver sglq object from the sglq list. If the
|
|
@@ -522,17 +858,51 @@ __lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
|
|
|
* allocated sglq object else it returns NULL.
|
|
|
**/
|
|
|
static struct lpfc_sglq *
|
|
|
-__lpfc_sli_get_sglq(struct lpfc_hba *phba)
|
|
|
+__lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
|
|
|
{
|
|
|
struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
|
|
|
struct lpfc_sglq *sglq = NULL;
|
|
|
+ struct lpfc_sglq *start_sglq = NULL;
|
|
|
uint16_t adj_xri;
|
|
|
+ struct lpfc_scsi_buf *lpfc_cmd;
|
|
|
+ struct lpfc_nodelist *ndlp;
|
|
|
+ int found = 0;
|
|
|
+
|
|
|
+ if (piocbq->iocb_flag & LPFC_IO_FCP) {
|
|
|
+ lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
|
|
|
+ ndlp = lpfc_cmd->rdata->pnode;
|
|
|
+ } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
|
|
|
+ !(piocbq->iocb_flag & LPFC_IO_LIBDFC))
|
|
|
+ ndlp = piocbq->context_un.ndlp;
|
|
|
+ else
|
|
|
+ ndlp = piocbq->context1;
|
|
|
+
|
|
|
list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
|
|
|
- if (!sglq)
|
|
|
- return NULL;
|
|
|
- adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base;
|
|
|
- phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
|
|
|
- sglq->state = SGL_ALLOCATED;
|
|
|
+ start_sglq = sglq;
|
|
|
+ while (!found) {
|
|
|
+ if (!sglq)
|
|
|
+ return NULL;
|
|
|
+ adj_xri = sglq->sli4_xritag -
|
|
|
+ phba->sli4_hba.max_cfg_param.xri_base;
|
|
|
+ if (__lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) {
|
|
|
+ /* This xri has an rrq outstanding for this DID.
|
|
|
+ * put it back in the list and get another xri.
|
|
|
+ */
|
|
|
+ list_add_tail(&sglq->list, lpfc_sgl_list);
|
|
|
+ sglq = NULL;
|
|
|
+ list_remove_head(lpfc_sgl_list, sglq,
|
|
|
+ struct lpfc_sglq, list);
|
|
|
+ if (sglq == start_sglq) {
|
|
|
+ sglq = NULL;
|
|
|
+ break;
|
|
|
+ } else
|
|
|
+ continue;
|
|
|
+ }
|
|
|
+ sglq->ndlp = ndlp;
|
|
|
+ found = 1;
|
|
|
+ phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
|
|
|
+ sglq->state = SGL_ALLOCATED;
|
|
|
+ }
|
|
|
return sglq;
|
|
|
}
|
|
|
|
|
@@ -598,6 +968,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
|
|
|
&phba->sli4_hba.abts_sgl_list_lock, iflag);
|
|
|
} else {
|
|
|
sglq->state = SGL_FREED;
|
|
|
+ sglq->ndlp = NULL;
|
|
|
list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
|
|
|
|
|
|
/* Check if TXQ queue needs to be serviced */
|
|
@@ -1634,7 +2005,6 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
|
|
|
case MBX_READ_LNK_STAT:
|
|
|
case MBX_REG_LOGIN:
|
|
|
case MBX_UNREG_LOGIN:
|
|
|
- case MBX_READ_LA:
|
|
|
case MBX_CLEAR_LA:
|
|
|
case MBX_DUMP_MEMORY:
|
|
|
case MBX_DUMP_CONTEXT:
|
|
@@ -1656,7 +2026,7 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
|
|
|
case MBX_READ_SPARM64:
|
|
|
case MBX_READ_RPI64:
|
|
|
case MBX_REG_LOGIN64:
|
|
|
- case MBX_READ_LA64:
|
|
|
+ case MBX_READ_TOPOLOGY:
|
|
|
case MBX_WRITE_WWN:
|
|
|
case MBX_SET_DEBUG:
|
|
|
case MBX_LOAD_EXP_ROM:
|
|
@@ -1746,11 +2116,6 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
|
|
|
kfree(mp);
|
|
|
}
|
|
|
|
|
|
- if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
|
|
|
- (phba->sli_rev == LPFC_SLI_REV4) &&
|
|
|
- (pmb->u.mb.un.varUnregLogin.rsvd1 == 0x0))
|
|
|
- lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
|
|
|
-
|
|
|
/*
|
|
|
* If a REG_LOGIN succeeded after node is destroyed or node
|
|
|
* is in re-discovery driver need to cleanup the RPI.
|
|
@@ -3483,12 +3848,6 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
|
|
|
phba->pport->fc_myDID = 0;
|
|
|
phba->pport->fc_prevDID = 0;
|
|
|
|
|
|
- /* Turn off parity checking and serr during the physical reset */
|
|
|
- pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
|
|
|
- pci_write_config_word(phba->pcidev, PCI_COMMAND,
|
|
|
- (cfg_value &
|
|
|
- ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
|
|
|
-
|
|
|
spin_lock_irq(&phba->hbalock);
|
|
|
psli->sli_flag &= ~(LPFC_PROCESS_LA);
|
|
|
phba->fcf.fcf_flag = 0;
|
|
@@ -3508,9 +3867,18 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
|
|
|
/* Now physically reset the device */
|
|
|
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
|
|
|
"0389 Performing PCI function reset!\n");
|
|
|
+
|
|
|
+ /* Turn off parity checking and serr during the physical reset */
|
|
|
+ pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
|
|
|
+ pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
|
|
|
+ ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
|
|
|
+
|
|
|
/* Perform FCoE PCI function reset */
|
|
|
lpfc_pci_function_reset(phba);
|
|
|
|
|
|
+ /* Restore PCI cmd register */
|
|
|
+ pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
|
|
|
+
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
@@ -4317,6 +4685,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
|
|
|
struct lpfc_vport *vport = phba->pport;
|
|
|
struct lpfc_dmabuf *mp;
|
|
|
|
|
|
+ /*
|
|
|
+ * TODO: Why does this routine execute these task in a different
|
|
|
+ * order from probe?
|
|
|
+ */
|
|
|
/* Perform a PCI function reset to start from clean */
|
|
|
rc = lpfc_pci_function_reset(phba);
|
|
|
if (unlikely(rc))
|
|
@@ -4357,13 +4729,16 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
|
|
|
}
|
|
|
|
|
|
rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
|
|
|
- if (unlikely(rc))
|
|
|
- goto out_free_vpd;
|
|
|
-
|
|
|
+ if (unlikely(rc)) {
|
|
|
+ kfree(vpd);
|
|
|
+ goto out_free_mbox;
|
|
|
+ }
|
|
|
mqe = &mboxq->u.mqe;
|
|
|
phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
|
|
|
if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
|
|
|
- phba->hba_flag |= HBA_FCOE_SUPPORT;
|
|
|
+ phba->hba_flag |= HBA_FCOE_MODE;
|
|
|
+ else
|
|
|
+ phba->hba_flag &= ~HBA_FCOE_MODE;
|
|
|
|
|
|
if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
|
|
|
LPFC_DCBX_CEE_MODE)
|
|
@@ -4372,13 +4747,14 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
|
|
|
phba->hba_flag &= ~HBA_FIP_SUPPORT;
|
|
|
|
|
|
if (phba->sli_rev != LPFC_SLI_REV4 ||
|
|
|
- !(phba->hba_flag & HBA_FCOE_SUPPORT)) {
|
|
|
+ !(phba->hba_flag & HBA_FCOE_MODE)) {
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
|
|
|
"0376 READ_REV Error. SLI Level %d "
|
|
|
"FCoE enabled %d\n",
|
|
|
- phba->sli_rev, phba->hba_flag & HBA_FCOE_SUPPORT);
|
|
|
+ phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
|
|
|
rc = -EIO;
|
|
|
- goto out_free_vpd;
|
|
|
+ kfree(vpd);
|
|
|
+ goto out_free_mbox;
|
|
|
}
|
|
|
/*
|
|
|
* Evaluate the read rev and vpd data. Populate the driver
|
|
@@ -4392,6 +4768,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
|
|
|
"Using defaults.\n", rc);
|
|
|
rc = 0;
|
|
|
}
|
|
|
+ kfree(vpd);
|
|
|
|
|
|
/* Save information as VPD data */
|
|
|
phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
|
|
@@ -4428,7 +4805,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
|
|
|
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
|
|
|
if (unlikely(rc)) {
|
|
|
rc = -EIO;
|
|
|
- goto out_free_vpd;
|
|
|
+ goto out_free_mbox;
|
|
|
}
|
|
|
|
|
|
/*
|
|
@@ -4476,7 +4853,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
|
|
|
if (rc) {
|
|
|
phba->link_state = LPFC_HBA_ERROR;
|
|
|
rc = -ENOMEM;
|
|
|
- goto out_free_vpd;
|
|
|
+ goto out_free_mbox;
|
|
|
}
|
|
|
|
|
|
mboxq->vport = vport;
|
|
@@ -4501,7 +4878,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
|
|
|
rc, bf_get(lpfc_mqe_status, mqe));
|
|
|
phba->link_state = LPFC_HBA_ERROR;
|
|
|
rc = -EIO;
|
|
|
- goto out_free_vpd;
|
|
|
+ goto out_free_mbox;
|
|
|
}
|
|
|
|
|
|
if (phba->cfg_soft_wwnn)
|
|
@@ -4526,7 +4903,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
|
|
|
"0582 Error %d during sgl post operation\n",
|
|
|
rc);
|
|
|
rc = -ENODEV;
|
|
|
- goto out_free_vpd;
|
|
|
+ goto out_free_mbox;
|
|
|
}
|
|
|
|
|
|
/* Register SCSI SGL pool to the device */
|
|
@@ -4538,7 +4915,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
|
|
|
/* Some Scsi buffers were moved to the abort scsi list */
|
|
|
/* A pci function reset will repost them */
|
|
|
rc = -ENODEV;
|
|
|
- goto out_free_vpd;
|
|
|
+ goto out_free_mbox;
|
|
|
}
|
|
|
|
|
|
/* Post the rpi header region to the device. */
|
|
@@ -4548,7 +4925,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
|
|
|
"0393 Error %d during rpi post operation\n",
|
|
|
rc);
|
|
|
rc = -ENODEV;
|
|
|
- goto out_free_vpd;
|
|
|
+ goto out_free_mbox;
|
|
|
}
|
|
|
|
|
|
/* Set up all the queues to the device */
|
|
@@ -4608,33 +4985,33 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
|
|
|
}
|
|
|
}
|
|
|
|
|
|
+ if (!(phba->hba_flag & HBA_FCOE_MODE)) {
|
|
|
+ /*
|
|
|
+ * The FC Port needs to register FCFI (index 0)
|
|
|
+ */
|
|
|
+ lpfc_reg_fcfi(phba, mboxq);
|
|
|
+ mboxq->vport = phba->pport;
|
|
|
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
|
|
|
+ if (rc == MBX_SUCCESS)
|
|
|
+ rc = 0;
|
|
|
+ else
|
|
|
+ goto out_unset_queue;
|
|
|
+ }
|
|
|
/*
|
|
|
* The port is ready, set the host's link state to LINK_DOWN
|
|
|
* in preparation for link interrupts.
|
|
|
*/
|
|
|
- lpfc_init_link(phba, mboxq, phba->cfg_topology, phba->cfg_link_speed);
|
|
|
- mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
|
|
|
- lpfc_set_loopback_flag(phba);
|
|
|
- /* Change driver state to LPFC_LINK_DOWN right before init link */
|
|
|
spin_lock_irq(&phba->hbalock);
|
|
|
phba->link_state = LPFC_LINK_DOWN;
|
|
|
spin_unlock_irq(&phba->hbalock);
|
|
|
- rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
|
|
|
- if (unlikely(rc != MBX_NOT_FINISHED)) {
|
|
|
- kfree(vpd);
|
|
|
- return 0;
|
|
|
- } else
|
|
|
- rc = -EIO;
|
|
|
-
|
|
|
+ rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
|
|
|
+out_unset_queue:
|
|
|
/* Unset all the queues set up in this routine when error out */
|
|
|
if (rc)
|
|
|
lpfc_sli4_queue_unset(phba);
|
|
|
-
|
|
|
out_stop_timers:
|
|
|
if (rc)
|
|
|
lpfc_stop_hba_timers(phba);
|
|
|
-out_free_vpd:
|
|
|
- kfree(vpd);
|
|
|
out_free_mbox:
|
|
|
mempool_free(mboxq, phba->mbox_mem_pool);
|
|
|
return rc;
|
|
@@ -5863,6 +6240,8 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
|
|
|
IOCB_t *icmd;
|
|
|
int numBdes = 0;
|
|
|
int i = 0;
|
|
|
+ uint32_t offset = 0; /* accumulated offset in the sg request list */
|
|
|
+ int inbound = 0; /* number of sg reply entries inbound from firmware */
|
|
|
|
|
|
if (!piocbq || !sglq)
|
|
|
return xritag;
|
|
@@ -5897,6 +6276,20 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
|
|
|
*/
|
|
|
bde.tus.w = le32_to_cpu(bpl->tus.w);
|
|
|
sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
|
|
|
+ /* The offsets in the sgl need to be accumulated
|
|
|
+ * separately for the request and reply lists.
|
|
|
+ * The request is always first, the reply follows.
|
|
|
+ */
|
|
|
+ if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
|
|
|
+ /* add up the reply sg entries */
|
|
|
+ if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
|
|
|
+ inbound++;
|
|
|
+ /* first inbound? reset the offset */
|
|
|
+ if (inbound == 1)
|
|
|
+ offset = 0;
|
|
|
+ bf_set(lpfc_sli4_sge_offset, sgl, offset);
|
|
|
+ offset += bde.tus.f.bdeSize;
|
|
|
+ }
|
|
|
bpl++;
|
|
|
sgl++;
|
|
|
}
|
|
@@ -6028,11 +6421,6 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
|
|
|
bf_set(els_req64_vf, &wqe->els_req, 0);
|
|
|
/* And a VFID for word 12 */
|
|
|
bf_set(els_req64_vfid, &wqe->els_req, 0);
|
|
|
- /*
|
|
|
- * Set ct field to 3, indicates that the context_tag field
|
|
|
- * contains the FCFI and remote N_Port_ID is
|
|
|
- * in word 5.
|
|
|
- */
|
|
|
ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
|
|
|
bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
|
|
|
iocbq->iocb.ulpContext);
|
|
@@ -6140,6 +6528,18 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
|
|
|
bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
|
|
|
break;
|
|
|
case CMD_GEN_REQUEST64_CR:
|
|
|
+ /* For this command calculate the xmit length of the
|
|
|
+ * request bde.
|
|
|
+ */
|
|
|
+ xmit_len = 0;
|
|
|
+ numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
|
|
|
+ sizeof(struct ulp_bde64);
|
|
|
+ for (i = 0; i < numBdes; i++) {
|
|
|
+ if (bpl[i].tus.f.bdeFlags != BUFF_TYPE_BDE_64)
|
|
|
+ break;
|
|
|
+ bde.tus.w = le32_to_cpu(bpl[i].tus.w);
|
|
|
+ xmit_len += bde.tus.f.bdeSize;
|
|
|
+ }
|
|
|
/* word3 iocb=IO_TAG wqe=request_payload_len */
|
|
|
wqe->gen_req.request_payload_len = xmit_len;
|
|
|
/* word4 iocb=parameter wqe=relative_offset memcpy */
|
|
@@ -6320,7 +6720,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
|
|
|
return IOCB_BUSY;
|
|
|
}
|
|
|
} else {
|
|
|
- sglq = __lpfc_sli_get_sglq(phba);
|
|
|
+ sglq = __lpfc_sli_get_sglq(phba, piocb);
|
|
|
if (!sglq) {
|
|
|
if (!(flag & SLI_IOCB_RET_IOCB)) {
|
|
|
__lpfc_sli_ringtx_put(phba,
|
|
@@ -8033,29 +8433,66 @@ static int
|
|
|
lpfc_sli4_eratt_read(struct lpfc_hba *phba)
|
|
|
{
|
|
|
uint32_t uerr_sta_hi, uerr_sta_lo;
|
|
|
+ uint32_t if_type, portsmphr;
|
|
|
+ struct lpfc_register portstat_reg;
|
|
|
|
|
|
- /* For now, use the SLI4 device internal unrecoverable error
|
|
|
+ /*
|
|
|
+ * For now, use the SLI4 device internal unrecoverable error
|
|
|
* registers for error attention. This can be changed later.
|
|
|
*/
|
|
|
- uerr_sta_lo = readl(phba->sli4_hba.UERRLOregaddr);
|
|
|
- uerr_sta_hi = readl(phba->sli4_hba.UERRHIregaddr);
|
|
|
- if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
|
|
|
- (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
|
|
|
+ if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
|
|
|
+ switch (if_type) {
|
|
|
+ case LPFC_SLI_INTF_IF_TYPE_0:
|
|
|
+ uerr_sta_lo = readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
|
|
|
+ uerr_sta_hi = readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
|
|
|
+ if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
|
|
|
+ (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
|
|
|
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
+ "1423 HBA Unrecoverable error: "
|
|
|
+ "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
|
|
|
+ "ue_mask_lo_reg=0x%x, "
|
|
|
+ "ue_mask_hi_reg=0x%x\n",
|
|
|
+ uerr_sta_lo, uerr_sta_hi,
|
|
|
+ phba->sli4_hba.ue_mask_lo,
|
|
|
+ phba->sli4_hba.ue_mask_hi);
|
|
|
+ phba->work_status[0] = uerr_sta_lo;
|
|
|
+ phba->work_status[1] = uerr_sta_hi;
|
|
|
+ phba->work_ha |= HA_ERATT;
|
|
|
+ phba->hba_flag |= HBA_ERATT_HANDLED;
|
|
|
+ return 1;
|
|
|
+ }
|
|
|
+ break;
|
|
|
+ case LPFC_SLI_INTF_IF_TYPE_2:
|
|
|
+ portstat_reg.word0 =
|
|
|
+ readl(phba->sli4_hba.u.if_type2.STATUSregaddr);
|
|
|
+ portsmphr = readl(phba->sli4_hba.PSMPHRregaddr);
|
|
|
+ if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
|
|
|
+ phba->work_status[0] =
|
|
|
+ readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
|
|
|
+ phba->work_status[1] =
|
|
|
+ readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
|
|
|
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
+ "2885 Port Error Detected: "
|
|
|
+ "port status reg 0x%x, "
|
|
|
+ "port smphr reg 0x%x, "
|
|
|
+ "error 1=0x%x, error 2=0x%x\n",
|
|
|
+ portstat_reg.word0,
|
|
|
+ portsmphr,
|
|
|
+ phba->work_status[0],
|
|
|
+ phba->work_status[1]);
|
|
|
+ phba->work_ha |= HA_ERATT;
|
|
|
+ phba->hba_flag |= HBA_ERATT_HANDLED;
|
|
|
+ return 1;
|
|
|
+ }
|
|
|
+ break;
|
|
|
+ case LPFC_SLI_INTF_IF_TYPE_1:
|
|
|
+ default:
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
- "1423 HBA Unrecoverable error: "
|
|
|
- "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
|
|
|
- "ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n",
|
|
|
- uerr_sta_lo, uerr_sta_hi,
|
|
|
- phba->sli4_hba.ue_mask_lo,
|
|
|
- phba->sli4_hba.ue_mask_hi);
|
|
|
- phba->work_status[0] = uerr_sta_lo;
|
|
|
- phba->work_status[1] = uerr_sta_hi;
|
|
|
- /* Set the driver HA work bitmap */
|
|
|
- phba->work_ha |= HA_ERATT;
|
|
|
- /* Indicate polling handles this ERATT */
|
|
|
- phba->hba_flag |= HBA_ERATT_HANDLED;
|
|
|
+ "2886 HBA Error Attention on unsupported "
|
|
|
+ "if type %d.", if_type);
|
|
|
return 1;
|
|
|
}
|
|
|
+
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
@@ -8110,7 +8547,7 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba)
|
|
|
ha_copy = lpfc_sli_eratt_read(phba);
|
|
|
break;
|
|
|
case LPFC_SLI_REV4:
|
|
|
- /* Read devcie Uncoverable Error (UERR) registers */
|
|
|
+ /* Read device Unrecoverable Error (UERR) registers */
|
|
|
ha_copy = lpfc_sli4_eratt_read(phba);
|
|
|
break;
|
|
|
default:
|
|
@@ -10155,16 +10592,20 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
|
|
|
length, LPFC_SLI4_MBX_EMBED);
|
|
|
|
|
|
mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
|
|
|
- bf_set(lpfc_mbx_mq_create_ext_num_pages, &mq_create_ext->u.request,
|
|
|
- mq->page_count);
|
|
|
- bf_set(lpfc_mbx_mq_create_ext_async_evt_link, &mq_create_ext->u.request,
|
|
|
- 1);
|
|
|
- bf_set(lpfc_mbx_mq_create_ext_async_evt_fcfste,
|
|
|
+ bf_set(lpfc_mbx_mq_create_ext_num_pages,
|
|
|
+ &mq_create_ext->u.request, mq->page_count);
|
|
|
+ bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
|
|
|
+ &mq_create_ext->u.request, 1);
|
|
|
+ bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
|
|
|
&mq_create_ext->u.request, 1);
|
|
|
bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
|
|
|
&mq_create_ext->u.request, 1);
|
|
|
- bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
|
|
|
- cq->queue_id);
|
|
|
+ bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
|
|
|
+ &mq_create_ext->u.request, 1);
|
|
|
+ bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
|
|
|
+ &mq_create_ext->u.request, 1);
|
|
|
+ bf_set(lpfc_mq_context_cq_id,
|
|
|
+ &mq_create_ext->u.request.context, cq->queue_id);
|
|
|
bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
|
|
|
switch (mq->entry_count) {
|
|
|
default:
|
|
@@ -11137,7 +11578,8 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
|
|
|
static int
|
|
|
lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
|
|
|
{
|
|
|
- char *rctl_names[] = FC_RCTL_NAMES_INIT;
|
|
|
+ /* make rctl_names static to save stack space */
|
|
|
+ static char *rctl_names[] = FC_RCTL_NAMES_INIT;
|
|
|
char *type_names[] = FC_TYPE_NAMES_INIT;
|
|
|
struct fc_vft_header *fc_vft_hdr;
|
|
|
|
|
@@ -11538,6 +11980,10 @@ lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba,
|
|
|
"SID:x%x\n", oxid, sid);
|
|
|
return;
|
|
|
}
|
|
|
+ if (rxid >= phba->sli4_hba.max_cfg_param.xri_base
|
|
|
+ && rxid <= (phba->sli4_hba.max_cfg_param.max_xri
|
|
|
+ + phba->sli4_hba.max_cfg_param.xri_base))
|
|
|
+ lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0);
|
|
|
|
|
|
/* Allocate buffer for acc iocb */
|
|
|
ctiocb = lpfc_sli_get_iocbq(phba);
|
|
@@ -11560,6 +12006,7 @@ lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba,
|
|
|
icmd->ulpLe = 1;
|
|
|
icmd->ulpClass = CLASS3;
|
|
|
icmd->ulpContext = ndlp->nlp_rpi;
|
|
|
+ ctiocb->context1 = ndlp;
|
|
|
|
|
|
ctiocb->iocb_cmpl = NULL;
|
|
|
ctiocb->vport = phba->pport;
|
|
@@ -12129,42 +12576,37 @@ lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp)
|
|
|
|
|
|
/**
|
|
|
* lpfc_sli4_init_vpi - Initialize a vpi with the port
|
|
|
- * @phba: pointer to lpfc hba data structure.
|
|
|
- * @vpi: vpi value to activate with the port.
|
|
|
+ * @vport: Pointer to the vport for which the vpi is being initialized
|
|
|
*
|
|
|
- * This routine is invoked to activate a vpi with the
|
|
|
- * port when the host intends to use vports with a
|
|
|
- * nonzero vpi.
|
|
|
+ * This routine is invoked to activate a vpi with the port.
|
|
|
*
|
|
|
* Returns:
|
|
|
* 0 success
|
|
|
* -Evalue otherwise
|
|
|
**/
|
|
|
int
|
|
|
-lpfc_sli4_init_vpi(struct lpfc_hba *phba, uint16_t vpi)
|
|
|
+lpfc_sli4_init_vpi(struct lpfc_vport *vport)
|
|
|
{
|
|
|
LPFC_MBOXQ_t *mboxq;
|
|
|
int rc = 0;
|
|
|
int retval = MBX_SUCCESS;
|
|
|
uint32_t mbox_tmo;
|
|
|
-
|
|
|
- if (vpi == 0)
|
|
|
- return -EINVAL;
|
|
|
+ struct lpfc_hba *phba = vport->phba;
|
|
|
mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
|
|
|
if (!mboxq)
|
|
|
return -ENOMEM;
|
|
|
- lpfc_init_vpi(phba, mboxq, vpi);
|
|
|
+ lpfc_init_vpi(phba, mboxq, vport->vpi);
|
|
|
mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI);
|
|
|
rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
|
|
|
if (rc != MBX_SUCCESS) {
|
|
|
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
|
|
|
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
|
|
|
"2022 INIT VPI Mailbox failed "
|
|
|
"status %d, mbxStatus x%x\n", rc,
|
|
|
bf_get(lpfc_mqe_status, &mboxq->u.mqe));
|
|
|
retval = -EIO;
|
|
|
}
|
|
|
if (rc != MBX_TIMEOUT)
|
|
|
- mempool_free(mboxq, phba->mbox_mem_pool);
|
|
|
+ mempool_free(mboxq, vport->phba->mbox_mem_pool);
|
|
|
|
|
|
return retval;
|
|
|
}
|
|
@@ -12854,6 +13296,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
|
|
|
struct lpfc_nodelist *act_mbx_ndlp = NULL;
|
|
|
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
|
|
|
LIST_HEAD(mbox_cmd_list);
|
|
|
+ uint8_t restart_loop;
|
|
|
|
|
|
/* Clean up internally queued mailbox commands with the vport */
|
|
|
spin_lock_irq(&phba->hbalock);
|
|
@@ -12882,15 +13325,44 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
|
|
|
mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
|
|
|
}
|
|
|
}
|
|
|
+ /* Cleanup any mailbox completions which are not yet processed */
|
|
|
+ do {
|
|
|
+ restart_loop = 0;
|
|
|
+ list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
|
|
|
+ /*
|
|
|
+ * If this mailox is already processed or it is
|
|
|
+ * for another vport ignore it.
|
|
|
+ */
|
|
|
+ if ((mb->vport != vport) ||
|
|
|
+ (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
|
|
|
+ continue;
|
|
|
+
|
|
|
+ if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
|
|
|
+ (mb->u.mb.mbxCommand != MBX_REG_VPI))
|
|
|
+ continue;
|
|
|
+
|
|
|
+ mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
|
|
|
+ if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
|
|
|
+ ndlp = (struct lpfc_nodelist *)mb->context2;
|
|
|
+ /* Unregister the RPI when mailbox complete */
|
|
|
+ mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
|
|
|
+ restart_loop = 1;
|
|
|
+ spin_unlock_irq(&phba->hbalock);
|
|
|
+ spin_lock(shost->host_lock);
|
|
|
+ ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
|
|
|
+ spin_unlock(shost->host_lock);
|
|
|
+ spin_lock_irq(&phba->hbalock);
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ } while (restart_loop);
|
|
|
+
|
|
|
spin_unlock_irq(&phba->hbalock);
|
|
|
|
|
|
/* Release the cleaned-up mailbox commands */
|
|
|
while (!list_empty(&mbox_cmd_list)) {
|
|
|
list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
|
|
|
if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
|
|
|
- if (phba->sli_rev == LPFC_SLI_REV4)
|
|
|
- __lpfc_sli4_free_rpi(phba,
|
|
|
- mb->u.mb.un.varRegLogin.rpi);
|
|
|
mp = (struct lpfc_dmabuf *) (mb->context1);
|
|
|
if (mp) {
|
|
|
__lpfc_mbuf_free(phba, mp->virt, mp->phys);
|
|
@@ -12948,12 +13420,13 @@ lpfc_drain_txq(struct lpfc_hba *phba)
|
|
|
while (pring->txq_cnt) {
|
|
|
spin_lock_irqsave(&phba->hbalock, iflags);
|
|
|
|
|
|
- sglq = __lpfc_sli_get_sglq(phba);
|
|
|
+ piocbq = lpfc_sli_ringtx_get(phba, pring);
|
|
|
+ sglq = __lpfc_sli_get_sglq(phba, piocbq);
|
|
|
if (!sglq) {
|
|
|
+ __lpfc_sli_ringtx_put(phba, pring, piocbq);
|
|
|
spin_unlock_irqrestore(&phba->hbalock, iflags);
|
|
|
break;
|
|
|
} else {
|
|
|
- piocbq = lpfc_sli_ringtx_get(phba, pring);
|
|
|
if (!piocbq) {
|
|
|
/* The txq_cnt out of sync. This should
|
|
|
* never happen
|