Diffstat (limited to 'drivers/scsi/lpfc/lpfc_nvme.c')
-rw-r--r-- | drivers/scsi/lpfc/lpfc_nvme.c | 174
1 file changed, 111 insertions, 63 deletions
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 23bdb1ca106e..517ae570e507 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -416,6 +416,9 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
 	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
 	vport = lport->vport;
 
+	if (vport->load_flag & FC_UNLOADING)
+		return -ENODEV;
+
 	ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
@@ -667,15 +670,17 @@ lpfc_nvme_ktime(struct lpfc_hba *phba,
 		struct lpfc_nvme_buf *lpfc_ncmd)
 {
 	uint64_t seg1, seg2, seg3, seg4;
+	uint64_t segsum;
 
-	if (!phba->ktime_on)
-		return;
 	if (!lpfc_ncmd->ts_last_cmd ||
 	    !lpfc_ncmd->ts_cmd_start ||
 	    !lpfc_ncmd->ts_cmd_wqput ||
 	    !lpfc_ncmd->ts_isr_cmpl ||
 	    !lpfc_ncmd->ts_data_nvme)
 		return;
+
+	if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_cmd_start)
+		return;
 	if (lpfc_ncmd->ts_cmd_start < lpfc_ncmd->ts_last_cmd)
 		return;
 	if (lpfc_ncmd->ts_cmd_wqput < lpfc_ncmd->ts_cmd_start)
@@ -695,15 +700,23 @@ lpfc_nvme_ktime(struct lpfc_hba *phba,
 	 * cmpl is handled off to the NVME Layer.
 	 */
 	seg1 = lpfc_ncmd->ts_cmd_start - lpfc_ncmd->ts_last_cmd;
-	if (seg1 > 5000000)  /* 5 ms - for sequential IOs */
-		return;
+	if (seg1 > 5000000)  /* 5 ms - for sequential IOs only */
+		seg1 = 0;
 
 	/* Calculate times relative to start of IO */
 	seg2 = (lpfc_ncmd->ts_cmd_wqput - lpfc_ncmd->ts_cmd_start);
-	seg3 = (lpfc_ncmd->ts_isr_cmpl -
-		lpfc_ncmd->ts_cmd_start) - seg2;
-	seg4 = (lpfc_ncmd->ts_data_nvme -
-		lpfc_ncmd->ts_cmd_start) - seg2 - seg3;
+	segsum = seg2;
+	seg3 = lpfc_ncmd->ts_isr_cmpl - lpfc_ncmd->ts_cmd_start;
+	if (segsum > seg3)
+		return;
+	seg3 -= segsum;
+	segsum += seg3;
+
+	seg4 = lpfc_ncmd->ts_data_nvme - lpfc_ncmd->ts_cmd_start;
+	if (segsum > seg4)
+		return;
+	seg4 -= segsum;
+
 	phba->ktime_data_samples++;
 	phba->ktime_seg1_total += seg1;
 	if (seg1 < phba->ktime_seg1_min)
@@ -840,7 +853,7 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
 	} else {
 		lpfc_ncmd->status = (bf_get(lpfc_wcqe_c_status, wcqe) &
 				     LPFC_IOCB_STATUS_MASK);
-		lpfc_ncmd->result = wcqe->parameter;
+		lpfc_ncmd->result = (wcqe->parameter & IOERR_PARAM_MASK);
 
 		/* For NVME, the only failure path that results in an
 		 * IO error is when the adapter rejects it.  All other
@@ -874,9 +887,20 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
 					 lpfc_ncmd->status, lpfc_ncmd->result,
 					 wcqe->total_data_placed);
 			break;
+		case IOSTAT_LOCAL_REJECT:
+			/* Let fall through to set command final state. */
+			if (lpfc_ncmd->result == IOERR_ABORT_REQUESTED)
+				lpfc_printf_vlog(vport, KERN_INFO,
+					 LOG_NVME_IOERR,
+					 "6032 Delay Aborted cmd %p "
+					 "nvme cmd %p, xri x%x, "
+					 "xb %d\n",
+					 lpfc_ncmd, nCmd,
+					 lpfc_ncmd->cur_iocbq.sli4_xritag,
+					 bf_get(lpfc_wcqe_c_xb, wcqe));
 		default:
out_err:
-			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6072 NVME Completion Error: xri %x "
				 "status x%x result x%x placed x%x\n",
				 lpfc_ncmd->cur_iocbq.sli4_xritag,
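
The lpfc_nvme_ktime() hunks above change the latency accounting in two ways: an over-large seg1 now zeroes the inter-command gap instead of discarding the whole sample, and seg3/seg4 are peeled off a running segsum with an explicit underflow check before every subtraction. Below is a minimal stand-alone C sketch of that pattern; the struct and function names are invented for illustration and are not the driver's.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the driver's per-command timestamps. */
struct io_ts {
	uint64_t last_cmd;   /* previous command's handoff time */
	uint64_t cmd_start;  /* driver received the command */
	uint64_t cmd_wqput;  /* WQE handed to hardware */
	uint64_t isr_cmpl;   /* ISR saw the completion */
	uint64_t data_nvme;  /* completion handed to the NVME layer */
};

/* Fills seg[0..3] and returns 0, or returns -1 when the timestamps are
 * not monotonically ordered: the sample is dropped, as the driver does,
 * rather than producing a huge underflowed u64 delta.
 */
static int io_segments(const struct io_ts *ts, uint64_t seg[4])
{
	uint64_t segsum;

	if (ts->data_nvme < ts->cmd_start ||
	    ts->cmd_start < ts->last_cmd ||
	    ts->cmd_wqput < ts->cmd_start)
		return -1;

	seg[0] = ts->cmd_start - ts->last_cmd;
	if (seg[0] > 5000000)	/* >5 ms gap: not sequential, don't skew */
		seg[0] = 0;

	/* Later segments are measured from cmd_start, then the earlier
	 * segments (segsum) are subtracted, with an underflow check
	 * before each subtraction.
	 */
	seg[1] = ts->cmd_wqput - ts->cmd_start;
	segsum = seg[1];

	seg[2] = ts->isr_cmpl - ts->cmd_start;
	if (segsum > seg[2])
		return -1;
	seg[2] -= segsum;
	segsum += seg[2];

	seg[3] = ts->data_nvme - ts->cmd_start;
	if (segsum > seg[3])
		return -1;
	seg[3] -= segsum;
	return 0;
}

int main(void)
{
	struct io_ts ts = { 100, 200, 350, 900, 1000 };
	uint64_t seg[4];

	if (!io_segments(&ts, seg))
		printf("seg1=%llu seg2=%llu seg3=%llu seg4=%llu\n",
		       (unsigned long long)seg[0], (unsigned long long)seg[1],
		       (unsigned long long)seg[2], (unsigned long long)seg[3]);
	return 0;
}

Returning -1 on a mis-ordered sample mirrors the driver's early returns: a bogus ordering yields no data point instead of a wrapped-around delta polluting the min/max/total counters.
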
@@ -902,7 +926,7 @@ out_err:
 	 * owns the dma address.
 	 */
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-	if (phba->ktime_on) {
+	if (lpfc_ncmd->ts_cmd_start) {
 		lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp;
 		lpfc_ncmd->ts_data_nvme = ktime_get_ns();
 		phba->ktime_last_cmd = lpfc_ncmd->ts_data_nvme;
@@ -920,12 +944,18 @@ out_err:
 #endif
 
 	freqpriv = nCmd->private;
 	freqpriv->nvme_buf = NULL;
-	nCmd->done(nCmd);
+
+	/* NVME targets need completion held off until the abort exchange
+	 * completes.
+	 */
+	if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY))
+		nCmd->done(nCmd);
 
 	spin_lock_irqsave(&phba->hbalock, flags);
 	lpfc_ncmd->nrport = NULL;
 	spin_unlock_irqrestore(&phba->hbalock, flags);
 
+	/* Call release with XB=1 to queue the IO into the abort list. */
 	lpfc_release_nvme_buf(phba, lpfc_ncmd);
 }
@@ -1119,12 +1149,12 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
 		first_data_sgl = sgl;
 		lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
-		if (lpfc_ncmd->seg_cnt > phba->cfg_nvme_seg_cnt) {
+		if (lpfc_ncmd->seg_cnt > phba->cfg_nvme_seg_cnt + 1) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
 					"6058 Too many sg segments from "
 					"NVME Transport. Max %d, "
 					"nvmeIO sg_cnt %d\n",
-					phba->cfg_nvme_seg_cnt,
+					phba->cfg_nvme_seg_cnt + 1,
 					lpfc_ncmd->seg_cnt);
 			lpfc_ncmd->seg_cnt = 0;
 			return 1;
 		}
@@ -1225,6 +1255,21 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
 	vport = lport->vport;
 	phba = vport->phba;
 
+	if (vport->load_flag & FC_UNLOADING) {
+		ret = -ENODEV;
+		goto out_fail;
+	}
+
+	/* Validate pointers. */
+	if (!pnvme_lport || !pnvme_rport || !freqpriv) {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR | LOG_NODE,
+				 "6117 No Send:IO submit ptrs NULL, lport %p, "
+				 "rport %p fcreq_priv %p\n",
+				 pnvme_lport, pnvme_rport, freqpriv);
+		ret = -ENODEV;
+		goto out_fail;
+	}
+
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	if (phba->ktime_on)
 		start = ktime_get_ns();
@@ -1283,9 +1328,11 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
 		goto out_fail;
 	}
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-	if (phba->ktime_on) {
+	if (start) {
 		lpfc_ncmd->ts_cmd_start = start;
 		lpfc_ncmd->ts_last_cmd = phba->ktime_last_cmd;
+	} else {
+		lpfc_ncmd->ts_cmd_start = 0;
 	}
 #endif
@@ -1327,7 +1374,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
 	ret = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, &lpfc_ncmd->cur_iocbq);
 	if (ret) {
 		atomic_dec(&ndlp->cmd_pending);
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
 				 "6113 FCP could not issue WQE err %x "
 				 "sid: x%x did: x%x oxid: x%x\n",
 				 ret, vport->fc_myDID, ndlp->nlp_DID,
@@ -1336,7 +1383,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
 	}
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-	if (phba->ktime_on)
+	if (lpfc_ncmd->ts_cmd_start)
 		lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();
 
 	if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
@@ -1387,7 +1434,7 @@ void
 lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 			   struct lpfc_wcqe_complete *abts_cmpl)
 {
-	lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
 			"6145 ABORT_XRI_CN completing on rpi x%x "
 			"original iotag x%x, abort cmd iotag x%x "
 			"req_tag x%x, status x%x, hwstatus x%x\n",
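
The completion-path hunks above carry the deferral rule this patch is built around: when the WCQE reports the exchange still busy (LPFC_SBUF_XBUSY, from the XB bit), nCmd->done() is withheld and the buffer is parked for the aborted-XRI path; the transport only sees the completion after the abort exchange finishes. A small stand-alone sketch of that gating follows; all names here are invented, not the driver's types.

#include <stdbool.h>
#include <stdio.h>

/* Invented miniature of "hold done() while the exchange is busy". */
struct request {
	const char *name;
	void (*done)(struct request *rq);
	bool xbusy;		/* hardware still owns the exchange */
};

static void rq_done(struct request *rq)
{
	printf("%s: completed to upper layer\n", rq->name);
}

/* WQE completion path: complete now only if the exchange is free. */
static void wqe_cmpl(struct request *rq, bool xb)
{
	rq->xbusy = xb;
	if (!rq->xbusy)
		rq->done(rq);
	else
		printf("%s: XB set, holding done() for abort cmpl\n",
		       rq->name);
}

/* Abort (XRI-aborted) completion path: run the deferred done(). */
static void abort_cmpl(struct request *rq)
{
	rq->xbusy = false;
	rq->done(rq);
}

int main(void)
{
	struct request a = { "io-a", rq_done, false };
	struct request b = { "io-b", rq_done, false };

	wqe_cmpl(&a, false);	/* normal IO: completes immediately */
	wqe_cmpl(&b, true);	/* aborted IO: deferred */
	abort_cmpl(&b);		/* abort exchange done: now completes */
	return 0;
}

The second half of the mechanism, completing the parked command from the XRI-aborted event, appears in the lpfc_sli4_nvme_xri_aborted() hunks further down.
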
@@ -1938,14 +1985,13 @@ lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
 		 * pci bus space for an I/O. The DMA buffer includes the
 		 * number of SGE's necessary to support the sg_tablesize.
 		 */
-		lpfc_ncmd->data = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
-						 GFP_KERNEL,
-						 &lpfc_ncmd->dma_handle);
+		lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
+						  GFP_KERNEL,
+						  &lpfc_ncmd->dma_handle);
 		if (!lpfc_ncmd->data) {
 			kfree(lpfc_ncmd);
 			break;
 		}
-		memset(lpfc_ncmd->data, 0, phba->cfg_sg_dma_buf_size);
 
 		lxri = lpfc_sli4_next_xritag(phba);
 		if (lxri == NO_XRI) {
@@ -2042,9 +2088,6 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 	spin_lock_irqsave(&phba->nvme_buf_list_get_lock, iflag);
 	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
 				 &phba->lpfc_nvme_buf_list_get, list) {
-		if (lpfc_test_rrq_active(phba, ndlp,
-					 lpfc_ncmd->cur_iocbq.sli4_lxritag))
-			continue;
 		list_del_init(&lpfc_ncmd->list);
 		found = 1;
 		break;
@@ -2057,9 +2100,6 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 		spin_unlock(&phba->nvme_buf_list_put_lock);
 		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
 					 &phba->lpfc_nvme_buf_list_get, list) {
-			if (lpfc_test_rrq_active(
-				phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
-				continue;
 			list_del_init(&lpfc_ncmd->list);
 			found = 1;
 			break;
@@ -2096,7 +2136,6 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
 		spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock,
 				  iflag);
-		lpfc_ncmd->nvmeCmd = NULL;
 		list_add_tail(&lpfc_ncmd->list,
 			&phba->sli4_hba.lpfc_abts_nvme_buf_list);
 		spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock,
@@ -2296,6 +2335,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	struct lpfc_nvme_rport *rport;
 	struct nvme_fc_remote_port *remote_port;
 	struct nvme_fc_port_info rpinfo;
+	struct lpfc_nodelist *prev_ndlp;
 
 	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
 			 "6006 Register NVME PORT. DID x%06x nlptype x%x\n",
@@ -2332,7 +2372,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 		 * new rport.
 		 */
 		rport = remote_port->private;
-		if (ndlp->nrport == rport) {
+		if (ndlp->nrport) {
 			lpfc_printf_vlog(ndlp->vport, KERN_INFO,
 					 LOG_NVME_DISC,
 					 "6014 Rebinding lport to "
@@ -2343,24 +2383,33 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 					 remote_port->port_role,
 					 ndlp->nlp_type,
 					 ndlp->nlp_DID);
-		} else {
-			/* New rport. */
-			rport->remoteport = remote_port;
-			rport->lport = lport;
-			rport->ndlp = lpfc_nlp_get(ndlp);
-			if (!rport->ndlp)
-				return -1;
-			ndlp->nrport = rport;
-			lpfc_printf_vlog(vport, KERN_INFO,
-					 LOG_NVME_DISC | LOG_NODE,
-					 "6022 Binding new rport to "
-					 "lport %p Rport WWNN 0x%llx, "
-					 "Rport WWPN 0x%llx DID "
-					 "x%06x Role x%x\n",
-					 lport,
-					 rpinfo.node_name, rpinfo.port_name,
-					 rpinfo.port_id, rpinfo.port_role);
+			prev_ndlp = rport->ndlp;
+
+			/* Sever the ndlp<->rport connection before dropping
+			 * the ndlp ref from register.
+			 */
+			ndlp->nrport = NULL;
+			rport->ndlp = NULL;
+			if (prev_ndlp)
+				lpfc_nlp_put(ndlp);
 		}
+
+		/* Clean bind the rport to the ndlp. */
+		rport->remoteport = remote_port;
+		rport->lport = lport;
+		rport->ndlp = lpfc_nlp_get(ndlp);
+		if (!rport->ndlp)
+			return -1;
+		ndlp->nrport = rport;
+		lpfc_printf_vlog(vport, KERN_INFO,
+				 LOG_NVME_DISC | LOG_NODE,
+				 "6022 Binding new rport to "
+				 "lport %p Rport WWNN 0x%llx, "
+				 "Rport WWPN 0x%llx DID "
+				 "x%06x Role x%x\n",
+				 lport,
+				 rpinfo.node_name, rpinfo.port_name,
+				 rpinfo.port_id, rpinfo.port_role);
 	} else {
 		lpfc_printf_vlog(vport, KERN_ERR,
 				 LOG_NVME_DISC | LOG_NODE,
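
The register-port rework above replaces the old "same rport" test with an unconditional clean bind: any existing ndlp<->rport linkage is severed and the reference the earlier registration took is dropped before lpfc_nlp_get() takes the reference for the new binding. A toy refcount model of that ordering follows; all types and names are invented for the sketch.

#include <stdio.h>

/* Invented miniature of the get/put discipline: sever the old binding
 * and drop its reference before taking the new one, so a rebind never
 * leaks or double-drops a node reference.
 */
struct node {
	int refs;
	struct rport *nrport;
};

struct rport {
	struct node *ndlp;
};

static struct node *node_get(struct node *n) { n->refs++; return n; }
static void node_put(struct node *n) { n->refs--; }

static void bind_rport(struct node *ndlp, struct rport *rport)
{
	struct node *prev_ndlp;

	if (ndlp->nrport) {
		/* Rebinding: sever the old connection first. */
		prev_ndlp = rport->ndlp;
		ndlp->nrport = NULL;
		rport->ndlp = NULL;
		if (prev_ndlp)
			node_put(ndlp);
	}

	/* Clean bind. */
	rport->ndlp = node_get(ndlp);
	ndlp->nrport = rport;
}

int main(void)
{
	struct node n = { 1, NULL };
	struct rport r = { NULL };

	bind_rport(&n, &r);	/* first bind: refs 1 -> 2 */
	bind_rport(&n, &r);	/* rebind: put then get, refs stay 2 */
	printf("refs=%d\n", n.refs);
	return 0;
}

Doing the put before the get keeps the count balanced across a rebind, which is why a repeat registration leaves refs unchanged in the main() above.
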
@@ -2454,18 +2503,18 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
  * @axri: pointer to the fcp xri abort wcqe structure.
  *
  * This routine is invoked by the worker thread to process a SLI4 fast-path
- * FCP aborted xri.
+ * NVME aborted xri. Aborted NVME IO commands are completed to the transport
+ * here.
 **/
 void
 lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
 			   struct sli4_wcqe_xri_aborted *axri)
 {
 	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
-	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
 	struct lpfc_nvme_buf *lpfc_ncmd, *next_lpfc_ncmd;
+	struct nvmefc_fcp_req *nvme_cmd = NULL;
 	struct lpfc_nodelist *ndlp;
 	unsigned long iflag = 0;
-	int rrq_empty = 0;
 
 	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
 		return;
@@ -2481,25 +2530,24 @@ lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
 			spin_unlock(
 				&phba->sli4_hba.abts_nvme_buf_list_lock);
-			rrq_empty = list_empty(&phba->active_rrq_list);
 			spin_unlock_irqrestore(&phba->hbalock, iflag);
 			ndlp = lpfc_ncmd->ndlp;
-			if (ndlp) {
-				lpfc_set_rrq_active(
-					phba, ndlp,
-					lpfc_ncmd->cur_iocbq.sli4_lxritag,
-					rxid, 1);
+			if (ndlp)
 				lpfc_sli4_abts_err_handler(phba, ndlp, axri);
-			}
 
 			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
-					"6311 XRI Aborted xri x%x tag x%x "
-					"released\n",
-					xri, lpfc_ncmd->cur_iocbq.iotag);
-
+					"6311 nvme_cmd %p xri x%x tag x%x "
+					"abort complete and xri released\n",
+					lpfc_ncmd->nvmeCmd, xri,
+					lpfc_ncmd->cur_iocbq.iotag);
+
+			/* Aborted NVME commands are required to not complete
+			 * before the abort exchange command fully completes.
+			 * Once completed, it is available via the put list.
+			 */
+			nvme_cmd = lpfc_ncmd->nvmeCmd;
+			nvme_cmd->done(nvme_cmd);
 			lpfc_release_nvme_buf(phba, lpfc_ncmd);
-			if (rrq_empty)
-				lpfc_worker_wake_up(phba);
 			return;
 		}
 	}
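
The final hunks close the deferred-done loop: lpfc_release_nvme_buf() no longer clears nvmeCmd when parking a buffer on the aborted-XRI list, so lpfc_sli4_nvme_xri_aborted() can find the still-pending command by XRI and call its done() once the abort exchange has drained. A hedged sketch of that park-and-complete handoff, using a plain array in place of the driver's locked list; every name here is invented for illustration.

#include <stdint.h>
#include <stdio.h>

#define ABTS_MAX 8

struct nbuf {
	uint16_t xri;
	const char *cmd;	/* must survive until the abort completes */
};

static struct nbuf abts_list[ABTS_MAX];
static int abts_cnt;

/* Park an XB-busy buffer; the command pointer is kept, not cleared. */
static void park_aborted(struct nbuf b)
{
	if (abts_cnt < ABTS_MAX)
		abts_list[abts_cnt++] = b;
}

/* XRI-aborted event: locate the parked buffer by tag and finally
 * complete its command, then release the buffer slot.
 */
static void xri_aborted(uint16_t xri)
{
	for (int i = 0; i < abts_cnt; i++) {
		if (abts_list[i].xri != xri)
			continue;
		printf("xri x%x: completing deferred cmd %s\n",
		       xri, abts_list[i].cmd);
		abts_list[i] = abts_list[--abts_cnt];
		return;
	}
}

int main(void)
{
	park_aborted((struct nbuf){ 0x1a3, "read-4k" });
	xri_aborted(0x1a3);
	return 0;
}

Had the release path nulled the command pointer (as the removed lpfc_ncmd->nvmeCmd = NULL line did), the abort handler would have nothing left to complete, which is the ordering bug these hunks remove.
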